diff --git a/.codacy.yml b/.codacy.yml new file mode 100644 index 0000000000000000000000000000000000000000..bf12fc2d77140a852ff1b9197370766a0ecec7c3 --- /dev/null +++ b/.codacy.yml @@ -0,0 +1,21 @@ +# codacy configuration file + +--- + +engines: + coverage: + enabled: true + metrics: + enabled: true + duplication: + enabled: true + prospector: + enabled: true + pylint: + enabled: true + python_version: 2 + +exclude_paths: [ + 'doc/**', + 'data/**', +] diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..08c1355a02d158efce8b3c8438df86fdc3275b6e --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,43 @@ +before_script: + - export GIT_SSL_NO_VERIFY=1 + - export PATH="$HOME/miniconda2/bin:$PATH" + +stages: + - prepare + - test + - report + - clean + +cache: + paths: + - tests/report + +prepare: + stage: prepare + script: + - conda update conda + +test_python2: + stage: test + script: + - conda env update -f environment.yml -n hermesv3_gr python=2.7 + - source activate hermesv3_gr + - python run_test.py + - pip install codacy-coverage --upgrade + - python-codacy-coverage -r tests/report/python2/coverage.xml + +#test_python3: +# stage: test +# script: +# - git submodule sync --recursive +# - git submodule update --init --recursive +# - conda env update -f environment.yml -n earthdiagnostics3 python=3.6 +# - source activate earthdiagnostics3 +# - python run_test.py + +clean: + stage: clean + script: + - conda clean --all --yes + + diff --git a/.prospector.yml b/.prospector.yml new file mode 100644 index 0000000000000000000000000000000000000000..b9c6fa952133de951c9005edb1b2f01d9bcb851b --- /dev/null +++ b/.prospector.yml @@ -0,0 +1,36 @@ +# prospector configuration file + +--- + +output-format: grouped + +strictness: veryhigh +doc-warnings: true +test-warnings: true +member-warnings: false + +pyroma: + run: true + +pydocroma: + run: true + +pep8: + disable: [ + E501, # Line-length, already controlled by pylint + ] + 
+pep257: + run: true + # see http://pep257.readthedocs.io/en/latest/error_codes.html + disable: [ + # For short descriptions it makes sense not to end with a period: + D400, # First line should end with a period + # Disable because not part of PEP257 official convention: + D203, # 1 blank line required before class docstring + D212, # Multi-line docstring summary should start at the first line + D213, # Multi-line docstring summary should start at the second line + D404, # First word of the docstring should not be This + D107, # We are using numpy style and constructor should be documented in class docstring + D105, # Docstring in magic methods should not be required: we all know what they are for + ] diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000000000000000000000000000000000..db7741b994ec5e5720e803d9e73b639a55435ff8 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,407 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loaded into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. 
This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. This option is deprecated +# and it will be removed in Pylint 2.0. +optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". This option is deprecated +# and it will be removed in Pylint 2.0. +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). 
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=120 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format=LF + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,FIX-ME,XXX,TODO + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. 
A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,future.builtins + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_,logger + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=yes + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match function or class 
names that do +# not require a docstring. +no-docstring-rgx=^_ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[ELIF] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). 
+max-public-methods=20 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec,optparse + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. 
Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/CHANGELOG b/CHANGELOG index 2409f3cb18e3cb457c5a6998578ebc5f254218d9..02048fdcba12b560db50a1b27b87a56b51af2ce6 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,2 +1,4 @@ 0.0.0 - HERMESv3_GR first release \ No newline at end of file + 2018/09/18 + + HERMESv3_GR beta version first release \ No newline at end of file diff --git a/README.md b/README.md index 9146f781f3ad978e2740515083009ec7f749ab24..b393f1f503d737afb63e16c85f268c9cbc7ee09c 100644 --- a/README.md +++ b/README.md @@ -1 +1,2 @@ -# HERMESv3 Global/Regional \ No newline at end of file +# HERMESv3 Global/Regional +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/34fc5d6c803444178034b99dd28c7e3c)](https://www.codacy.com/app/carlestena/hermesv3_gr?utm_source=earth.bsc.es&utm_medium=referral&utm_content=gitlab/es/hermesv3_gr&utm_campaign=Badge_Grade) diff --git a/VERSION b/VERSION deleted file mode 100644 index bd52db81d0cdfa45b8ccaf4e811f178160eb261e..0000000000000000000000000000000000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.0.0 \ No newline at end of file diff --git a/conf/EI_configuration.csv b/conf/EI_configuration.csv new file mode 100644 index 0000000000000000000000000000000000000000..8dba53247cbb20dd48d3ec18669c50da123ce17f --- /dev/null +++ b/conf/EI_configuration.csv @@ -0,0 +1,98 @@ +ei;sector;ref_year;active;factor_mask;regrid_mask;pollutants;path;frequency;source_type;p_vertical;p_month;p_day;p_hour;p_speciation;comment +GFASv12;;2015;0;;;co,nox_no,pm25,oc,bc,so2,ch3oh,c2h5oh,c3h8,c2h4,c3h6,c5h8,terpenes,hialkenes,hialkanes,ch2o,c2h4o,c3h6o,nh3,c2h6s,c2h6,c7h8,c6h6,c8h10,c4h8,c5h10,c6h12,c8h16,c4h10,c5h12,c6h14,c7h16;/ecmwf/gfas/daily_mean;daily;area;method=sovief,approach=uniform;;;H001;E001; 
+HTAPv2;energy;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23,voc24;/jrc/htapv2/monthly_mean;monthly;area;V001;;D002;H002;E002; +HTAPv2;industry;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/htapv2/monthly_mean;monthly;area;V002;;D003;H004;E003; +HTAPv2;residential;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc19,voc21,voc22,voc23,voc24;/jrc/htapv2/monthly_mean;monthly;area;;;D003;H003;E004; +HTAPv2;transport;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,nh3,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23;/jrc/htapv2/monthly_mean;monthly;area;;;D005;weekday=H006, saturday=H009, sunday=H010;E005; +HTAPv2;agriculture;2010;1;;;nh3;/jrc/htapv2/monthly_mean;monthly;area;;;D001;H007;E006; +HTAPv2;air_lto;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,voc02,voc03,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc17,voc21,voc22,voc23;/jrc/htapv2/yearly_mean;yearly;area;V003;M001;D001;H001;E007; +HTAPv2;air_cds;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,voc02,voc03,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc17,voc21,voc22,voc23;/jrc/htapv2/yearly_mean;yearly;area;V004;M001;D001;H001;E007; +HTAPv2;air_crs;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,voc02,voc03,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc17,voc21,voc22,voc23;/jrc/htapv2/yearly_mean;yearly;area;V005;M001;D001;H001;E007; +HTAPv2;ships;2010;1;;;co,nox_no2,pm10,pm25,oc,bc,so2,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc21,voc22,voc23,voc24;/jrc/htapv2/yearly_mean;yearly;area;;M001;D001;H001;E008; 
+wiedinmyer;;2010;1;;;bc,c2h2,c2h4,c3h6,c6h6,ch2o,ch3cooh,ch3oh,co,hcl,nh3,nox_no,oc,pm10,pm25,so2;/ucar/wiedinmyer/yearly_mean;yearly;area;;M001;D001;H008;E009; +TNO_MACC-III;snap1;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc19,voc21,voc22,voc23,voc24;/tno/tno_macc_iii/yearly_mean/;yearly;area;V001;M002;D002;H002;E010; +TNO_MACC-III;snap2;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc19,voc21,voc22,voc23,voc24;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M003;D003;H003;E011; +TNO_MACC-III;snap34;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc19,voc21,voc22,voc23,voc24;/tno/tno_macc_iii/yearly_mean/;yearly;area;V002;M004;D003;H004;E012; +TNO_MACC-III;snap5;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc02,voc03,voc04,voc05,voc06,voc12,voc13,voc14,voc15;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M005;D001;H001;E013; +TNO_MACC-III;snap6;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc06,voc14,voc15,voc17,voc18,voc19,voc20,voc23;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M006;D004;H005;E014; +TNO_MACC-III;snap71;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E015; +TNO_MACC-III;snap72;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc17,voc21,voc22,voc23;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E016; +TNO_MACC-III;snap73;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc02,voc03,voc07,voc08,voc12,voc17,voc21,voc22;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E017; 
+TNO_MACC-III;snap74;2011;0;;;voc03,voc04,voc05,voc06,voc12,voc13,voc14,voc15;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E018; +TNO_MACC-III;snap75;2011;0;;;pm10,pm25;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E019; +TNO_MACC-III;snap8;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23,voc24;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M008;D001;H001;E020; +TNO_MACC-III;snap9;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M001;D001;H001;E021; +TNO_MACC-III;snap10;2011;0;;;co,nox_no2,so2,nh3,pm10,pm25;/tno/tno_macc_iii/yearly_mean/;yearly;area;;M009;D001;H007;E022; +ECLIPSEv5a;agriculture;2010;0;;;pm10,pm25,nh3;/iiasa/eclipsev5a/monthly_mean;monthly;area;;;D001;H007;E023; +ECLIPSEv5a;flaring;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/yearly_mean;yearly;area;V006;M001;D001;H001;E024; +ECLIPSEv5a;energy;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;V001;;D002;H002;E025; +ECLIPSEv5a;industry;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;V002;;D003;H004;E026; +ECLIPSEv5a;transport;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;;;D005;weekday=H006, saturday=H009, sunday=H010;E027; +ECLIPSEv5a;residential;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;;;D003;H003;E028; +ECLIPSEv5a;waste;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;;;D001;H001;E029; +ECLIPSEv5.a;solvent;2010;0;;;nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;;M006;D004;H005;E030; 
+ECLIPSEv5a;agricultural_waste;2010;0;;;co,nox_no2,nh3,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/monthly_mean;monthly;area;;;D001;H007;E031; +ECLIPSEv5a;ships;2010;0;;;co,nox_no2,pm10,pm25,oc,bc,so2,nmvoc;/iiasa/eclipsev5a/yearly_mean;yearly;area;;M001;D001;H001;E032; +EDGARv432_AP;ags;2010;0;;;nox_no2,nh3,pm10,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H007;E033; +EDGARv432_AP;awb;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H007;E034; +EDGARv432_AP;che;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E035; +EDGARv432_AP;ene;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V001;;D002;H002;E036; +EDGARv432_AP;foo_pap;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E037; +EDGARv432_AP;fff;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,oc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V006;;D001;H001;E038; +EDGARv432_AP;ind;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E039; +EDGARv432_AP;iro;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E040; +EDGARv432_AP;mnm;2010;0;;;nox_no2,nh3,pm10,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H007;E041; +EDGARv432_AP;neu;2010;0;;;pm10;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E042; +EDGARv432_AP;nfe;2010;0;;;nox_no2,co,so2,pm10,bc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E043; +EDGARv432_AP;nmm;2010;0;;;co,so2,nh3,nmvoc,pm10,bc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V002;;D003;H004;E044; +EDGARv432_AP;pro;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,oc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E045; 
+EDGARv432_AP;pru_sol;2010;0;;;nh3,nmvoc,pm10,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D004;H005;E046; +EDGARv432_AP;rco;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D003;H003;E047; +EDGARv432_AP;ref_trf;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V006;;D001;H001;E048; +EDGARv432_AP;swd_inc;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V006;;D001;H001;E049; +EDGARv432_AP;swd_ldf;2010;0;;;nh3,nmvoc,pm10,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E050; +EDGARv432_AP;tnr_aviation_cds;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,oc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V004;;D001;H001;E051; +EDGARv432_AP;tnr_aviation_crs;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,oc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V005;;D001;H001;E052; +EDGARv432_AP;tnr_aviation_lto;2010;0;;;nox_no2,co,so2,nmvoc,pm10,bc,oc,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;V003;;D001;H001;E053; +EDGARv432_AP;tnr_other;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E054; +EDGARv432_AP;tnr_ship;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E055; +EDGARv432_AP;tro;2010;0;;;nox_no2,co,so2,nh3,nmvoc,pm10,bc,oc,pm25_bio,pm25_fossil;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D005;weekday=H006, saturday=H009, sunday=H010;E056; +EDGARv432_AP;wwt;2010;0;;;nh3,nmvoc;/jrc/edgarv432_ap/monthly_mean;monthly;area;;;D001;H001;E057; +EDGARv432_VOC;awb;2010;0;;;voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc19,voc21,voc22,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H007;E058; 
+EDGARv432_VOC;ene;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;V001;;D002;H002;E059; +EDGARv432_VOC;fff;2010;0;;;voc2,voc3,voc4,voc5,voc6,voc8,voc10,voc12,voc13,voc14,voc15,voc17,voc18,voc19,voc20;/jrc/edgarv432_voc/monthly_mean;monthly;area;V006;;D001;H001;E060; +EDGARv432_VOC;ind;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;V002;;D003;H004;E061; +EDGARv432_VOC;ppa;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E062; +EDGARv432_VOC;pro;2010;0;;;voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E063; +EDGARv432_VOC;rco;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D003;H003;E064; +EDGARv432_VOC;ref;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc12,voc13,voc14,voc15,voc16,voc17,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;V006;;D003;H004;E065; +EDGARv432_VOC;swd;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc17,voc18,voc19,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E066; +EDGARv432_VOC;tnr_aviation_cds;2010;0;;;voc1,voc2,voc3,voc5,voc6,voc7,voc8,voc9,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;V004;;D001;H001;E067; 
+EDGARv432_VOC;tnr_aviation_crs;2010;0;;;voc1,voc2,voc3,voc5,voc6,voc7,voc8,voc9,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;V005;;D001;H001;E068; +EDGARv432_VOC;tnr_aviation_lto;2010;0;;;voc1,voc2,voc3,voc5,voc6,voc7,voc8,voc9,voc12,voc13,voc14,voc15,voc16,voc17,voc21,voc22,voc23;/jrc/edgarv432_voc/monthly_mean;monthly;area;V003;;D001;H001;E069; +EDGARv432_VOC;tnr_other;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E070; +EDGARv432_VOC;tnr_ship;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E071; +EDGARv432_VOC;trf;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D001;H001;E072; +EDGARv432_VOC;tro;2010;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc10,voc11,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jrc/edgarv432_voc/monthly_mean;monthly;area;;;D005;weekday=H006, saturday=H009, sunday=H010;E073; +EMEP;a_publicpower;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;V001;M002;D002;H002;E074; +EMEP;b_industry;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;V002;M004;D003;H004;E075; +EMEP;c_otherstationarycomb;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M003;D003;H003;E076; +EMEP;d_fugitive;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M005;D001;H001;E077; +EMEP;e_solvents;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M006;D004;H005;E078; 
+EMEP;f_roadtransport;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M007;D005;weekday=H006, saturday=H009, sunday=H010;E079; +EMEP;g_shipping;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M001;D001;H001;E080; +EMEP;h_aviation;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;V003;M001;D001;H001;E081; +EMEP;i_offroad;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M001;D001;H001;E082; +EMEP;j_waste;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M001;D001;H001;E083; +EMEP;k_agrilivestock;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M009;D001;H007;E084; +EMEP;l_agriother;2015;0;;;co,nox_no2,pm10,pm25,so2,nmvoc,nh3;/ceip/emepv18/yearly_mean;yearly;area;;M009;D001;H007;E085; +carn;;2015;1;;;so2;/mtu/carnetal/yearly_mean;yearly;point;;M001;D001;H001;E086; +CEDS;agriculture;2014;0;;;nox_no2,nh3;/jgcri/ceds/monthly_mean;monthly;area;;;D001;H007;E087; +CEDS;energy;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;V001;;D002;H002;E088; +CEDS;industrial;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;V002;;D003;H004;E089; +CEDS;transportation;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;;;D005;weekday=H006, saturday=H009, sunday=H010;E090; 
+CEDS;residential;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;;;D003;H003;E091; +CEDS;solvent;2014;0;;;voc1,voc2,voc3,voc4,voc5,voc6,voc7,voc8,voc9,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;;;D004;H005;E092; +CEDS;waste;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;;;D001;H001;E093; +CEDS;shipping;2014;0;;;co,nox_no2,oc,bc,so2,nh3,voc01,voc02,voc03,voc04,voc05,voc06,voc07,voc08,voc09,voc12,voc13,voc14,voc15,voc16,voc17,voc18,voc19,voc20,voc21,voc22,voc23,voc24;/jgcri/ceds/monthly_mean;monthly;area;;;D001;H001;E094; +CEDS;air;2014;0;;;co,nox_no2,oc,bc,so2,nh3,nmvoc;/jgcri/ceds/monthly_mean;monthly;area;;;D001;H001;E095; \ No newline at end of file diff --git a/conf/hermes.conf b/conf/hermes.conf new file mode 100644 index 0000000000000000000000000000000000000000..aa0e356c180cf33a17ca0e02407834f19d6e7cc1 --- /dev/null +++ b/conf/hermes.conf @@ -0,0 +1,79 @@ +[GENERAL] +log_level = 3 +input_dir = /home/user/HERMES/HERMES_IN +data_path = /home/user/HERMES/datasets +output_dir = /home/user/HERMES/HERMES_OUT +output_name = HERMESv3_.nc +start_date = 2018/01/01 00:00:00 +# ***** end_date = start_date [DEFAULT] ***** +# end_date = 2018/01/02 00:00:00 +# ***** output_timestep_type = [hourly, daily, monthly, yearly] ***** +output_timestep_type = hourly +output_timestep_num = 24 +output_timestep_freq = 1 + +[DOMAIN] + +# ***** output_model = [MONARCH, CMAQ, WRF_CHEM] ***** +output_model = MONARCH +# output_model = CMAQ +# output_model = WRF_CHEM +output_attributes = /data/global_attributes.csv + +# ***** domain_type=[global, lcc, rotated, mercator] ***** +domain_type = global +# domain_type 
= lcc +# domain_type = rotated +# domain_type = mercator +vertical_description = /data/profiles/vertical/Benchmark_15layers_vertical_description.csv +auxiliar_files_path = /data/auxiliar_files/_ + +# if domain_type == global: + inc_lat = 1. + inc_lon = 1.40625 + +# if domain_type == rotated: + #centre_lat = 35 + #centre_lon = 20 + #west_boundary = -51 + #south_boundary = -35 + #inc_rlat = 0.1 + #inc_rlon = 0.1 + +# if domain_type == lcc: + #lat_1 = 37 + #lat_2 = 43 + #lon_0 = -3 + #lat_0 = 40 + #nx = 478 + #ny = 398 + #inc_x = 12000 + #inc_y = 12000 + #x_0 = -2131849.000 + #y_0 = -2073137.875 + +# if domain_type == mercator: + #lat_ts = -1.5 + #lon_0 = -18 + #nx = 210 + #ny = 236 + #inc_x = 50000 + #inc_y = 50000 + #x_0 = -126017.5 + #y_0 = -5407460 + + +[EMISSION_INVENTORY_CONFIGURATION] + +cross_table = /conf/EI_configuration.csv + +[EMISSION_INVENTORY_PROFILES] + +p_vertical = /data/profiles/vertical/Vertical_profile.csv +p_month = /data/profiles/temporal/TemporalProfile_Monthly.csv +p_day = /data/profiles/temporal/TemporalProfile_Daily.csv +p_hour = /data/profiles/temporal/TemporalProfile_Hourly.csv +p_speciation = /data/profiles/speciation/Speciation_profile_cb05_aero5_MONARCH.csv + +molecular_weights = /data/profiles/speciation/MolecularWeights.csv +world_info = /data/profiles/temporal/tz_world_country_iso3166.csv diff --git a/data/global_attributes.csv b/data/global_attributes.csv new file mode 100644 index 0000000000000000000000000000000000000000..77cd4a2d321a18b8cc683e3c43763f7d39a07c17 --- /dev/null +++ b/data/global_attributes.csv @@ -0,0 +1,44 @@ +attribute,value +BOTTOM-TOP_GRID_DIMENSION,35 +GRIDTYPE,C +DIFF_OPT,1 +KM_OPT,4 +DAMP_OPT,0 +DAMPCOEF,0.01 +KHDIF,0. +KVDIF,0. 
+MP_PHYSICS,4 +RA_LW_PHYSICS,1 +RA_SW_PHYSICS,2 +SF_SFCLAY_PHYSICS,1 +SF_SURFACE_PHYSICS,2 +BL_PBL_PHYSICS,1 +CU_PHYSICS,5 +SF_LAKE_PHYSICS,0 +SURFACE_INPUT_SOURCE,1 +SST_UPDATE,0 +GRID_FDDA,0 +GFDDA_INTERVAL_M,0 +GFDDA_END_H,0 +GRID_SFDDA,0 +SGFDDA_INTERVAL_M,0 +SGFDDA_END_H,0 +BOTTOM-TOP_PATCH_START_UNSTAG,1 +BOTTOM-TOP_PATCH_END_UNSTAG,34 +BOTTOM-TOP_PATCH_START_STAG,1 +BOTTOM-TOP_PATCH_END_STAG,35 +GRID_ID,1 +PARENT_ID,0 +I_PARENT_START,1 +J_PARENT_START,1 +PARENT_GRID_RATIO,1 +DT,5. +MMINLU,USGS +NUM_LAND_CAT,24 +ISWATER,16 +ISLAKE,-1 +ISICE,24 +ISURBAN,1 +ISOILWATER,14 +CEN_LAT,-2.840012 +CEN_LON,-79.16 \ No newline at end of file diff --git a/data/profiles/speciation/MolecularWeights.csv b/data/profiles/speciation/MolecularWeights.csv new file mode 100644 index 0000000000000000000000000000000000000000..f005210cc7e5ade22ae2dc05df1b85cfe274fb0a --- /dev/null +++ b/data/profiles/speciation/MolecularWeights.csv @@ -0,0 +1,65 @@ +Specie;MW +nox_no;30.01 +nox_no2;46.01 +co;28.01 +so2;64.06 +nh3;17.03 +pm10;1.0 +pm25;1.0 +pm25_fossil;1.0 +pm25_bio;1.0 +oc;1.0 +bc;1.0 +c2h6s;62.13 +hcl;36.46 +c2h2;26.04 +ch3cooh;60.05 +c2h4o;44.05 +c4h10;58.12 +c2h5oh;46.07 +ch2o;30.03 +c6h14;86.18 +hialkanes;118.89 +c5h8;68.12 +c8h16;112.21 +c5h10;70.13 +c3h6;42.08 +c7h8;92.14 +c8h10;106.17 +c3h6o;58.08 +c6h6;78.11 +c4h8;56.11 +c2h6;30.07 +c2h4;28.05 +c7h16;100.2 +c6h12;84.16 +hialkenes;75.78 +ch3oh;32.04 +c5h12;72.15 +c3h8;44.1 +terpenes;160.0 +voc01;46.2 +voc02;30.07 +voc03;44.1 +voc04;58.12 +voc05;72.15 +voc06;106.8 +voc07;28.05 +voc08;42.08 +voc09;26.04 +voc10;68.12 +voc11;136.24 +voc12;67 +voc13;78.11 +voc14;92.14 +voc15;106.17 +voc16;120.0 +voc17;126.8 +voc18;104.7 +voc19;81.5 +voc20;138.8 +voc21;30.03 +voc22;68.8 +voc23;75.3 +voc24;59.1 +voc25;86.9 diff --git a/data/profiles/speciation/Speciation_profile_cb05_aero5_CMAQ.csv b/data/profiles/speciation/Speciation_profile_cb05_aero5_CMAQ.csv new file mode 100644 index 
0000000000000000000000000000000000000000..d667082d115bd9a1d77f07139e6f719751fe3ddb --- /dev/null +++ b/data/profiles/speciation/Speciation_profile_cb05_aero5_CMAQ.csv @@ -0,0 +1,98 @@ +ID;NO;NO2;HONO;CO;SO2;NH3;ALD2;ALDX;BENZENE;ETH;ETHA;ETOH;FORM;IOLE;ISOP;MEOH;OLE;PAR;SESQ;TERP;TOL;XYL;DMS;HCL;SULF;POC;PEC;PNO3;PSO4;PMFINE;PMC +units;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1 +short_description;nitrogen_monoxide;nitrogen_dioxide;nitrous_acid;carbon_monoxide;sulfur_dioxide;ammonia;acetaldehyde;higher_aldehydes;benzene;ethene;ethane;ethanol;formaldehyde;internal_olefin_carbon_bond;isoprene;methanol;terminal_olefin_carbon_bond;paraffin_carbon_bond;sesquiterpenes;terpene;toluene;xylene;dimethyl_sulfide;hydrogen_chloride;sulfuric_acid;primary_organic_carbon;primary_elemental_carbon;primary_nitrate_fine;primary_sulfate_fine;primary_others_fine;pm_coarse +E001;0.72*nox_no;0.18*nox_no;0.1*nox_no;co;so2;nh3;c2h4o;0;c6h6;c2h4;c2h6;c2h5oh;0;0.5*hialkenes;c5h8;ch3oh;c8h16+c5h10+c3h6+c4h8+c6h12+0.5*hialkanes;4*c4h10+6*c6h14+5*hialkanes+6*c8h16+3*c5h10+c3h6+3*c3h6o+2*c4h8+7*c7h16+4*c6h12+hialkenes+5*c5h12+1.5*c3h8;0;terpenes;ch2o+c7h8;c8h10;c2h6s;0;0;oc;5.9*bc;0;0;3.3*pm25-3*oc-5.9*bc;0 +E002;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.155;(pm25-oc-bc)*0.845;pm10-pm25 
+E003;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.911;pm10-pm25 +E004;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.047;(pm25-oc-bc)*0.953;pm10-pm25 +E005;0.823*nox_no2;0.16*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.059;(pm25-oc-bc)*0.941;pm10-pm25 +E006;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E007;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E008;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.463;pm10-pm25 +E009;0.9*nox_no;0.1*nox_no;0;co;so2;nh3;0;0;c6h6;c2h4;0;0;0;0;0;ch3oh;c3h6;c2h2+c3h6+ch3cooh;0;0;ch2o;0;0;hcl;0;oc;bc;0;0;pm25-oc-bc;pm10-pm25 
+E010;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.02*pm25;0.01*pm25;0;0.15*pm25;0.82*pm25;pm10-pm25 +E011;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.35*pm25;0.18*pm25;0;0.02*pm25;0.45*pm25;pm10-pm25 +E012;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.03*pm25;0.01*pm25;0;0.1*pm25;0.86*pm25;pm10-pm25 +E013;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;voc13;0;voc02;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;0;1*pm25;pm10-pm25 +E014;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0.01*voc18+0.3*voc19;0;0;0;0.5*voc01;0;0;0;0.5*voc01;0;7.5*voc06+2.2*voc17+4.11*voc18+4*voc19+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0;0;0;0;0;0 +E015;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0.21*pm25;pm10-pm25 
+E016;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0.288*pm25;0.675*pm25;0;0.01*pm25;0.037*pm25;pm10-pm25 +E017;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;0;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+voc08+2.2*voc17+1.875*voc22;0;0;0.2*voc17;voc17;0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0.21*pm25;pm10-pm25 +E018;0;0;0;0;0;0;0;0;voc13;0;0;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;0;0;0 +E019;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.2*pm25;0.06*pm25;0;0;0.74*pm25;pm10-pm25 +E020;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.31*pm25;0.41*pm25;0;0.03*pm25;0.25*pm25;pm10-pm25 +E021;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.31*pm25;0.2*pm25;0;0;0.49*pm25;pm10-pm25 +E022;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.48*pm25;0.15*pm25;0;0;0.37*pm25;pm10-pm25 +E023;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E024;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25-oc-bc;pm10-pm25 
+E025;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000008*nmvoc;0;0;0.000858*nmvoc;0.004177*nmvoc;0;0.018548*nmvoc;0;0;0;0.00104*nmvoc;0.011594*nmvoc;0;0;0;0.000893*nmvoc;0;0;0;oc;bc;0;(pm25-oc-bc)*0.155;(pm25-oc-bc)*0.845;pm10-pm25 +E026;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000013*nmvoc;0;0;0.003224*nmvoc;0.001662*nmvoc;0.000659*nmvoc;0.008038*nmvoc;0;0;0.000175*nmvoc;0.000496*nmvoc;0.025751*nmvoc;0;0;0;0.001726*nmvoc;0;0;0;oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.911;pm10-pm25 +E027;0.823*nox_no2;0.16*nox_no2;0.017*nox_no2;co;so2;nh3;0.000592*nmvoc;0;0;0.002423*nmvoc;0.001607*nmvoc;0;0.000899*nmvoc;0;0;0;0.002589*nmvoc;0.028079*nmvoc;0;0;0;0.003302*nmvoc;0;0;0;oc;bc;0;(pm25-oc-bc)*0.059;(pm25-oc-bc)*0.941;pm10-pm25 +E028;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000047*nmvoc;0;0;0.002914*nmvoc;0.004187*nmvoc;0.000849*nmvoc;0.00186*nmvoc;0;0;0;0.002559*nmvoc;0.029992*nmvoc;0;0;0;0.001755*nmvoc;0;0;0;oc;bc;0;(pm25-oc-bc)*0.047;(pm25-oc-bc)*0.953;pm10-pm25 +E029;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.001867*nmvoc;0.008553*nmvoc;0.000529*nmvoc;0.005349*nmvoc;0;0.000169*nmvoc;0.000133*nmvoc;0.000435*nmvoc;0.025797*nmvoc;0;0;0;0.000742*nmvoc;0;0;0;oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E030;0;0;0;0;0;0;0;0;0;0.000087*nmvoc;0.000148*nmvoc;0.003563*nmvoc;0.000004*nmvoc;0;0.000001*nmvoc;0.001904*nmvoc;0.000161*nmvoc;0.036006*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;0;0;0 +E031;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.002093*nmvoc;0.000525*nmvoc;0.000105*nmvoc;0.002929*nmvoc;0.001894*nmvoc;0.000023*nmvoc;0.002826*nmvoc;0.000116*nmvoc;0.000043*nmvoc;0.003664*nmvoc;0.003078*nmvoc;0.010261*nmvoc;0;0.000006*nmvoc;0.00013*nmvoc;0.000006*nmvoc;0;0;0;oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E032;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0.002496*nmvoc;0.004456*nmvoc;0;0;0.001693*nmvoc;0;0;0;0.001111*nmvoc;0.030672*nmvoc;0;0;0.001132*nmvoc;0.000762*nmvoc;0;0;0;oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.463;pm10-pm25 
+E033;nox_no2;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E034;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_bio;pm10-pm25_bio +E035;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E036;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_fossil+pm25_bio-oc-bc)*0.155;(pm25_fossil+pm25_bio-oc-bc)*0.845;pm10-pm25_fossil-pm25_bio +E037;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E038;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil;pm10-pm25_fossil +E039;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_fossil+pm25_bio-oc-bc)*0.089;(pm25_fossil+pm25_bio-oc-bc)*0.911;pm10-pm25_fossil-pm25_bio +E040;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E041;0.9*nox_no2;0.1*nox_no2;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E042;0.9*nox_no2;0.1*nox_no2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10 +E043;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E044;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E045;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E046;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E047;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.047;(pm25_bio+pm25_fossil-oc-bc)*0.953;pm10-pm25_fossil-pm25_bio 
+E048;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.155;(pm25_bio+pm25_fossil-oc-bc)*0.845;pm10-pm25_fossil-pm25_bio +E049;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil+pm25_bio;pm10-pm25_fossil-pm25_bio +E050;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E051;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E052;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E053;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E054;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.059;(pm25_bio+pm25_fossil-oc-bc)*0.941;pm10-pm25_bio-pm25_fossil +E055;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.537;(pm25_bio+pm25_fossil-oc-bc)*0.463;pm10-pm25_bio-pm25_fossil +E056;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.059;(pm25_bio+pm25_fossil-oc-bc)*0.941;pm10-pm25_bio-pm25_fossil +E057;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E058;0;0;0;0;0;0;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 
+E059;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E060;0;0;0;0;0;0;0;0.01*voc18+0.3*voc19;voc13;voc07;voc02;0;0;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc13+2.2*voc17+4.11*voc18+4*voc19;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0;0;0;0;0;0 +E061;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E062;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E063;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E064;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 
+E065;0;0;0;0;0;0;0;0;voc13;voc07;voc02;0.5*voc01;0;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E066;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E067;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E068;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E069;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E070;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E071;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 
+E072;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E073;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0;0 +E074;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000008*nmvoc;0;0;0.000858*nmvoc;0.004177*nmvoc;0;0.018548*nmvoc;0;0;0;0.00104*nmvoc;0.011594*nmvoc;0;0;0;0.000893*nmvoc;0;0;0;0.02*pm25;0.01*pm25;0;0.15*pm25;0.82*pm25;pm10-pm25 +E075;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000013*nmvoc;0;0;0.003224*nmvoc;0.001662*nmvoc;0.000659*nmvoc;0.008038*nmvoc;0;0;0.000175*nmvoc;0.000496*nmvoc;0.025751*nmvoc;0;0;0;0.001726*nmvoc;0;0;0;0.03*pm25;0.01*pm25;0;0.1*pm25;0.86*pm25;pm10-pm25 +E076;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000047*nmvoc;0;0;0.002914*nmvoc;0.004187*nmvoc;0.000849*nmvoc;0.00186*nmvoc;0;0;0;0.002559*nmvoc;0.029992*nmvoc;0;0;0;0.001755*nmvoc;0;0;0;0.35*pm25;0.18*pm25;0;0.02*pm25;0.45*pm25;pm10-pm25 +E077;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.000006*nmvoc;0.005934*nmvoc;0;0.000026*nmvoc;0;0.000001*nmvoc;0;0.000373*nmvoc;0.055357*nmvoc;0;0;0;0.000048*nmvoc;0;0;0;0;0;0;0;1*pm25;pm10-pm25 +E078;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.000087*nmvoc;0.000148*nmvoc;0.003563*nmvoc;0.000004*nmvoc;0;0.000001*nmvoc;0.001904*nmvoc;0.000161*nmvoc;0.036006*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;0;1*pm25;pm10-pm25 
+E079;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.000592*nmvoc;0;0;0.002423*nmvoc;0.001607*nmvoc;0;0.000899*nmvoc;0;0;0;0.002589*nmvoc;0.028079*nmvoc;0;0;0;0.003302*nmvoc;0;0;0;0.32*pm25;0.49*pm25;0;0.01*pm25;0.18*pm25;pm10-pm25 +E080;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0.002496*nmvoc;0.004456*nmvoc;0;0;0.001693*nmvoc;0;0;0;0.001111*nmvoc;0.030672*nmvoc;0;0;0.001132*nmvoc;0.000762*nmvoc;0;0;0;0.12*pm25;0.005*pm25;0;0.40*pm25;0.475*pm25;pm10-pm25 +E081;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001084*nmvoc;0.000756*nmvoc;0.000248*nmvoc;0.00622*nmvoc;0.000293*nmvoc;0;0.005437*nmvoc;0.000086*nmvoc;0;0;0.003296*nmvoc;0.011816*nmvoc;0;0;0.000192*nmvoc;0.000124*nmvoc;0;0;0;0.62*pm25;0.16*pm25;0;0.15*pm25;0.07*pm25;pm10-pm25 +E082;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000227*nmvoc;0.000311*nmvoc;0.000333*nmvoc;0.003921*nmvoc;0.000166*nmvoc;0;0.00206*nmvoc;0.00079*nmvoc;0;0;0.00111*nmvoc;0.039123*nmvoc;0;0;0.000245*nmvoc;0.000476*nmvoc;0;0;0;0.31*pm25;0.41*pm25;0;0.03*pm25;0.25*pm25;pm10-pm25 +E083;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.001867*nmvoc;0.008553*nmvoc;0.000529*nmvoc;0.005349*nmvoc;0;0.000169*nmvoc;0.000133*nmvoc;0.000435*nmvoc;0.025797*nmvoc;0;0;0;0.000742*nmvoc;0;0;0;0.31*pm25;0.2*pm25;0;0;0.49*pm25;pm10-pm25 +E084;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001591*nmvoc;0.000103*nmvoc;0.000038*nmvoc;0;0;0.000022*nmvoc;0;0;0;0;0.000023*nmvoc;0.03941*nmvoc;0;0.000007*nmvoc;0.000237*nmvoc;0;0;0;0;0.318*pm25;0.0516*pm25;0;0.0446*pm25;0.5858*pm25;pm10-pm25 +E085;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.002093*nmvoc;0.000525*nmvoc;0.000105*nmvoc;0.002929*nmvoc;0.001894*nmvoc;0.000023*nmvoc;0.002826*nmvoc;0.000116*nmvoc;0.000043*nmvoc;0.003664*nmvoc;0.003078*nmvoc;0.010261*nmvoc;0;0.000006*nmvoc;0.00013*nmvoc;0.000006*nmvoc;0;0;0;0.48*pm25;0.15*pm25;0;0;0.37*pm25;pm10-pm25 +E086;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E087;nox_no2;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;0;0 
+E088;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E089;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E090;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E091;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E092;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 
+E093;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E094;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;0;0;0 +E095;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001084*nmvoc;0.000756*nmvoc;0.000248*nmvoc;0.00622*nmvoc;0.000293*nmvoc;0;0.005437*nmvoc;0.000086*nmvoc;0;0;0.003296*nmvoc;0.011816*nmvoc;0;0;0.000192*nmvoc;0.000124*nmvoc;0;0;0;oc;bc;0;0;0;0 diff --git a/data/profiles/speciation/Speciation_profile_cb05_aero5_MONARCH.csv b/data/profiles/speciation/Speciation_profile_cb05_aero5_MONARCH.csv new file mode 100644 index 0000000000000000000000000000000000000000..dcee133336ebaae5449ed68043e2fb664f699dce --- /dev/null +++ b/data/profiles/speciation/Speciation_profile_cb05_aero5_MONARCH.csv @@ -0,0 +1,98 @@ +ID;NO;NO2;HONO;CO;SO2;NH3;ALD2;ALDX;BENZENE;ETH;ETHA;ETOH;FORM;IOLE;ISOP;MEOH;OLE;PAR;SESQ;TERP;TOL;XYL;DMS;HCL;POA;PEC;PNO3;PSO4;PMFINE;PMC +units;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;mol.s-1.m-2;kg.s-1.m-2;kg.s-1.m-2;kg.s-1.m-2;kg.s-1.m-2;kg.s-1.m-2;kg.s-1.m-2 
+short_description;nitrogen_monoxide;nitrogen_dioxide;nitrous_acid;carbon_monoxide;sulfur_dioxide;ammonia;acetaldehyde;higher_aldehydes;benzene;ethene;ethane;ethanol;formaldehyde;internal_olefin_carbon_bond;isoprene;methanol;terminal_olefin_carbon_bond;paraffin_carbon_bond;sesquiterpenes;terpene;toluene;xylene;dimethyl_sulfide;hydrogen_chloride;primary_organic_carbon;primary_elemental_carbon;primary_nitrate_fine;primary_sulfate_fine;primary_others_fine;pm_coarse +E001;0.72*nox_no;0.18*nox_no;0.1*nox_no;co;so2;nh3;c2h4o;0;c6h6;c2h4;c2h6;c2h5oh;0;0.5*hialkenes;c5h8;ch3oh;c8h16+c5h10+c3h6+c4h8+c6h12+0.5*hialkanes;4*c4h10+6*c6h14+5*hialkanes+6*c8h16+3*c5h10+c3h6+3*c3h6o+2*c4h8+7*c7h16+4*c6h12+hialkenes+5*c5h12+1.5*c3h8;0;terpenes;ch2o+c7h8;c8h10;c2h6s;0;oc;bc;0;0;3.3*pm25-3*oc-5.9*bc;0 +E002;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.155;(pm25-oc-bc)*0.845;pm10-pm25 +E003;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.911;pm10-pm25 +E004;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.8*oc;bc;0;(pm25-oc-bc)*0.047;(pm25-oc-bc)*0.953;pm10-pm25 
+E005;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.059;(pm25-oc-bc)*0.941;pm10-pm25 +E006;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E007;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;1.3*oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E008;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.463;pm10-pm25 +E009;0.9*nox_no;0.1*nox_no;0;co;so2;nh3;0;0;c6h6;c2h4;0;0;0;0;0;ch3oh;c3h6;c2h2+c3h6+ch3cooh;0;0;ch2o;0;0;hcl;1.44*oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E010;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.02*1.3*pm25;0.01*pm25;0;0.15*pm25;0.82*pm25;pm10-pm25 +E011;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.35*1.8*pm25;0.18*pm25;0;0.02*pm25;0.45*pm25;pm10-pm25 
+E012;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.03*1.3*pm25;0.01*pm25;0;0.1*pm25;0.86*pm25;pm10-pm25 +E013;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;voc13;0;voc02;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;1*pm25;pm10-pm25 +E014;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0.01*voc18+0.3*voc19;0;0;0;0.5*voc01;0;0;0;0.5*voc01;0;7.5*voc06+2.2*voc17+4.11*voc18+4*voc19+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0;0;0;0;0 +E015;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.58*1.3*pm25;0.21*pm25;0;0.01*pm25;0.21*pm25;pm10-pm25 +E016;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0.288*1.3*pm25;0.675*pm25;0;0.01*pm25;0.037*pm25;pm10-pm25 +E017;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;0;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+voc08+2.2*voc17+1.875*voc22;0;0;0.2*voc17;voc17;0;0;0.58*1.3*pm25;0.21*pm25;0;0.01*pm25;0.21*pm25;pm10-pm25 +E018;0;0;0;0;0;0;0;0;voc13;0;0;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;0;0 +E019;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.2*1.3*pm25;0.06*pm25;0;0;0.74*pm25;pm10-pm25 
+E020;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.31*1.3*pm25;0.41*pm25;0;0.03*pm25;0.25*pm25;pm10-pm25 +E021;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0.31*1.3*pm25;0.2*pm25;0;0;0.49*pm25;pm10-pm25 +E022;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.48*1.8*pm25;0.15*pm25;0;0;0.37*pm25;pm10-pm25 +E023;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E024;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E025;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000008*nmvoc;0;0;0.000858*nmvoc;0.004177*nmvoc;0;0.018548*nmvoc;0;0;0;0.00104*nmvoc;0.011594*nmvoc;0;0;0;0.000893*nmvoc;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.155;(pm25-oc-bc)*0.845;pm10-pm25 +E026;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000013*nmvoc;0;0;0.003224*nmvoc;0.001662*nmvoc;0.000659*nmvoc;0.008038*nmvoc;0;0;0.000175*nmvoc;0.000496*nmvoc;0.025751*nmvoc;0;0;0;0.001726*nmvoc;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.911;pm10-pm25 +E027;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000592*nmvoc;0;0;0.002423*nmvoc;0.001607*nmvoc;0;0.000899*nmvoc;0;0;0;0.002589*nmvoc;0.028079*nmvoc;0;0;0;0.003302*nmvoc;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.059;(pm25-oc-bc)*0.941;pm10-pm25 +E028;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000047*nmvoc;0;0;0.002914*nmvoc;0.004187*nmvoc;0.000849*nmvoc;0.00186*nmvoc;0;0;0;0.002559*nmvoc;0.029992*nmvoc;0;0;0;0.001755*nmvoc;0;0;1.8*oc;bc;0;(pm25-oc-bc)*0.047;(pm25-oc-bc)*0.953;pm10-pm25 
+E029;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.001867*nmvoc;0.008553*nmvoc;0.000529*nmvoc;0.005349*nmvoc;0;0.000169*nmvoc;0.000133*nmvoc;0.000435*nmvoc;0.025797*nmvoc;0;0;0;0.000742*nmvoc;0;0;1.3*oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E030;0;0;0;0;0;0;0;0;0;0.000087*nmvoc;0.000148*nmvoc;0.003563*nmvoc;0.000004*nmvoc;0;0.000001*nmvoc;0.001904*nmvoc;0.000161*nmvoc;0.036006*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;0;0 +E031;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.002093*nmvoc;0.000525*nmvoc;0.000105*nmvoc;0.002929*nmvoc;0.001894*nmvoc;0.000023*nmvoc;0.002826*nmvoc;0.000116*nmvoc;0.000043*nmvoc;0.003664*nmvoc;0.003078*nmvoc;0.010261*nmvoc;0;0.000006*nmvoc;0.00013*nmvoc;0.000006*nmvoc;0;0;1.3*oc;bc;0;0;pm25-oc-bc;pm10-pm25 +E032;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0.002496*nmvoc;0.004456*nmvoc;0;0;0.001693*nmvoc;0;0;0;0.001111*nmvoc;0.030672*nmvoc;0;0;0.001132*nmvoc;0.000762*nmvoc;0;0;1.3*oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.463;pm10-pm25 +E033;nox_no2;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E034;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_bio;pm10-pm25_bio +E035;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E036;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_fossil+pm25_bio-oc-bc)*0.155;(pm25_fossil+pm25_bio-oc-bc)*0.845;pm10-pm25_fossil-pm25_bio +E037;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E038;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil;pm10-pm25_fossil +E039;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_fossil+pm25_bio-oc-bc)*0.089;(pm25_fossil+pm25_bio-oc-bc)*0.911;pm10-pm25_fossil-pm25_bio 
+E040;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E041;0.9*nox_no2;0.1*nox_no2;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E042;0.9*nox_no2;0.1*nox_no2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10 +E043;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E044;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;bc;0;0;pm25_fossil-bc;pm10-pm25_fossil +E045;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E046;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E047;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.8*oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.047;(pm25_bio+pm25_fossil-oc-bc)*0.953;pm10-pm25_fossil-pm25_bio +E048;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.155;(pm25_bio+pm25_fossil-oc-bc)*0.845;pm10-pm25_fossil-pm25_bio +E049;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil+pm25_bio;pm10-pm25_fossil-pm25_bio +E050;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25_fossil;pm10-pm25_fossil +E051;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E052;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E053;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;pm25_fossil-oc-bc;pm10-pm25_fossil +E054;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.059;(pm25_bio+pm25_fossil-oc-bc)*0.941;pm10-pm25_bio-pm25_fossil 
+E055;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.537;(pm25_bio+pm25_fossil-oc-bc)*0.463;pm10-pm25_bio-pm25_fossil +E056;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;(pm25_bio+pm25_fossil-oc-bc)*0.059;(pm25_bio+pm25_fossil-oc-bc)*0.941;pm10-pm25_bio-pm25_fossil +E057;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E058;0;0;0;0;0;0;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E059;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E060;0;0;0;0;0;0;0;0.01*voc18+0.3*voc19;voc13;voc07;voc02;0;0;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc13+2.2*voc17+4.11*voc18+4*voc19;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0;0;0;0;0 +E061;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E062;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 
+E063;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;voc10;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E064;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E065;0;0;0;0;0;0;0;0;voc13;voc07;voc02;0.5*voc01;0;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E066;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E067;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E068;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E069;0;0;0;0;0;0;0.625*voc22;0.666*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 
+E070;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E071;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E072;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E073;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;voc11;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0;0;0;0;0 +E074;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000008*nmvoc;0;0;0.000858*nmvoc;0.004177*nmvoc;0;0.018548*nmvoc;0;0;0;0.00104*nmvoc;0.011594*nmvoc;0;0;0;0.000893*nmvoc;0;0;0.02*1.3*pm25;0.01*pm25;0;0.15*pm25;0.82*pm25;pm10-pm25 +E075;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000013*nmvoc;0;0;0.003224*nmvoc;0.001662*nmvoc;0.000659*nmvoc;0.008038*nmvoc;0;0;0.000175*nmvoc;0.000496*nmvoc;0.025751*nmvoc;0;0;0;0.001726*nmvoc;0;0;0.03*1.3*pm25;0.01*pm25;0;0.1*pm25;0.86*pm25;pm10-pm25 +E076;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000047*nmvoc;0;0;0.002914*nmvoc;0.004187*nmvoc;0.000849*nmvoc;0.00186*nmvoc;0;0;0;0.002559*nmvoc;0.029992*nmvoc;0;0;0;0.001755*nmvoc;0;0;0.35*1.8*pm25;0.18*pm25;0;0.02*pm25;0.45*pm25;pm10-pm25 
+E077;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.000006*nmvoc;0.005934*nmvoc;0;0.000026*nmvoc;0;0.000001*nmvoc;0;0.000373*nmvoc;0.055357*nmvoc;0;0;0;0.000048*nmvoc;0;0;0;0;0;0;1*pm25;pm10-pm25 +E078;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.000087*nmvoc;0.000148*nmvoc;0.003563*nmvoc;0.000004*nmvoc;0;0.000001*nmvoc;0.001904*nmvoc;0.000161*nmvoc;0.036006*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;1*pm25;pm10-pm25 +E079;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.000592*nmvoc;0;0;0.002423*nmvoc;0.001607*nmvoc;0;0.000899*nmvoc;0;0;0;0.002589*nmvoc;0.028079*nmvoc;0;0;0;0.003302*nmvoc;0;0;0.32*1.3*pm25;0.49*pm25;0;0.01*pm25;0.18*pm25;pm10-pm25 +E080;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0.002496*nmvoc;0.004456*nmvoc;0;0;0.001693*nmvoc;0;0;0;0.001111*nmvoc;0.030672*nmvoc;0;0;0.001132*nmvoc;0.000762*nmvoc;0;0;0.12*1.3*pm25;0.005*pm25;0;0.40*pm25;0.475*pm25;pm10-pm25 +E081;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001084*nmvoc;0.000756*nmvoc;0.000248*nmvoc;0.00622*nmvoc;0.000293*nmvoc;0;0.005437*nmvoc;0.000086*nmvoc;0;0;0.003296*nmvoc;0.011816*nmvoc;0;0;0.000192*nmvoc;0.000124*nmvoc;0;0;0.62*1.3*pm25;0.16*pm25;0;0.15*pm25;0.07*pm25;pm10-pm25 +E082;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.000227*nmvoc;0.000311*nmvoc;0.000333*nmvoc;0.003921*nmvoc;0.000166*nmvoc;0;0.00206*nmvoc;0.00079*nmvoc;0;0;0.00111*nmvoc;0.039123*nmvoc;0;0;0.000245*nmvoc;0.000476*nmvoc;0;0;0.31*1.3*pm25;0.41*pm25;0;0.03*pm25;0.25*pm25;pm10-pm25 +E083;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0.001867*nmvoc;0.008553*nmvoc;0.000529*nmvoc;0.005349*nmvoc;0;0.000169*nmvoc;0.000133*nmvoc;0.000435*nmvoc;0.025797*nmvoc;0;0;0;0.000742*nmvoc;0;0;0.31*1.3*pm25;0.2*pm25;0;0;0.49*pm25;pm10-pm25 +E084;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001591*nmvoc;0.000103*nmvoc;0.000038*nmvoc;0;0;0.000022*nmvoc;0;0;0;0;0.000023*nmvoc;0.03941*nmvoc;0;0.000007*nmvoc;0.000237*nmvoc;0;0;0;0.318*1.3*pm25;0.0516*pm25;0;0.0446*pm25;0.5858*pm25;pm10-pm25 
+E085;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.002093*nmvoc;0.000525*nmvoc;0.000105*nmvoc;0.002929*nmvoc;0.001894*nmvoc;0.000023*nmvoc;0.002826*nmvoc;0.000116*nmvoc;0.000043*nmvoc;0.003664*nmvoc;0.003078*nmvoc;0.010261*nmvoc;0;0.000006*nmvoc;0.00013*nmvoc;0.000006*nmvoc;0;0;0.48*1.8*pm25;0.15*pm25;0;0;0.37*pm25;pm10-pm25 +E086;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E087;nox_no2;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;1.3*oc;bc;0;0;0;0 +E088;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E089;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E090;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E091;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.8*oc;bc;0;0;0;0 
+E092;0;0;0;0;0;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E093;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E094;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;voc10;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;1.3*oc;bc;0;0;0;0 +E095;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.001084*nmvoc;0.000756*nmvoc;0.000248*nmvoc;0.00622*nmvoc;0.000293*nmvoc;0;0.005437*nmvoc;0.000086*nmvoc;0;0;0.003296*nmvoc;0.011816*nmvoc;0;0;0.000192*nmvoc;0.000124*nmvoc;0;0;1.3*oc;bc;0;0;0;0 diff --git a/data/profiles/speciation/Speciation_profile_cb05_aero6_CMAQ.csv b/data/profiles/speciation/Speciation_profile_cb05_aero6_CMAQ.csv new file mode 100644 index 0000000000000000000000000000000000000000..15467354fd0fb6a99c84a680da24971aa5f0b692 --- /dev/null +++ b/data/profiles/speciation/Speciation_profile_cb05_aero6_CMAQ.csv @@ -0,0 +1,28 @@ +ID;NO;NO2;HONO;CO;SO2;NH3;ALD2;ALDX;BENZENE;ETH;ETHA;ETOH;FORM;IOLE;ISOP;MEOH;OLE;PAR;SESQ;TERP;TOL;XYL;DMS;HCL;SULF;POC;PEC;PNO3;PSO4;PH2O;PCL;PNCOM;PCA;PSI;PMG;PMN;PNA;PNH4;PAL;PFE;PTI;PK;PMOTHR;PMC 
+units;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1 +short_description;nitrogen_monoxide;nitrogen_dioxide;nitrous_acid;carbon_monoxide;sulfur_dioxide;ammonia;acetaldehyde;higher_aldehydes;benzene;ethene;ethane;ethanol;formaldehyde;internal_olefin_carbon_bond;isoprene;methanol;terminal_olefin_carbon_bond;paraffin_carbon_bond;sesquiterpenes;terpene;toluene;xylene;dimethyl_sulfide;hydrogen_chloride;sulfuric_acid;primary_organic_aerosol;primary_elemental_carbon;primary_nitrate_fine;primary_sulfate_fine;particle_bound_water;particulate_chloride;non-carbon_organic_matter_OM_OC;particulate_calcium;particulate_silica;particulate_magnesium;particulate_manganese;particulate_sodium;particulate_ammonium;particulate_aluminum;particulate_iron;particulate_titanium;particulate_potassium;primary_others_fine;pm_coarse +E001;0.72*nox_no;0.18*nox_no;0.1*nox_no;co;so2;nh3;c2h4o;0;c6h6;c2h4;c2h6;c2h5oh;0;0.5*hialkenes;c5h8;ch3oh;c8h16+c5h10+c3h6+c4h8+c6h12+0.5*hialkanes;4*c4h10+6*c6h14+5*hialkanes+6*c8h16+3*c5h10+c3h6+3*c3h6o+2*c4h8+7*c7h16+4*c6h12+hialkenes+5*c5h12+1.5*c3h8;0;terpenes;ch2o+c7h8;c8h10;c2h6s;0;0;oc;bc;0;0;0;(pm25-oc-bc)*0.09669;(pm25-oc-bc)*0.75259;(pm25-oc-bc)*0.00899;(pm25-oc-bc)*0.00424;(pm25-oc-bc)*0.00073;(pm25-oc-bc)*0.00003;(pm25-oc-bc)*0.01335;(pm25-oc-bc)*0.02048;(pm25-oc-bc)*0.00141;(pm25-oc-bc)*0.00101;(pm25-oc-bc)*0.00011;(pm25-oc-bc)*0.0685;(pm25-oc-bc)*0.03181;0 
+E002;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.155;0;(pm25-oc-bc)*0.00391;(pm25-oc-bc)*0.01187;(pm25-oc-bc)*0.03542;(pm25-oc-bc)*0.08697;(pm25-oc-bc)*0.00234;(pm25-oc-bc)*0.00022;(pm25-oc-bc)*0.00066;(pm25-oc-bc)*0.01621;(pm25-oc-bc)*0.05185;(pm25-oc-bc)*0.02467;(pm25-oc-bc)*0.00321;(pm25-oc-bc)*0.00452;(pm25-oc-bc)*0.60309;pm10-pm25 +E003;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.02646;0;(pm25-oc-bc)*0.03282;0;0;0;0;0;0;0;0;0;0;(pm25-oc-bc)*0.85171;pm10-pm25 +E004;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.047;0;(pm25-oc-bc)*0.00538;(pm25-oc-bc)*0.76284;(pm25-oc-bc)*0.01625;(pm25-oc-bc)*0.01;(pm25-oc-bc)*0.00418;(pm25-oc-bc)*0.00007;(pm25-oc-bc)*0.00158;(pm25-oc-bc)*0.02167;(pm25-oc-bc)*0.00946;(pm25-oc-bc)*0.01206;(pm25-oc-bc)*0.00055;(pm25-oc-bc)*0.02084;(pm25-oc-bc)*0.08805;pm10-pm25 
+E005;0.823*nox_no2;0.16*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.059;0;(pm25-oc-bc)*0.00257;(pm25-oc-bc)*0.64241;(pm25-oc-bc)*0.00736;(pm25-oc-bc)*0.01712;(pm25-oc-bc)*0.00125;(pm25-oc-bc)*0.00007;(pm25-oc-bc)*0.00294;(pm25-oc-bc)*0.04996;(pm25-oc-bc)*0.00301;(pm25-oc-bc)*0.01009;(pm25-oc-bc)*0.0001;(pm25-oc-bc)*0.00048;(pm25-oc-bc)*0.20357;pm10-pm25 +E006;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E007;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;oc;bc;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25-bc-oc;pm10-pm25 +E008;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.37066;0;(pm25-oc-bc)*0.04508;(pm25-oc-bc)*0.00259;0;(pm25-oc-bc)*0.00318;0;0;0;(pm25-oc-bc)*0.00773;(pm25-oc-bc)*0.00527;(pm25-oc-bc)*0.00005;0;(pm25-oc-bc)*0.02842;pm10-pm25 +E009;0.9*nox_no;0.1*nox_no;0;co;so2;nh3;0;0;c6h6;c2h4;0;0;0;0;0;ch3oh;c3h6;c2h2+c3h6+ch3cooh;0;0;ch2o;0;0;hcl;0;oc;bc;0;0;0;(pm25-oc-bc)*0.1686;(pm25-oc-bc)*0.04105;(pm25-oc-bc)*0.02675;(pm25-oc-bc)*0.08393;0;(pm25-oc-bc)*0.00073;(pm25-oc-bc)*0.00988;(pm25-oc-bc)*0.10042;(pm25-oc-bc)*0.02431;(pm25-oc-bc)*0.02907;(pm25-oc-bc)*0.00293;(pm25-oc-bc)*0.02638;(pm25-oc-bc)*0.4859;pm10-pm25 
+E010;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.02*pm25;0.01*pm25;0;0.15*pm25;0;pm25*0.00379;pm25*0.01152;pm25*0.03437;pm25*0.0844;pm25*0.00227;pm25*0.00021;pm25*0.00064;pm25*0.01573;pm25*0.05032;pm25*0.02394;pm25*0.00311;pm25*0.00438;pm25*0.58524;pm10-pm25 +E011;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.35*pm25;0.18*pm25;0;0.02*pm25;0;pm25*0.00254;pm25*0.3602;pm25*0.00767;pm25*0.00472;pm25*0.00197;pm25*0.00003;pm25*0.00074;pm25*0.01023;pm25*0.00446;pm25*0.00569;pm25*0.00026;pm25*0.00984;pm25*0.04158;pm10-pm25 +E012;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.03*pm25;0.01*pm25;0;0.1*pm25;pm25*0.02498;0;pm25*0.03098;0;0;0;0;0;0;0;0;0;0;pm25*0.80402;pm10-pm25 +E013;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;voc13;0;voc02;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E014;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0.01*voc18+0.3*voc19;0;0;0;0.5*voc01;0;0;0;0.5*voc01;0;7.5*voc06+2.2*voc17+4.11*voc18+4*voc19+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 
+E015;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0;pm25*0.00063;pm25*0.10898;pm25*0.00229;pm25*0.00377;pm25*0.00042;pm25*0.00003;pm25*0.00085;pm25*0.01328;pm25*0.00116;pm25*0.00321;pm25*0.00004;pm25*0.00014;pm25*0.06514;pm10-pm25 +E016;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0;voc14+0.2*voc17;voc15+voc17;0;0;0;0.288*pm25;0.675*pm25;0;0.01*pm25;0;pm25*0.00006;pm25*0.02119;pm25*0.00013;pm25*0.00047;pm25*0.00002;0;pm25*0.00006;pm25*0.00116;pm25*0.00003;pm25*0.00018;0;0;pm25*0.00364;pm10-pm25 +E017;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;0;voc07;voc02;0;voc21;0.666*voc12;0;0;voc08+0.333*voc12;1.5*voc03+voc08+2.2*voc17+1.875*voc22;0;0;0.2*voc17;voc17;0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0;pm25*0.00063;pm25*0.10898;pm25*0.00229;pm25*0.00377;pm25*0.00042;pm25*0.00003;pm25*0.00085;pm25*0.01328;pm25*0.00116;pm25*0.00321;pm25*0.00004;pm25*0.00014;pm25*0.06514;pm10-pm25 +E018;0;0;0;0;0;0;0;0;voc13;0;0;0;0;0.666*voc12;0;0;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0;voc14;voc15;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E019;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.2*pm25;0.06*pm25;0;0;pm25*0.01036;pm25*0.00617;pm25*0.15491;pm25*0.0074;pm25*0.05955;pm25*0.07439;pm25*0.00074;pm25*0.00049;pm25*0.0001;pm25*0.00118;pm25*0.07992;pm25*0.00261;pm25*0.00035;pm25*0.34144;pm10-pm25 
+E020;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.31*pm25;0.41*pm25;0;0.03*pm25;0;0;pm25*0.07404;pm25*0.00018;pm25*0.00074;0;0;0;0;0;pm25*0.00006;0;0;pm25*0.17496;pm10-pm25 +E021;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0;voc14+0.2*voc17;voc15+voc16+voc17;0;0;0;0.31*pm25;0.2*pm25;0;0;0;pm25*0.08261;pm25*0.02011;pm25*0.01311;pm25*0.04112;0;pm25*0.00035;pm25*0.00484;pm25*0.0492;pm25*0.01191;pm25*0.01424;pm25*0.00143;pm25*0.01293;pm25*0.23809;pm10-pm25 +E022;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.48*pm25;0.15*pm25;0;0;pm25*0.00427;pm25*0.04911;pm25*0.17393;pm25*0.00506;pm25*0.00919;pm25*0.00042;pm25*0.00009;pm25*0.00463;pm25*0.01719;pm25*0.0031;pm25*0.00277;pm25*0.00011;pm25*0.04219;pm25*0.05788;pm10-pm25 +E023;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E024;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25-oc-bc;pm10-pm25 +E086;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 diff --git a/data/profiles/speciation/Speciation_profile_cb05e51_aero6_CMAQ.csv b/data/profiles/speciation/Speciation_profile_cb05e51_aero6_CMAQ.csv new file mode 100644 index 0000000000000000000000000000000000000000..eb1b2293ddc13c55b76d084603fbfb0ed0e58710 --- /dev/null +++ b/data/profiles/speciation/Speciation_profile_cb05e51_aero6_CMAQ.csv @@ -0,0 +1,28 @@ 
+ID;NO;NO2;HONO;CO;SO2;NH3;ALD2;ALDX;BENZENE;ETH;ETHA;ETOH;FORM;IOLE;ISOP;MEOH;NAPH;OLE;PAR;SESQ;SOAALK;TERP;TOL;XYLMN;DMS;HCL;SULF;POC;PEC;PNO3;PSO4;PH2O;PCL;PNCOM;PCA;PSI;PMG;PMN;PNA;PNH4;PAL;PFE;PTI;PK;PMOTHR;PMC
+units;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;mol.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1;g.s-1
+short_description;nitrogen_monoxide;nitrogen_dioxide;nitrous_acid;carbon_monoxide;sulfur_dioxide;ammonia;acetaldehyde;higher_aldehydes;benzene;ethene;ethane;ethanol;formaldehyde;internal_olefin_carbon_bond;isoprene;methanol;naphthalene;terminal_olefin_carbon_bond;paraffin_carbon_bond;sesquiterpenes;alkanes_that_produce_aerosol_material;terpene;toluene;xylene_without_naphthalene;dimethyl_sulfide;hydrogen_chloride;sulfuric_acid;primary_organic_aerosol;primary_elemental_carbon;primary_nitrate_fine;primary_sulfate_fine;particle_bound_water;particulate_chloride;non-carbon_organic_matter_OM_OC;particulate_calcium;particulate_silica;particulate_magnesium;particulate_manganese;particulate_sodium;particulate_ammonium;particulate_aluminum;particulate_iron;particulate_titanium;particulate_potassium;primary_others_fine;pm_coarse
+E001;0.72*nox_no;0.18*nox_no;0.1*nox_no;co;so2;nh3;c2h4o;0;c6h6;c2h4;c2h6;c2h5oh;0;0.5*hialkenes;c5h8;ch3oh;0.002*c8h10;c8h16+c5h10+c3h6+c4h8+c6h12+0.5*hialkanes;4*c4h10+6*c6h14+5*hialkanes+6*c8h16+3*c5h10+c3h6+3*c3h6o+2*c4h8+7*c7h16+4*c6h12+hialkenes+5*c5h12+1.5*c3h8;0;0.108*(4*c4h10+6*c6h14+5*hialkanes+6*c8h16+3*c5h10+c3h6+3*c3h6o+2*c4h8+7*c7h16+4*c6h12+hialkenes+5*c5h12+1.5*c3h8);terpenes;ch2o+c7h8;0.998*c8h10;c2h6s;0;0;oc;bc;0;0;0;(pm25-oc-bc)*0.09669;(pm25-oc-bc)*0.75259;(pm25-oc-bc)*0.00899;(pm25-oc-bc)*0.00424;(pm25-oc-bc)*0.00073;(pm25-oc-bc)*0.00003;(pm25-oc-bc)*0.01335;(pm25-oc-bc)*0.02048;(pm25-oc-bc)*0.00141;(pm25-oc-bc)*0.00101;(pm25-oc-bc)*0.00011;(pm25-oc-bc)*0.0685;(pm25-oc-bc)*0.03181;0 +E002;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;oc;bc;0;(pm25-oc-bc)*0.155;0;(pm25-oc-bc)*0.00391;(pm25-oc-bc)*0.01187;(pm25-oc-bc)*0.03542;(pm25-oc-bc)*0.08697;(pm25-oc-bc)*0.00234;(pm25-oc-bc)*0.00022;(pm25-oc-bc)*0.00066;(pm25-oc-bc)*0.01621;(pm25-oc-bc)*0.05185;(pm25-oc-bc)*0.02467;(pm25-oc-bc)*0.00321;(pm25-oc-bc)*0.00452;(pm25-oc-bc)*0.60309;pm10-pm25 
+E003;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;oc;bc;0;(pm25-oc-bc)*0.089;(pm25-oc-bc)*0.02646;0;(pm25-oc-bc)*0.03282;0;0;0;0;0;0;0;0;0;0;(pm25-oc-bc)*0.85171;pm10-pm25 +E004;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;oc;bc;0;(pm25-oc-bc)*0.047;0;(pm25-oc-bc)*0.00538;(pm25-oc-bc)*0.76284;(pm25-oc-bc)*0.01625;(pm25-oc-bc)*0.01;(pm25-oc-bc)*0.00418;(pm25-oc-bc)*0.00007;(pm25-oc-bc)*0.00158;(pm25-oc-bc)*0.02167;(pm25-oc-bc)*0.00946;(pm25-oc-bc)*0.01206;(pm25-oc-bc)*0.00055;(pm25-oc-bc)*0.02084;(pm25-oc-bc)*0.08805;pm10-pm25 
+E005;0.823*nox_no2;0.16*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;oc;bc;0;(pm25-oc-bc)*0.059;0;(pm25-oc-bc)*0.00257;(pm25-oc-bc)*0.64241;(pm25-oc-bc)*0.00736;(pm25-oc-bc)*0.01712;(pm25-oc-bc)*0.00125;(pm25-oc-bc)*0.00007;(pm25-oc-bc)*0.00294;(pm25-oc-bc)*0.04996;(pm25-oc-bc)*0.00301;(pm25-oc-bc)*0.01009;(pm25-oc-bc)*0.0001;(pm25-oc-bc)*0.00048;(pm25-oc-bc)*0.20357;pm10-pm25 +E006;0;0;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E007;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;0.002*(voc15+voc17);voc08+0.333*voc12;1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0.108*(1.5*voc03+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23);0;voc14+0.2*voc17;0.998*(voc15+voc17);0;0;0;oc;bc;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25-bc-oc;pm10-pm25 +E008;0.9*nox_no2;0.1*nox_no2;0;co;so2;0;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;oc;bc;0;(pm25-oc-bc)*0.537;(pm25-oc-bc)*0.37066;0;(pm25-oc-bc)*0.04508;(pm25-oc-bc)*0.00259;0;(pm25-oc-bc)*0.00318;0;0;0;(pm25-oc-bc)*0.00773;(pm25-oc-bc)*0.00527;(pm25-oc-bc)*0.00005;0;(pm25-oc-bc)*0.02842;pm10-pm25 
+E009;0.9*nox_no;0.1*nox_no;0;co;so2;nh3;0;0;c6h6;c2h4;0;0;0;0;0;ch3oh;0;c3h6;c2h2+c3h6+ch3cooh;0;0.108*(c2h2+c3h6+ch3cooh);0;ch2o;0;0;hcl;0;oc;bc;0;0;0;(pm25-oc-bc)*0.1686;(pm25-oc-bc)*0.04105;(pm25-oc-bc)*0.02675;(pm25-oc-bc)*0.08393;0;(pm25-oc-bc)*0.00073;(pm25-oc-bc)*0.00988;(pm25-oc-bc)*0.10042;(pm25-oc-bc)*0.02431;(pm25-oc-bc)*0.02907;(pm25-oc-bc)*0.00293;(pm25-oc-bc)*0.02638;(pm25-oc-bc)*0.4859;pm10-pm25 +E010;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.02*pm25;0.01*pm25;0;0.15*pm25;0;pm25*0.00379;pm25*0.01152;pm25*0.03437;pm25*0.0844;pm25*0.00227;pm25*0.00021;pm25*0.00064;pm25*0.01573;pm25*0.05032;pm25*0.02394;pm25*0.00311;pm25*0.00438;pm25*0.58524;pm10-pm25 +E011;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.35*pm25;0.18*pm25;0;0.02*pm25;0;pm25*0.00254;pm25*0.3602;pm25*0.00767;pm25*0.00472;pm25*0.00197;pm25*0.00003;pm25*0.00074;pm25*0.01023;pm25*0.00446;pm25*0.00569;pm25*0.00026;pm25*0.00984;pm25*0.04158;pm10-pm25 
+E012;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.03*pm25;0.01*pm25;0;0.1*pm25;pm25*0.02498;0;pm25*0.03098;0;0;0;0;0;0;0;0;0;0;pm25*0.80402;pm10-pm25 +E013;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;voc13;0;voc02;0;0;0.666*voc12;0;0;0.002*voc15;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13);0;voc14;0.998*voc15;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E014;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0.01*voc18+0.3*voc19;0;0;0;0.5*voc01;0;0;0;0.5*voc01;0.002*(voc15+voc17);0;7.5*voc06+2.2*voc17+4.11*voc18+4*voc19+4*voc23;0;0.108*(7.5*voc06+2.2*voc17+4.11*voc18+4*voc19+4*voc23);0;voc14+0.2*voc17;0.998*(voc15+voc17);0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E015;0.95*nox_no2;0.042*nox_no2;0.008*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0;pm25*0.00063;pm25*0.10898;pm25*0.00229;pm25*0.00377;pm25*0.00042;pm25*0.00003;pm25*0.00085;pm25*0.01328;pm25*0.00116;pm25*0.00321;pm25*0.00004;pm25*0.00014;pm25*0.06514;pm10-pm25 
+E016;0.7*nox_no2;0.283*nox_no2;0.017*nox_no2;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0;voc21;0.666*voc12;0;0;0.002*(voc15+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+2.2*voc17+1.875*voc22+4*voc23);0;voc14+0.2*voc17;0.998*(voc15+voc17);0;0;0;0.288*pm25;0.675*pm25;0;0.01*pm25;0;pm25*0.00006;pm25*0.02119;pm25*0.00013;pm25*0.00047;pm25*0.00002;0;pm25*0.00006;pm25*0.00116;pm25*0.00003;pm25*0.00018;0;0;pm25*0.00364;pm10-pm25 +E017;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;0;voc07;voc02;0;voc21;0.666*voc12;0;0;0.002*voc17;voc08+0.333*voc12;1.5*voc03+voc08+2.2*voc17+1.875*voc22;0;0.108*(1.5*voc03+voc08+2.2*voc17+1.875*voc22);0;0.2*voc17;0.998*voc17;0;0;0;0.58*pm25;0.21*pm25;0;0.01*pm25;0;pm25*0.00063;pm25*0.10898;pm25*0.00229;pm25*0.00377;pm25*0.00042;pm25*0.00003;pm25*0.00085;pm25*0.01328;pm25*0.00116;pm25*0.00321;pm25*0.00004;pm25*0.00014;pm25*0.06514;pm10-pm25 +E018;0;0;0;0;0;0;0;0;voc13;0;0;0;0;0.666*voc12;0;0;0.002*voc15;0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc13);0;voc14;0.998*voc15;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E019;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.2*pm25;0.06*pm25;0;0;pm25*0.01036;pm25*0.00617;pm25*0.15491;pm25*0.0074;pm25*0.05955;pm25*0.07439;pm25*0.00074;pm25*0.00049;pm25*0.0001;pm25*0.00118;pm25*0.07992;pm25*0.00261;pm25*0.00035;pm25*0.34144;pm10-pm25 
+E020;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.31*pm25;0.41*pm25;0;0.03*pm25;0;0;pm25*0.07404;pm25*0.00018;pm25*0.00074;0;0;0;0;0;pm25*0.00006;0;0;pm25*0.17496;pm10-pm25 +E021;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0.625*voc22;0.01*voc18+0.3*voc19+0.375*voc22;voc13;voc07;voc02;0.5*voc01;voc21;0.666*voc12;0;0.5*voc01;0.002*(voc15+voc16+voc17);voc08+0.333*voc12;1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24;0;0.108*(1.5*voc03+4*voc04+5*voc05+7.5*voc06+voc08+voc09+voc13+voc16+2.2*voc17+4.11*voc18+4*voc19+1.875*voc22+4*voc23+voc24);0;voc14+0.2*voc17;0.998*(voc15+voc16+voc17);0;0;0;0.31*pm25;0.2*pm25;0;0;0;pm25*0.08261;pm25*0.02011;pm25*0.01311;pm25*0.04112;0;pm25*0.00035;pm25*0.00484;pm25*0.0492;pm25*0.01191;pm25*0.01424;pm25*0.00143;pm25*0.01293;pm25*0.23809;pm10-pm25 +E022;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0.48*pm25;0.15*pm25;0;0;pm25*0.00427;pm25*0.04911;pm25*0.17393;pm25*0.00506;pm25*0.00919;pm25*0.00042;pm25*0.00009;pm25*0.00463;pm25*0.01719;pm25*0.0031;pm25*0.00277;pm25*0.00011;pm25*0.04219;pm25*0.05788;pm10-pm25 +E023;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25;pm10-pm25 +E024;0.9*nox_no2;0.1*nox_no2;0;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;oc;bc;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm25-oc-bc;pm10-pm25 +E086;0;0;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 diff --git a/data/profiles/speciation/Speciation_profile_radm2_madesorgam_WRF_CHEM.csv 
b/data/profiles/speciation/Speciation_profile_radm2_madesorgam_WRF_CHEM.csv new file mode 100644 index 0000000000000000000000000000000000000000..56926c3896b9dc90a8b1aa81370984d9212cc0e4 --- /dev/null +++ b/data/profiles/speciation/Speciation_profile_radm2_madesorgam_WRF_CHEM.csv @@ -0,0 +1,98 @@ +ID;E_NO;E_CO;E_SO2;E_NH3;E_ALD;E_CSL;E_ETH;E_HC3;E_HC5;E_HC8;E_HCHO;E_ISO;E_KET;E_OL2;E_OLI;E_OLT;E_ORA1;E_ORA2;E_TOL;E_XYL;E_PM_10;E_PM25J;E_PM25I;E_ECJ;E_ECI;E_ORGJ;E_ORGI;E_NO3J;E_NO3I;E_SO4J;E_SO4I +units;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;mol.h-1.km-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2;ug.s-1.m-2 +short_description;nitrogen_oxides;carbon_monoxide;sulfur_dioxide;ammonia;higher_aldehydes;phenols_cresols;ethane;propane;alkanes_0.5_1;alkanes_1_2;formaldehyde;isoprene;ketones;ethene;alkenes_internal;propene;formic_acid;organic_acids;toluene;xylene_and_higher_aromatics;unspeciated_primary_PM10;unspeciated_primary_PM2.5_accumulation_mode;unspeciated_primary_PM2.5_nuclei_mode;elemental_carbon_PM2.5_accumulation_mode;elemental_carbon_PM2.5_nuclei_mode;organic_carbon_PM2.5_accumulation_mode;organic_carbon_PM2.5_nuclei_mode;nitrate_PM2.5_accumulation_mode;nitrate_PM2.5_nuclei_mode;sulfate_PM2.5_accumulation_mode;sulfate_PM2.5_nuclei_mode +E001;nox_no;co;so2;nh3;c2h4o;0;c2h6;c4h10+1.198*c2h5oh+0.402*ch3oh+0.519*c3h8;1.075*c2h6s+0.956*c6h14+0.43*hialkanes+0.956*c7h16+0.956*c5h12;0.57*hialkanes;ch2o;c5h8;0.253*c3h6o;c2h4;0.5*c5h10+hialkenes+terpenes;c8h16+0.5*c5h10+c3h6+c4h8+c6h12;0;0;c7h8+0.293*c6h6;c8h10;0;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 
+E002;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;pm10;(pm25-oc-bc)*0.676;(pm25-oc-bc)*0.169;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.124;(pm25-oc-bc)*0.031 +E003;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;pm10;(pm25-oc-bc)*0.7288;(pm25-oc-bc)*0.1822;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0712;(pm25-oc-bc)*0.0178 +E004;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;pm10;(pm25-oc-bc)*0.7624;(pm25-oc-bc)*0.1906;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0376;(pm25-oc-bc)*0.0094 +E005;nox_no2;co;so2;nh3;voc22;0;voc02;voc03+voc04+0.4*voc09;voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;pm10;(pm25-oc-bc)*0.7528;(pm25-oc-bc)*0.1882;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0472;(pm25-oc-bc)*0.0118 +E006;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E007;nox_no2;co;so2;0;voc22;0;voc02;voc03+0.4*voc09;voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc17;pm10;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E008;nox_no2;co;so2;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;pm10;(pm25-oc-bc)*0.3704;(pm25-oc-bc)*0.0926;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.4296;(pm25-oc-bc)*0.1074 
+E009;nox_no;co;so2;nh3;0;0;0;0.343*c2h2+0.402*ch3oh;0;0;ch2o;0;0;c2h4;0;c3h6;0;ch3cooh;0.293*c6h6;0;pm10;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E010;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.656;pm25*0.164;pm25*0.008;pm25*0.002;pm25*0.016;pm25*0.004;0;0;pm25*0.12;pm25*0.03 +E011;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.36;pm25*0.09;pm25*0.144;pm25*0.036;pm25*0.28;pm25*0.07;0;0;pm25*0.016;pm25*0.004 +E012;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.688;pm25*0.172;pm25*0.008;pm25*0.002;pm25*0.024;pm25*0.006;0;0;pm25*0.08;pm25*0.02 +E013;nox_no2;co;so2;nh3;0;0;voc02;voc03+voc04;voc05+0.43*voc06;0.57*voc06;0;0;0;0;voc12;0;0;0;0.293*voc13+voc14;voc15;pm10;pm25*0.8;pm25*0.2;0;0;0;0;0;0;0;0 +E014;nox_no2;co;so2;nh3;0;0;0;0.95*voc01+0.69*voc18+voc20;0.05*voc01+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;0;0;voc23;0;0;0;0;0;voc14;voc15;0;0;0;0;0;0;0;0;0;0;0 +E015;nox_no2;co;so2;nh3;voc22;0;voc02;voc03+voc04+0.4*voc09;voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.168;pm25*0.042;pm25*0.168;pm25*0.042;pm25*0.464;pm25*0.116;0;0;pm25*0.008;pm25*0.002 +E016;nox_no2;co;so2;nh3;voc22;0;voc02;voc03+voc04+0.4*voc09;voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc15;pm10;pm25*0.0296;pm25*0.0074;pm25*0.54;pm25*0.135;pm25*0.2304;pm25*0.0576;0;0;pm25*0.008;pm25*0.002 
+E017;nox_no2;co;so2;nh3;voc22;0;voc02;voc03;0;voc17;voc21;0;0;voc07;voc12;voc08;0;0;0;0;pm10;pm25*0.168;pm25*0.042;pm25*0.168;pm25*0.042;pm25*0.464;pm25*0.116;0;0;pm25*0.008;pm25*0.002 +E018;0;0;0;0;0;0;0;voc03+voc04;voc05;0;0;0;0;0;voc12;0;0;0;0.293*voc13+voc14;voc15;0;0;0;0;0;0;0;0;0;0;0 +E019;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;pm25*0.12;pm25*0.03;pm25*0.36;pm25*0.09;pm25*0.32;pm25*0.08;0;0;0;0 +E020;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.2;pm25*0.05;pm25*0.328;pm25*0.082;pm25*0.248;pm25*0.062;0;0;pm25*0.024;pm25*0.006 +E021;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;0;voc23;voc07;voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc15+voc16;pm10;pm25*0.392;pm25*0.098;pm25*0.16;pm25*0.04;pm25*0.248;pm25*0.062;0;0;0;0 +E022;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;pm25*0.296;pm25*0.074;pm25*0.12;pm25*0.03;pm25*0.384;pm25*0.096;0;0;0;0 +E023;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;pm25*0.8;pm25*0.2;0;0;0;0;0;0;0;0 +E024;nox_no2;co;so2;nh3;0;0;nmvoc*0.00998;nmvoc*0.00353;0;0;nmvoc*0.00666;0;0;0;0;0;0;0;0;0;pm10;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E025;nox_no2;co;so2;nh3;0.000008*nmvoc;0;0.004177*nmvoc;0.002461*nmvoc;0;0;0.018548*nmvoc;0;0.000086*nmvoc;0.000858*nmvoc;0;0.00104*nmvoc;0;0;0;0.000893*nmvoc;pm10;(pm25-oc-bc)*0.676;(pm25-oc-bc)*0.169;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.124;(pm25-oc-bc)*0.031 +E026;nox_no2;co;so2;nh3;0.000013*nmvoc;0;0.001662*nmvoc;0.006766*nmvoc;0;0;0.008038*nmvoc;0;0.000187*nmvoc;0.003224*nmvoc;0;0.000496*nmvoc;0;0;0;0.001726*nmvoc;pm10;(pm25-oc-bc)*0.7288;(pm25-oc-bc)*0.1822;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0712;(pm25-oc-bc)*0.0178 
+E027;nox_no2;co;so2;nh3;0.000592*nmvoc;0;0.001607*nmvoc;0.006087*nmvoc;0;0;0.000899*nmvoc;0;0.000058*nmvoc;0.002423*nmvoc;0;0.002589*nmvoc;0;0;0;0.003302*nmvoc;pm10;(pm25-oc-bc)*0.7528;(pm25-oc-bc)*0.1882;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0472;(pm25-oc-bc)*0.0118 +E028;nox_no2;co;so2;nh3;0.000047*nmvoc;0;0.004187*nmvoc;0.007617*nmvoc;0;0;0.00186*nmvoc;0;0.000012*nmvoc;0.002914*nmvoc;0;0.002559*nmvoc;0;0;0;0.001755*nmvoc;pm10;(pm25-oc-bc)*0.7624;(pm25-oc-bc)*0.1906;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.0376;(pm25-oc-bc)*0.0094 +E029;nox_no2;co;so2;nh3;0;0;nmvoc*0.008553;nmvoc*0.006788;0;0;nmvoc*0.005349;nmvoc*0.000169;nmvoc*0.000013;nmvoc*0.001867;0;nmvoc*0.000435;0;0;0;nmvoc*0.000742;pm10;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E030;0;0;0;0;0;0;0.000148*nmvoc;0.012341*nmvoc;0;0;0.000004*nmvoc;0.000001*nmvoc;0.001382*nmvoc;0.000087*nmvoc;0;0.000161*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;0;0;0;0;0 +E031;nox_no2;co;so2;nh3;0.000972*nmvoc;0.000614*nmvoc;0.001894*nmvoc;0.002246*nmvoc;0.000215*nmvoc;0.000253*nmvoc;0.002737*nmvoc;0.000043*nmvoc;0.000524*nmvoc;0.002929*nmvoc;0.000309*nmvoc;0.001607*nmvoc;0.000281*nmvoc;0.000782*nmvoc;0.000083*nmvoc;0.000468*nmvoc;pm10;(pm25-oc-bc)*0.8;(pm25-oc-bc)*0.2;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E032;nox_no2;co;so2;0;0;0;0;0;0;0.002161*nmvoc;0;0;0;0.004456*nmvoc;0.001693*nmvoc;0.001111*nmvoc;0;0;0.001864*nmvoc;0.000762*nmvoc;pm10;(pm25-oc-bc)*0.3704;(pm25-oc-bc)*0.0926;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;(pm25-oc-bc)*0.4296;(pm25-oc-bc)*0.1074 +E033;nox_no2;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E034;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E035;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E036;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E037;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E038;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; 
+E039;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E040;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E041;nox_no2;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E042;nox_no2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E043;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E044;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E045;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E046;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E047;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E048;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E049;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E050;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E051;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E052;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E053;nox_no2;co;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E054;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E055;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E056;nox_no2;co;so2;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;pm10;;;;;;;;;; +E057;0;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;;;;;;;;;; +E058;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E059;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E060;0;0;0;0;0;0;voc02;voc03+voc04+0.69*voc18+voc20;voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;0;voc10;0;0;voc12;voc08;0;0;0.293*voc13+voc14;voc17;0;0;0;0;0;0;0;0;0;0;0 
+E061;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E062;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E063;0;0;0;0;voc22;0;voc02;voc03+voc04+0.4*voc09+0.69*voc18+voc20;voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E064;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E065;0;0;0;0;0;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;0;voc10;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E066;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E067;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;voc21;voc10;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E068;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;voc21;voc10;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E069;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+0.4*voc09;0.05*voc01+voc05+0.43*voc06;0.57*voc06+voc17;voc21;voc10;voc23;voc07;voc12;voc08;0;0;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 
+E070;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E071;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E072;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E073;0;0;0;0;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;0;0;0;0;0;0;0;0 +E074;nox_no2;co;so2;nh3;0.000008*nmvoc;0;0.004177*nmvoc;0.002461*nmvoc;0;0;0.018548*nmvoc;0;0.000086*nmvoc;0.000858*nmvoc;0;0.00104*nmvoc;0;0;0;0.000893*nmvoc;pm10;pm25*0.656;pm25*0.164;pm25*0.008;pm25*0.002;pm25*0.016;pm25*0.004;0;0;pm25*0.12;pm25*0.03 +E075;nox_no2;co;so2;nh3;0.000013*nmvoc;0;0.001662*nmvoc;0.006766*nmvoc;0;0;0.008038*nmvoc;0;0.000187*nmvoc;0.003224*nmvoc;0;0.000496*nmvoc;0;0;0;0.001726*nmvoc;pm10;pm25*0.688;pm25*0.172;pm25*0.008;pm25*0.002;pm25*0.024;pm25*0.006;0;0;pm25*0.08;pm25*0.02 +E076;nox_no2;co;so2;nh3;0.000047*nmvoc;0;0.004187*nmvoc;0.007617*nmvoc;0;0;0.00186*nmvoc;0;0.000012*nmvoc;0.002914*nmvoc;0;0.002559*nmvoc;0;0;0;0.001755*nmvoc;pm10;pm25*0.36;pm25*0.09;pm25*0.144;pm25*0.036;pm25*0.28;pm25*0.07;0;0;pm25*0.016;pm25*0.004 +E077;nox_no2;co;so2;nh3;0;0;0.005934*nmvoc;0.013251*nmvoc;0;0;0.000026*nmvoc;0.000001*nmvoc;0;0.000006*nmvoc;0;0.000373*nmvoc;0;0;0;0.000048*nmvoc;pm10;pm25*0.8;pm25*0.2;0;0;0;0;0;0;0;0 
+E078;nox_no2;co;so2;nh3;0;0;0.000148*nmvoc;0.012341*nmvoc;0;0;0.000004*nmvoc;0.000001*nmvoc;0.001382*nmvoc;0.000087*nmvoc;0;0.000161*nmvoc;0;0;0;0.001686*nmvoc;0;0;0;0;0;0;0;0;0;0;0 +E079;nox_no2;co;so2;nh3;0.000592*nmvoc;0;0.001607*nmvoc;0.006087*nmvoc;0;0;0.000899*nmvoc;0;0.000058*nmvoc;0.002423*nmvoc;0;0.002589*nmvoc;0;0;0;0.003302*nmvoc;pm10;pm25*0.0296;pm25*0.0074;pm25*0.54;pm25*0.135;pm25*0.2304;pm25*0.0576;0;0;pm25*0.008;pm25*0.002 +E080;nox_no2;co;so2;nh3;0;0;0;0;0;0.002161*nmvoc;0;0;0;0.004456*nmvoc;0.001693*nmvoc;0.001111*nmvoc;0;0;0.001864*nmvoc;0.000762*nmvoc;pm10;pm25*0.2;pm25*0.05;pm25*0.328;pm25*0.082;pm25*0.248;pm25*0.062;0;0;pm25*0.024;pm25*0.006 +E081;nox_no2;co;so2;nh3;0.001407*nmvoc;0.000026*nmvoc;0.000293*nmvoc;0.000571*nmvoc;0.000077*nmvoc;0.000265*nmvoc;0.004999*nmvoc;0;0.000107*nmvoc;0.00622*nmvoc;0.000447*nmvoc;0.002007*nmvoc;0;0;0.000414*nmvoc;0.000124*nmvoc;pm10;pm25*0.2;pm25*0.05;pm25*0.328;pm25*0.082;pm25*0.248;pm25*0.062;0;0;pm25*0.024;pm25*0.006 +E082;nox_no2;co;so2;nh3;0.000538*nmvoc;0;0.000166*nmvoc;0.00153*nmvoc;0.00201*nmvoc;0.001817*nmvoc;0.001965*nmvoc;0;0;0.003921*nmvoc;0.000885*nmvoc;0.00111*nmvoc;0;0;0.000343*nmvoc;0.000476*nmvoc;pm10;pm25*0.2;pm25*0.05;pm25*0.328;pm25*0.082;pm25*0.248;pm25*0.062;0;0;pm25*0.024;pm25*0.006 +E083;nox_no2;co;so2;nh3;0;0;nmvoc*0.008553;nmvoc*0.006788;0;0;nmvoc*0.005349;nmvoc*0.000169;nmvoc*0.000013;nmvoc*0.001867;0;nmvoc*0.000435;0;0;0;nmvoc*0.000742;pm10;pm25*0.392;pm25*0.098;pm25*0.16;pm25*0.04;pm25*0.248;pm25*0.062;0;0;0;0 +E084;nox_no2;co;so2;nh3;0.001624*nmvoc;0.000252*nmvoc;0;0.001519*nmvoc;0.009379*nmvoc;0.000267*nmvoc;0;0;0.000344*nmvoc;0;0.000031*nmvoc;0;0;0.000618*nmvoc;0.000128*nmvoc;0;pm10;pm25*0.296;pm25*0.074;pm25*0.12;pm25*0.03;pm25*0.384;pm25*0.096;0;0;0;0 
+E085;nox_no2;co;so2;nh3;0.000972*nmvoc;0.000614*nmvoc;0.001894*nmvoc;0.002246*nmvoc;0.000215*nmvoc;0.000253*nmvoc;0.002737*nmvoc;0.000043*nmvoc;0.000524*nmvoc;0.002929*nmvoc;0.000309*nmvoc;0.001607*nmvoc;0.000281*nmvoc;0.000782*nmvoc;0.000083*nmvoc;0.000468*nmvoc;pm10;pm25*0.296;pm25*0.074;pm25*0.12;pm25*0.03;pm25*0.384;pm25*0.096;0;0;0;0 +E086;0;0;so2;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E087;nox_no2;0;0;nh3;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0 +E088;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E089;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E090;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E091;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E092;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 
+E093;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E094;nox_no2;co;so2;nh3;voc22;0;voc02;0.95*voc01+voc03+voc04+0.4*voc09+0.69*voc18+voc20;0.05*voc01+voc05+0.43*voc06+0.31*voc18;0.57*voc06+voc17+voc19;voc21;voc10;voc23;voc07;voc11+voc12;voc08;0.44*voc24;0.56*voc24;0.293*voc13+voc14;voc16+voc17;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 +E095;nox_no2;co;so2;nh3;0.001407*nmvoc;0.000026*nmvoc;0.000293*nmvoc;0.000571*nmvoc;0.000077*nmvoc;0.000265*nmvoc;0.004999*nmvoc;0;0.000107*nmvoc;0.00622*nmvoc;0.000447*nmvoc;0.002007*nmvoc;0;0;0.000414*nmvoc;0.000124*nmvoc;0;0;0;bc*0.8;bc*0.2;oc*0.8;oc*0.2;0;0;0;0 diff --git a/data/profiles/temporal/TemporalProfile_Daily.csv b/data/profiles/temporal/TemporalProfile_Daily.csv new file mode 100644 index 0000000000000000000000000000000000000000..e48ec1dc8b1e98af2f484c96578626a582ba1a61 --- /dev/null +++ b/data/profiles/temporal/TemporalProfile_Daily.csv @@ -0,0 +1,7 @@ +TP_D,0,1,2,3,4,5,6 +D001,1,1,1,1,1,1,1 +D002,1.06,1.06,1.06,1.06,1.06,0.85,0.85 +D003,1.08,1.08,1.08,1.08,1.08,0.8,0.8 +D004,1.2,1.2,1.2,1.2,1.2,0.5,0.5 +D005,1.02,1.06,1.08,1.1,1.14,0.81,0.79 + diff --git a/data/profiles/temporal/TemporalProfile_Hourly.csv b/data/profiles/temporal/TemporalProfile_Hourly.csv new file mode 100644 index 0000000000000000000000000000000000000000..857270e37aa66ad6c3f8f76968f26341ba7bc978 --- /dev/null +++ b/data/profiles/temporal/TemporalProfile_Hourly.csv @@ -0,0 +1,12 @@ +TP_H,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23 +H001,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1. 
+H002,0.79,0.72,0.72,0.71,0.74,0.8,0.92,1.08,1.19,1.22,1.21,1.21,1.17,1.15,1.14,1.13,1.1,1.07,1.04,1.02,1.02,1.01,0.96,0.88 +H003,0.38,0.36,0.36,0.36,0.37,0.69,1.19,1.53,1.57,1.56,1.35,1.16,1.07,1.06,1,0.98,0.99,1.12,1.41,1.52,1.39,1.35,1.19,0.42 +H004,0.75,0.75,0.78,0.82,0.88,0.95,1.02,1.09,1.16,1.22,1.28,1.3,1.22,1.24,1.25,1.16,1.08,1.01,0.95,0.9,0.85,0.81,0.78,0.75 +H005,0.5,0.35,0.2,0.1,0.1,0.2,0.75,1.25,1.4,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.4,1.25,1.1,1,0.9,0.8,0.7 +H006,0.19,0.09,0.06,0.05,0.09,0.22,0.86,1.84,1.86,1.41,1.24,1.2,1.32,1.44,1.45,1.59,2.03,2.08,1.51,1.06,0.74,0.62,0.61,0.44 +H007,0.6,0.6,0.6,0.6,0.6,0.65,0.75,0.9,1.1,1.35,1.45,1.6,1.65,1.75,1.7,1.55,1.35,1.1,0.9,0.75,0.65,0.6,0.6,0.6 +H008,0.06,0.168,0.192,0.18,0.24,0.192,0.204,0.216,0.336,0.6,1.08,1.8,2.52,3.12,3.12,3.12,2.64,1.68,0.96,0.72,0.42,0.36,0.036,0.036 +H009,0.597235,0.552995,0.497696,0.508756,0.575115,0.741014,0.917972,1.15023,1.32719,1.42673,1.45991,1.43779,1.39355,1.37143,1.36037,1.29401,1.24977,1.19447,1.09493,0.973272,0.829493,0.729954,0.685714,0.630415 +H010,0.613383,0.557621,0.490706,0.468401,0.479554,0.568773,0.669145,0.814126,0.97026,1.12639,1.22677,1.27138,1.29368,1.3829,1.46097,1.47212,1.46097,1.46097,1.3829,1.23792,1.08178,0.936803,0.847584,0.724907 + diff --git a/data/profiles/temporal/TemporalProfile_Monthly.csv b/data/profiles/temporal/TemporalProfile_Monthly.csv new file mode 100644 index 0000000000000000000000000000000000000000..b66babdffa0fc04e52fe898724bde6c202eb951c --- /dev/null +++ b/data/profiles/temporal/TemporalProfile_Monthly.csv @@ -0,0 +1,12 @@ +TP_M,1,2,3,4,5,6,7,8,9,10,11,12 +M001,1,1,1,1,1,1,1,1,1,1,1,1 +M002,1.2,1.15,1.05,1,0.9,0.85,0.8,0.875,0.95,1,1.075,1.15 +M003,1.7,1.5,1.3,1,0.7,0.4,0.2,0.4,0.7,1.05,1.4,1.65 +M004,1.1,1.075,1.05,1,0.95,0.9,0.93,0.95,0.97,1,1.025,1.05 +M005,1.2,1.2,1.2,0.8,0.8,0.8,0.8,0.8,0.8,1.2,1.2,1.2 +M006,0.95,0.96,1.02,1,1.01,1.03,1.03,1.01,1.04,1.03,1.01,0.91 +M007,0.88,0.92,0.98,1.03,1.05,1.06,1.01,1.02,1.06,1.05,1.01,0.93 
+M008,0.88,0.92,0.98,1.03,1.05,1.06,1.01,1.02,1.06,1.05,1.01,0.93 +M009,0.45,1.3,2.35,1.7,0.85,0.85,0.85,1,1.1,0.65,0.45,0.45 +M999,0,0,0,0,0,0,0,0,1,0,0,0 + diff --git a/data/profiles/temporal/tz_world_country_iso3166.csv b/data/profiles/temporal/tz_world_country_iso3166.csv new file mode 100644 index 0000000000000000000000000000000000000000..4c8218028d6d827280913bdebf0816a4dcd908d1 --- /dev/null +++ b/data/profiles/temporal/tz_world_country_iso3166.csv @@ -0,0 +1,423 @@ +"country";"country_code";"time_zone";"time_zone_code";"country_code_alpha" +"Afghanistan";4;"Asia/Kabul";237;"AFG" +"Albania";8;"Europe/Tirane";355;"ALB" +"Antarctica";10;"uninhabited";418;"ATA" +"Algeria";12;"Africa/Algiers";4;"DZA" +"American Samoa";16;"Pacific/Pago_Pago";405;"ASM" +"Andorra";20;"Europe/Andorra";309;"AND" +"Angola";24;"Africa/Luanda";34;"AGO" +"Antigua and Barbuda";28;"America/Antigua";56;"ATG" +"Azerbaijan";31;"Asia/Baku";212;"AZE" +"Argentina";32;"America/Argentina/Salta";65;"ARG" +"Argentina";32;"America/Argentina/Catamarca";59;"ARG" +"Argentina";32;"America/Argentina/Tucuman";68;"ARG" +"Argentina";32;"America/Argentina/Cordoba";60;"ARG" +"Argentina";32;"America/Argentina/Rio_Gallegos";64;"ARG" +"Argentina";32;"America/Argentina/La_Rioja";62;"ARG" +"Argentina";32;"America/Argentina/Buenos_Aires";58;"ARG" +"Argentina";32;"America/Argentina/Ushuaia";69;"ARG" +"Argentina";32;"America/Argentina/Jujuy";61;"ARG" +"Argentina";32;"America/Argentina/San_Juan";66;"ARG" +"Argentina";32;"America/Argentina/San_Luis";67;"ARG" +"Argentina";32;"America/Argentina/Mendoza";63;"ARG" +"Australia";36;"Australia/Brisbane";297;"AUS" +"Australia";36;"Australia/Broken_Hill";298;"AUS" +"Australia";36;"Australia/Lindeman";303;"AUS" +"Australia";36;"Australia/Darwin";300;"AUS" +"Australia";36;"Australia/Melbourne";305;"AUS" +"Australia";36;"Australia/Perth";306;"AUS" +"Australia";36;"Antarctica/Macquarie";201;"AUS" +"Australia";36;"Australia/Lord_Howe";304;"AUS" 
+"Australia";36;"Australia/Eucla";301;"AUS" +"Australia";36;"Australia/Hobart";302;"AUS" +"Australia";36;"Australia/Sydney";307;"AUS" +"Australia";36;"Australia/Currie";299;"AUS" +"Australia";36;"Australia/Adelaide";296;"AUS" +"Austria";40;"Europe/Vienna";360;"AUT" +"Bahamas";44;"America/Nassau";154;"BHS" +"Bahrain";48;"Asia/Bahrain";211;"BHR" +"Bangladesh";50;"Asia/Dhaka";223;"BGD" +"Armenia";51;"Asia/Yerevan";285;"ARM" +"Barbados";52;"America/Barbados";75;"BRB" +"Belgium";56;"Europe/Brussels";315;"BEL" +"Bermuda";60;"Atlantic/Bermuda";287;"BMU" +"Bhutan";64;"Asia/Thimphu";275;"BTN" +"Bolivia Plurinational State of";68;"America/La_Paz";133;"BOL" +"Bosnia and Herzegovina";70;"Europe/Sarajevo";349;"BIH" +"Botswana";72;"Africa/Gaborone";23;"BWA" +"Brazil";76;"America/Bahia";73;"BRA" +"Brazil";76;"America/Santarem";177;"BRA" +"Brazil";76;"America/Sao_Paulo";180;"BRA" +"Brazil";76;"America/Araguaina";57;"BRA" +"Brazil";76;"America/Belem";76;"BRA" +"Brazil";76;"America/Maceio";137;"BRA" +"Brazil";76;"America/Cuiaba";93;"BRA" +"Brazil";76;"America/Manaus";139;"BRA" +"Brazil";76;"America/Eirunepe";102;"BRA" +"Brazil";76;"America/Noronha";158;"BRA" +"Brazil";76;"America/Porto_Velho";169;"BRA" +"Brazil";76;"America/Boa_Vista";79;"BRA" +"Brazil";76;"America/Campo_Grande";83;"BRA" +"Brazil";76;"America/Recife";173;"BRA" +"Brazil";76;"America/Rio_Branco";176;"BRA" +"Brazil";76;"America/Fortaleza";105;"BRA" +"Belize";84;"America/Belize";77;"BLZ" +"British Indian Ocean Territory";86;"Indian/Chagos";368;"IOT" +"Solomon Islands";90;"Pacific/Guadalcanal";391;"SLB" +"Virgin Islands British";92;"America/Tortola";195;"VGB" +"Brunei Darussalam";96;"Asia/Brunei";217;"BRN" +"Bulgaria";100;"Europe/Sofia";352;"BGR" +"Myanmar";104;"Asia/Rangoon";263;"MMR" +"Burundi";108;"Africa/Bujumbura";12;"BDI" +"Belarus";112;"Europe/Minsk";338;"BLR" +"Cambodia";116;"Asia/Phnom_Penh";258;"KHM" +"Cameroon";120;"Africa/Douala";20;"CMR" +"Canada";124;"America/Edmonton";101;"CAN" 
+"Canada";124;"America/Iqaluit";127;"CAN" +"Canada";124;"America/Vancouver";196;"CAN" +"Canada";124;"America/Winnipeg";198;"CAN" +"Canada";124;"America/Moncton";149;"CAN" +"Canada";124;"America/Dawson";96;"CAN" +"Canada";124;"America/Montreal";152;"CAN" +"Canada";124;"America/Goose_Bay";108;"CAN" +"Canada";124;"America/Inuvik";126;"CAN" +"Canada";124;"America/Rainy_River";171;"CAN" +"Canada";124;"America/Atikokan";72;"CAN" +"Canada";124;"America/Dawson_Creek";97;"CAN" +"Canada";124;"America/Coral_Harbour";90;"CAN" +"Canada";124;"America/Toronto";194;"CAN" +"Canada";124;"America/Creston";92;"CAN" +"Canada";124;"America/Nipigon";156;"CAN" +"Canada";124;"America/Regina";174;"CAN" +"Canada";124;"America/Thunder_Bay";192;"CAN" +"Canada";124;"America/Fort_Nelson";104;"CAN" +"Canada";124;"America/Pangnirtung";164;"CAN" +"Canada";124;"America/Halifax";115;"CAN" +"Canada";124;"America/Yellowknife";200;"CAN" +"Canada";124;"America/Resolute";175;"CAN" +"Canada";124;"America/Rankin_Inlet";172;"CAN" +"Canada";124;"America/Glace_Bay";106;"CAN" +"Canada";124;"America/Blanc-Sablon";78;"CAN" +"Canada";124;"America/Cambridge_Bay";82;"CAN" +"Canada";124;"America/Swift_Current";189;"CAN" +"Canada";124;"America/St_Johns";184;"CAN" +"Canada";124;"America/Whitehorse";197;"CAN" +"Cape Verde";132;"Atlantic/Cape_Verde";289;"CPV" +"Cayman Islands";136;"America/Cayman";87;"CYM" +"Central African Republic";140;"Africa/Bangui";7;"CAF" +"Sri Lanka";144;"Asia/Colombo";221;"LKA" +"Chad";148;"Africa/Ndjamena";44;"TCD" +"Chile";152;"America/Punta_Arenas";1003;"CHL" +"Chile";152;"Pacific/Easter";383;"CHL" +"Chile";152;"America/Santiago";178;"CHL" +"China";156;"Asia/Shanghai";268;"CHN" +"China";156;"Asia/Harbin";228;"CHN" +"China";156;"Asia/Kashgar";240;"CHN" +"China";156;"Asia/Chongqing";220;"CHN" +"China";156;"Asia/Urumqi";279;"CHN" +"Taiwan Province of China";158;"Asia/Taipei";271;"TWN" +"Christmas Island";162;"Indian/Christmas";369;"CXR" +"Cocos (Keeling) Islands";166;"Indian/Cocos";370;"CCK" 
+"Colombia";170;"America/Bogota";80;"COL" +"Comoros";174;"Indian/Comoro";371;"COM" +"Mayotte";175;"Indian/Mayotte";376;"MYT" +"Congo";178;"Africa/Brazzaville";11;"COG" +"Congo the Democratic Republic of the";180;"Africa/Lubumbashi";35;"COD" +"Congo the Democratic Republic of the";180;"Africa/Kinshasa";30;"COD" +"Cook Islands";184;"Pacific/Rarotonga";410;"COK" +"Costa Rica";188;"America/Costa_Rica";91;"CRI" +"Croatia";191;"Europe/Zagreb";364;"HRV" +"Cuba";192;"America/Havana";116;"CUB" +"Cyprus";196;"Asia/Nicosia";253;"CYP" +"Cyprus";196;"Asia/Famagusta";1000;"CYP" +"Czech Republic";203;"Europe/Prague";344;"CZE" +"Benin";204;"Africa/Porto-Novo";48;"BEN" +"Denmark";208;"Europe/Copenhagen";320;"DNK" +"Dominica";212;"America/Dominica";100;"DMA" +"Dominican Republic";214;"America/Santo_Domingo";179;"DOM" +"Ecuador";218;"America/Guayaquil";113;"ECU" +"Ecuador";218;"Pacific/Galapagos";389;"ECU" +"El Salvador";222;"America/El_Salvador";103;"SLV" +"Equatorial Guinea";226;"Africa/Malabo";37;"GNQ" +"Ethiopia";231;"Africa/Addis_Ababa";3;"ETH" +"Eritrea";232;"Africa/Asmara";5;"ERI" +"Estonia";233;"Europe/Tallinn";354;"EST" +"Faroe Islands";234;"Atlantic/Faroe";290;"FRO" +"Falkland Islands (Malvinas)";238;"Atlantic/Stanley";295;"FLK" +"South Georgia and the South Sandwich Islands";239;"Atlantic/South_Georgia";293;"SGS" +"Fiji";242;"Pacific/Fiji";387;"FJI" +"Finland";246;"Europe/Helsinki";324;"FIN" +"Aland Islands";248;"Europe/Mariehamn";337;"ALA" +"France";250;"Europe/Paris";342;"FRA" +"French Guiana";254;"America/Cayenne";86;"GUF" +"French Polynesia";258;"Pacific/Gambier";390;"PYF" +"French Polynesia";258;"Pacific/Marquesas";399;"PYF" +"French Polynesia";258;"Pacific/Tahiti";412;"PYF" +"French Southern Territories";260;"Indian/Kerguelen";372;"ATF" +"Djibouti";262;"Africa/Djibouti";19;"DJI" +"Gabon";266;"Africa/Libreville";32;"GAB" +"Georgia";268;"Asia/Tbilisi";273;"GEO" +"Gambia";270;"Africa/Banjul";8;"GMB" +"Palestine State of";275;"Asia/Hebron";229;"PSE" +"Palestine State 
of";275;"Asia/Gaza";227;"PSE" +"Germany";276;"Europe/Berlin";313;"DEU" +"Germany";276;"Europe/Busingen";318;"DEU" +"Ghana";288;"Africa/Accra";2;"GHA" +"Gibraltar";292;"Europe/Gibraltar";322;"GIB" +"Kiribati";296;"Pacific/Kiritimati";395;"KIR" +"Kiribati";296;"Pacific/Tarawa";413;"KIR" +"Kiribati";296;"Pacific/Enderbury";385;"KIR" +"Greece";300;"Europe/Athens";311;"GRC" +"Greenland";304;"America/Thule";191;"GRL" +"Greenland";304;"America/Scoresbysund";181;"GRL" +"Greenland";304;"America/Godthab";107;"GRL" +"Greenland";304;"America/Danmarkshavn";95;"GRL" +"Grenada";308;"America/Grenada";110;"GRD" +"Guadeloupe";312;"America/Guadeloupe";111;"GLP" +"Guam";316;"Pacific/Guam";392;"GUM" +"Guatemala";320;"America/Guatemala";112;"GTM" +"Guinea";324;"Africa/Conakry";16;"GIN" +"Guyana";328;"America/Guyana";114;"GUY" +"Haiti";332;"America/Port-au-Prince";168;"HTI" +"Holy See (Vatican City State)";336;"Europe/Vatican";359;"VAT" +"Honduras";340;"America/Tegucigalpa";190;"HND" +"Hong Kong";344;"Asia/Hong_Kong";231;"HKG" +"Hungary";348;"Europe/Budapest";317;"HUN" +"Iceland";352;"Atlantic/Reykjavik";292;"ISL" +"India";356;"Asia/Kolkata";243;"IND" +"Indonesia";360;"Asia/Jakarta";234;"IDN" +"Indonesia";360;"Asia/Pontianak";259;"IDN" +"Indonesia";360;"Asia/Makassar";250;"IDN" +"Indonesia";360;"Asia/Jayapura";235;"IDN" +"Iran Islamic Republic of";364;"Asia/Tehran";274;"IRN" +"Iraq";368;"Asia/Baghdad";210;"IRQ" +"Ireland";372;"Europe/Dublin";321;"IRL" +"Israel";376;"Asia/Jerusalem";236;"ISR" +"Italy";380;"Europe/Rome";346;"ITA" +"Cote d'Ivoire";384;"Africa/Abidjan";1;"CIV" +"Jamaica";388;"America/Jamaica";128;"JAM" +"Japan";392;"Asia/Tokyo";276;"JPN" +"Kazakhstan";398;"Asia/Almaty";204;"KAZ" +"Kazakhstan";398;"Asia/Atyrau";1001;"KAZ" +"Kazakhstan";398;"Asia/Aqtau";207;"KAZ" +"Kazakhstan";398;"Asia/Aqtobe";208;"KAZ" +"Kazakhstan";398;"Asia/Qyzylorda";262;"KAZ" +"Kazakhstan";398;"Asia/Oral";257;"KAZ" +"Jordan";400;"Asia/Amman";205;"JOR" +"Kenya";404;"Africa/Nairobi";43;"KEN" +"Korea 
Democratic People's Republic of";408;"Asia/Pyongyang";260;"PRK" +"Korea Republic of";410;"Asia/Seoul";267;"KOR" +"Kuwait";414;"Asia/Kuwait";247;"KWT" +"Kyrgyzstan";417;"Asia/Bishkek";216;"KGZ" +"Lao Peoples Democratic Republic";418;"Asia/Vientiane";281;"LAO" +"Lebanon";422;"Asia/Beirut";215;"LBN" +"Lesotho";426;"Africa/Maseru";39;"LSO" +"Latvia";428;"Europe/Riga";345;"LVA" +"Liberia";430;"Africa/Monrovia";42;"LBR" +"Libya";434;"Africa/Tripoli";50;"LBY" +"Liechtenstein";438;"Europe/Vaduz";358;"LIE" +"Lithuania";440;"Europe/Vilnius";361;"LTU" +"Luxembourg";442;"Europe/Luxembourg";334;"LUX" +"Macao";446;"Asia/Macau";248;"MAC" +"Madagascar";450;"Indian/Antananarivo";367;"MDG" +"Malawi";454;"Africa/Blantyre";10;"MWI" +"Malaysia";458;"Asia/Kuala_Lumpur";245;"MYS" +"Malaysia";458;"Asia/Kuching";246;"MYS" +"Maldives";462;"Indian/Maldives";374;"MDV" +"Mali";466;"Africa/Bamako";6;"MLI" +"Malta";470;"Europe/Malta";336;"MLT" +"Martinique";474;"America/Martinique";141;"MTQ" +"Mauritania";478;"Africa/Nouakchott";46;"MRT" +"Mauritius";480;"Indian/Mauritius";375;"MUS" +"Mexico";484;"America/Monterrey";150;"MEX" +"Mexico";484;"America/Bahia_Banderas";74;"MEX" +"Mexico";484;"America/Ojinaga";162;"MEX" +"Mexico";484;"America/Cancun";84;"MEX" +"Mexico";484;"America/Mazatlan";143;"MEX" +"Mexico";484;"America/Matamoros";142;"MEX" +"Mexico";484;"America/Merida";145;"MEX" +"Mexico";484;"America/Mexico_City";147;"MEX" +"Mexico";484;"America/Chihuahua";89;"MEX" +"Mexico";484;"America/Tijuana";193;"MEX" +"Mexico";484;"America/Hermosillo";117;"MEX" +"Monaco";492;"Europe/Monaco";339;"MCO" +"Mongolia";496;"Asia/Choibalsan";219;"MNG" +"Mongolia";496;"Asia/Hovd";232;"MNG" +"Mongolia";496;"Asia/Ulaanbaatar";278;"MNG" +"Moldova Republic of";498;"Europe/Chisinau";319;"MDA" +"Montenegro";499;"Europe/Podgorica";343;"MNE" +"Montserrat";500;"America/Montserrat";153;"MSR" +"Morocco";504;"Africa/Casablanca";14;"MAR" +"Mozambique";508;"Africa/Maputo";38;"MOZ" +"Oman";512;"Asia/Muscat";252;"OMN" 
+"Nauru";520;"Pacific/Nauru";401;"NRU" +"Nepal";524;"Asia/Kathmandu";241;"NPL" +"Netherlands";528;"Europe/Amsterdam";308;"NLD" +"Curacao";531;"America/Curacao";94;"CUW" +"Aruba";533;"America/Aruba";70;"ABW" +"Sint Maarten (Dutch part)";534;"America/Lower_Princes";136;"SXM" +"Bonaire Sint Eustatius and Saba";535;"America/Kralendijk";132;"BES" +"New Caledonia";540;"Pacific/Noumea";404;"NCL" +"Vanuatu";548;"Pacific/Efate";384;"VUT" +"New Zealand";554;"Pacific/Chatham";381;"NZL" +"New Zealand";554;"Pacific/Auckland";379;"NZL" +"Nicaragua";558;"America/Managua";138;"NIC" +"Niger";562;"Africa/Niamey";45;"NER" +"Nigeria";566;"Africa/Lagos";31;"NGA" +"Niue";570;"Pacific/Niue";402;"NIU" +"Norfolk Island";574;"Pacific/Norfolk";403;"NFK" +"Norway";578;"Europe/Oslo";341;"NOR" +"Northern Mariana Islands";580;"Pacific/Saipan";411;"MNP" +"United States Minor Outlying Islands";581;"Pacific/Johnston";394;"UMI" +"United States Minor Outlying Islands";581;"Pacific/Wake";415;"UMI" +"United States Minor Outlying Islands";581;"Pacific/Midway";400;"UMI" +"Micronesia Federated States of";583;"Pacific/Chuuk";382;"FSM" +"Micronesia Federated States of";583;"Pacific/Kosrae";396;"FSM" +"Micronesia Federated States of";583;"Pacific/Yap";417;"FSM" +"Micronesia Federated States of";583;"Pacific/Pohnpei";408;"FSM" +"Marshall Islands";584;"Pacific/Kwajalein";397;"MHL" +"Marshall Islands";584;"Pacific/Majuro";398;"MHL" +"Palau";585;"Pacific/Palau";406;"PLW" +"Pakistan";586;"Asia/Karachi";239;"PAK" +"Panama";591;"America/Panama";163;"PAN" +"Papua New Guinea";598;"Pacific/Port_Moresby";409;"PNG" +"Papua New Guinea";598;"Pacific/Bougainville";380;"PNG" +"Paraguay";600;"America/Asuncion";71;"PRY" +"Peru";604;"America/Lima";134;"PER" +"Philippines";608;"Asia/Manila";251;"PHL" +"Pitcairn";612;"Pacific/Pitcairn";407;"PCN" +"Poland";616;"Europe/Warsaw";363;"POL" +"Portugal";620;"Atlantic/Azores";286;"PRT" +"Portugal";620;"Europe/Lisbon";331;"PRT" +"Portugal";620;"Atlantic/Madeira";291;"PRT" 
+"Guinea-Bissau";624;"Africa/Bissau";9;"GNB" +"Timor-Leste";626;"Asia/Dili";224;"TLS" +"Puerto Rico";630;"America/Puerto_Rico";170;"PRI" +"Qatar";634;"Asia/Qatar";261;"QAT" +"Reunion";638;"Indian/Reunion";377;"REU" +"Romania";642;"Europe/Bucharest";316;"ROU" +"Russian Federation";643;"Asia/Sakhalin";265;"RUS" +"Russian Federation";643;"Asia/Novosibirsk";255;"RUS" +"Russian Federation";643;"Asia/Anadyr";206;"RUS" +"Russian Federation";643;"Asia/Irkutsk";233;"RUS" +"Russian Federation";643;"Europe/Simferopol";350;"RUS" +"Russian Federation";643;"Asia/Srednekolymsk";270;"RUS" +"Russian Federation";643;"Asia/Magadan";249;"RUS" +"Russian Federation";643;"Asia/Tomsk";277;"RUS" +"Russian Federation";643;"Asia/Khandyga";242;"RUS" +"Russian Federation";643;"Europe/Kirov";330;"RUS" +"Russian Federation";643;"Asia/Ust-Nera";280;"RUS" +"Russian Federation";643;"Europe/Volgograd";362;"RUS" +"Russian Federation";643;"Asia/Novokuznetsk";254;"RUS" +"Russian Federation";643;"Asia/Barnaul";214;"RUS" +"Russian Federation";643;"Asia/Krasnoyarsk";244;"RUS" +"Russian Federation";643;"Asia/Chita";218;"RUS" +"Russian Federation";643;"Asia/Yekaterinburg";284;"RUS" +"Russian Federation";643;"Europe/Ulyanovsk";356;"RUS" +"Russian Federation";643;"Europe/Kaliningrad";328;"RUS" +"Russian Federation";643;"Asia/Omsk";256;"RUS" +"Russian Federation";643;"Europe/Samara";347;"RUS" +"Russian Federation";643;"Europe/Moscow";340;"RUS" +"Russian Federation";643;"Asia/Yakutsk";283;"RUS" +"Russian Federation";643;"Europe/Saratov";1002;"RUS" +"Russian Federation";643;"Asia/Kamchatka";238;"RUS" +"Russian Federation";643;"Europe/Astrakhan";310;"RUS" +"Russian Federation";643;"Asia/Vladivostok";282;"RUS" +"Rwanda";646;"Africa/Kigali";29;"RWA" +"Saint Barthelemy";652;"America/St_Barthelemy";183;"BLM" +"Saint Helena Ascension and Tristan da Cunha";654;"Atlantic/St_Helena";294;"SHN" +"Saint Kitts and Nevis";659;"America/St_Kitts";185;"KNA" +"Anguilla";660;"America/Anguilla";55;"AIA" +"Saint 
Lucia";662;"America/St_Lucia";186;"LCA" +"Saint Martin (French part)";663;"America/Marigot";140;"MAF" +"Saint Pierre and Miquelon";666;"America/Miquelon";148;"SPM" +"Saint Vincent and the Grenadines";670;"America/St_Vincent";188;"VCT" +"San Marino";674;"Europe/San_Marino";348;"SMR" +"Sao Tome and Principe";678;"Africa/Sao_Tome";49;"STP" +"Saudi Arabia";682;"Asia/Riyadh";264;"SAU" +"Senegal";686;"Africa/Dakar";17;"SEN" +"Serbia";688;"Europe/Belgrade";312;"SRB" +"Seychelles";690;"Indian/Mahe";373;"SYC" +"Sierra Leone";694;"Africa/Freetown";22;"SLE" +"Singapore";702;"Asia/Singapore";269;"SGP" +"Slovakia";703;"Europe/Bratislava";314;"SVK" +"Viet Nam";704;"Asia/Ho_Chi_Minh";230;"VNM" +"Slovenia";705;"Europe/Ljubljana";332;"SVN" +"Somalia";706;"Africa/Mogadishu";41;"SOM" +"South Africa";710;"Africa/Johannesburg";25;"ZAF" +"Zimbabwe";716;"Africa/Harare";24;"ZWE" +"Spain";724;"Africa/Ceuta";15;"ESP" +"Spain";724;"Atlantic/Canary";288;"ESP" +"Spain";724;"Europe/Madrid";335;"ESP" +"South Sudan";728;"Africa/Juba";26;"SSD" +"Sudan";729;"Africa/Khartoum";28;"SDN" +"Western Sahara";732;"Africa/El_Aaiun";21;"ESH" +"Suriname";740;"America/Paramaribo";165;"SUR" +"Svalbard and Jan Mayen";744;"Arctic/Longyearbyen";202;"SJM" +"Swaziland";748;"Africa/Mbabane";40;"SWZ" +"Sweden";752;"Europe/Stockholm";353;"SWE" +"Switzerland";756;"Europe/Zurich";366;"CHE" +"Syrian Arab Republic";760;"Asia/Damascus";222;"SYR" +"Tajikistan";762;"Asia/Dushanbe";226;"TJK" +"Thailand";764;"Asia/Bangkok";213;"THA" +"Togo";768;"Africa/Lome";33;"TGO" +"Tokelau";772;"Pacific/Fakaofo";386;"TKL" +"Tonga";776;"Pacific/Tongatapu";414;"TON" +"Trinidad and Tobago";780;"America/Port_of_Spain";167;"TTO" +"United Arab Emirates";784;"Asia/Dubai";225;"ARE" +"Tunisia";788;"Africa/Tunis";51;"TUN" +"Turkey";792;"Europe/Istanbul";326;"TUR" +"Turkmenistan";795;"Asia/Ashgabat";209;"TKM" +"Turks and Caicos Islands";796;"America/Grand_Turk";109;"TCA" +"Tuvalu";798;"Pacific/Funafuti";388;"TUV" 
+"Uganda";800;"Africa/Kampala";27;"UGA" +"Ukraine";804;"Europe/Kiev";329;"UKR" +"Ukraine";804;"Europe/Zaporozhye";365;"UKR" +"Ukraine";804;"Europe/Uzhgorod";357;"UKR" +"Macedonia the Former Yugoslav Republic of";807;"Europe/Skopje";351;"MKD" +"Egypt";818;"Africa/Cairo";13;"EGY" +"United Kingdom";826;"Europe/London";333;"GBR" +"Guernsey";831;"Europe/Guernsey";323;"GGY" +"Jersey";832;"Europe/Jersey";327;"JEY" +"Isle of Man";833;"Europe/Isle_of_Man";325;"IMN" +"Tanzania United Republic of";834;"Africa/Dar_es_Salaam";18;"TZA" +"United States";840;"America/Anchorage";54;"USA" +"United States";840;"America/Los_Angeles";135;"USA" +"United States";840;"America/Indiana/Tell_City";122;"USA" +"United States";840;"America/Denver";98;"USA" +"United States";840;"America/Detroit";99;"USA" +"United States";840;"America/Phoenix";166;"USA" +"United States";840;"America/Yakutat";199;"USA" +"United States";840;"America/Metlakatla";146;"USA" +"United States";840;"America/Indiana/Marengo";120;"USA" +"United States";840;"America/Boise";81;"USA" +"United States";840;"America/Indiana/Petersburg";121;"USA" +"United States";840;"America/North_Dakota/New_Salem";161;"USA" +"United States";840;"Pacific/Honolulu";393;"USA" +"United States";840;"America/Indiana/Winamac";125;"USA" +"United States";840;"America/Nome";157;"USA" +"United States";840;"America/Sitka";182;"USA" +"United States";840;"America/Chicago";88;"USA" +"United States";840;"America/Adak";53;"USA" +"United States";840;"America/North_Dakota/Center";160;"USA" +"United States";840;"America/Kentucky/Monticello";131;"USA" +"United States";840;"America/Indiana/Knox";119;"USA" +"United States";840;"America/Indiana/Vincennes";124;"USA" +"United States";840;"America/North_Dakota/Beulah";159;"USA" +"United States";840;"America/Kentucky/Louisville";130;"USA" +"United States";840;"America/Indiana/Indianapolis";118;"USA" +"United States";840;"America/Indiana/Vevay";123;"USA" +"United States";840;"America/New_York";155;"USA" +"United 
States";840;"America/Juneau";129;"USA" +"United States";840;"America/Menominee";144;"USA" +"Virgin Islands U.S.";850;"America/St_Thomas";187;"VIR" +"Burkina Faso";854;"Africa/Ouagadougou";47;"BFA" +"Uruguay";858;"America/Montevideo";151;"URY" +"Uzbekistan";860;"Asia/Tashkent";272;"UZB" +"Uzbekistan";860;"Asia/Samarkand";266;"UZB" +"Venezuela Bolivarian Republic of";862;"America/Caracas";85;"VEN" +"Wallis and Futuna";876;"Pacific/Wallis";416;"WLF" +"Samoa";882;"Pacific/Apia";378;"WSM" +"Yemen";887;"Asia/Aden";203;"YEM" +"Zambia";894;"Africa/Lusaka";36;"ZMB" +"Namibia";264;"Africa/Windhoek";52;"NAM" \ No newline at end of file diff --git a/data/profiles/vertical/Benchmark_15layers_vertical_description.csv b/data/profiles/vertical/Benchmark_15layers_vertical_description.csv new file mode 100644 index 0000000000000000000000000000000000000000..64e1ce8bc4122ab79db0f8581d9638a012f2e10a --- /dev/null +++ b/data/profiles/vertical/Benchmark_15layers_vertical_description.csv @@ -0,0 +1,16 @@ +Ilayer;height_magl +1;39 +2;78 +3;119 +4;157 +5;197 +6;237 +7;315 +8;390 +9;560 +10;820 +11;1250 +12;1870 +13;2850 +14;5100 +15;20100 \ No newline at end of file diff --git a/data/profiles/vertical/Vertical_profile.csv b/data/profiles/vertical/Vertical_profile.csv new file mode 100644 index 0000000000000000000000000000000000000000..2518a5a9b804a8306341bb14c8f95a1b99462299 --- /dev/null +++ b/data/profiles/vertical/Vertical_profile.csv @@ -0,0 +1,7 @@ +ID;layers;weights +V001;36,72,108,144,218,292,366,440,514,588,724,896,1050,1204,1358;0,0,0,0,0.1,0.32,0.31,0.15,0.06,0.03,0.03,0,0,0,0 +V002;36,72,108,144,218,292,366,440,514,588,724,896,1050,1204,1358;0,0.18,0.47,0.25,0.08,0.02,0,0,0,0,0,0,0,0,0 +V003;10,1000;0.6,0.4 +V004;1000,9000;0,1 +V005;9000,12000;0,1 +V006;36,72,108,144,218,292,366,440,514,588,724,896,1050,1204,1358;0,0,0.02,0.26,0.56,0.13,0.03,0,0,0,0,0,0,0,0 \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 
0000000000000000000000000000000000000000..251f3a7b22bae8a6f1cebd94c11d5af210697274 --- /dev/null +++ b/environment.yml @@ -0,0 +1,26 @@ +--- + +name: hermesv3_gr + +channels: + - conda-forge + +dependencies: + - python = 2 + - numpy + - netcdf4 >= 1.3.1 + - python-cdo >= 1.3.6 + - geopandas + - pyproj + - configargparse + - cf_units >= 1.1.3 + - esmpy >= 7.1.0r + - pytz + - timezonefinder + - mpi4py + # Testing + - pytest + - pytest-cov + - pycodestyle + - pip: + - holidays diff --git a/hermesv3_gr/__init__.py b/hermesv3_gr/__init__.py index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..6c8e6b979c5f58121ac7ee2d9e024749da3a8ce1 100644 --- a/hermesv3_gr/__init__.py +++ b/hermesv3_gr/__init__.py @@ -0,0 +1 @@ +__version__ = "0.0.0" diff --git a/hermesv3_gr/config/__init__.py b/hermesv3_gr/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/config/config.py b/hermesv3_gr/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..36d9db4dffb2971102fca7702ca350df2ab74aa0 --- /dev/null +++ b/hermesv3_gr/config/config.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +from configargparse import ArgParser + + +class Config(ArgParser): + """ + Initialization of the arguments that the parser can handle. + """ + def __init__(self): + super(Config, self).__init__() + self.options = self.read_options() + + def read_options(self): + """ + Reads all the options from command line or from the configuration file. + The value of an argument given by command line has high priority that the one that appear in the + configuration file. + + :return: Arguments already parsed. + :rtype: Namespace + """ + # p = ArgParser(default_config_files=['/home/Earth/mguevara/HERMES/HERMESv3/IN/conf/hermes.conf']) + p = ArgParser() + p.add_argument('-c', '--my-config', required=False, is_config_file=True, help='Path to the configuration file.') + # TODO Detallar mas que significan 1, 2 y 3 los log_level + p.add_argument('--log_level', required=True, help='Level of detail of the running process information.', + type=int, choices=[1, 2, 3]) + + p.add_argument('--input_dir', required=True, help='Path to the input directory of the model.') + p.add_argument('--data_path', required=True, help='Path to the data necessary for the model.') + p.add_argument('--output_dir', required=True, help='Path to the output directory of the model.') + p.add_argument('--output_name', required=True, + help="Name of the output file. You can add the string '' that will be substitute by the " + + "starting date of the simulation day.") + p.add_argument('--start_date', required=True, help='Starting Date to simulate (UTC)') + p.add_argument('--end_date', required=False, default=None, + help='If you want to simulate more than one day you have to specify the ending date of ' + + 'simulation in this parameter. 
If it is not set end_date = start_date.') + + p.add_argument('--output_timestep_type', required=True, help='Type of timestep.', + type=str, choices=['hourly', 'daily', 'monthly', 'yearly']) + p.add_argument('--output_timestep_num', required=True, help='Number of timesteps to simulate.', type=int) + p.add_argument('--output_timestep_freq', required=True, help='Frequency between timesteps.', type=int) + + p.add_argument('--output_model', required=True, help='Name of the output model.', + choices=['MONARCH', 'CMAQ', 'WRF_CHEM']) + p.add_argument('--output_attributes', required=False, + help='Path to the file that contains the global attributes.') + + p.add_argument('--domain_type', required=True, help='Type of domain to simulate.', + choices=['global', 'lcc', 'rotated', 'mercator']) + p.add_argument('--auxiliar_files_path', required=True, + help='Path to the directory where the necessary auxiliary files will be created if them are ' + + 'not created yet.') + + p.add_argument('--vertical_description', required=True, + help='Path to the file that contains the vertical description of the desired output.') + + # Global options + p.add_argument('--inc_lat', required=False, help='Latitude resolution for a global domain.', type=float) + p.add_argument('--inc_lon', required=False, help='Longitude resolution for a global domain.', type=float) + + # Rotated options + p.add_argument('--centre_lat', required=False, + help='Central geographic latitude of grid (non-rotated degrees). Corresponds to the TPH0D ' + + 'parameter in NMMB-MONARCH.', type=float) + p.add_argument('--centre_lon', required=False, + help='Central geographic longitude of grid (non-rotated degrees, positive east). Corresponds ' + + 'to the TLM0D parameter in NMMB-MONARCH.', type=float) + p.add_argument('--west_boundary', required=False, + help="Grid's western boundary from center point (rotated degrees). 
Corresponds to the WBD " + + "parameter in NMMB-MONARCH.", type=float) + p.add_argument('--south_boundary', required=False, + help="Grid's southern boundary from center point (rotated degrees). Corresponds to the SBD " + + "parameter in NMMB-MONARCH.", type=float) + p.add_argument('--inc_rlat', required=False, + help='Latitudinal grid resolution (rotated degrees). Corresponds to the DPHD parameter in ' + + 'NMMB-MONARCH.', type=float) + p.add_argument('--inc_rlon', required=False, + help='Longitudinal grid resolution (rotated degrees). Corresponds to the DLMD parameter ' + + 'in NMMB-MONARCH.', type=float) + + # Lambert conformal conic options + p.add_argument('--lat_1', required=False, + help='Standard parallel 1 (in deg). Corresponds to the P_ALP parameter of the GRIDDESC file.', + type=float) + p.add_argument('--lat_2', required=False, + help='Standard parallel 2 (in deg). Corresponds to the P_BET parameter of the GRIDDESC file.', + type=float) + p.add_argument('--lon_0', required=False, + help='Longitude of the central meridian (degrees). Corresponds to the P_GAM parameter of ' + + 'the GRIDDESC file.', type=float) + p.add_argument('--lat_0', required=False, + help='Latitude of the origin of the projection (degrees). Corresponds to the Y_CENT ' + + 'parameter of the GRIDDESC file.', type=float) + p.add_argument('--nx', required=False, + help='Number of grid columns. Corresponds to the NCOLS parameter of the GRIDDESC file.', + type=float) + p.add_argument('--ny', required=False, + help='Number of grid rows. Corresponds to the NROWS parameter of the GRIDDESC file.', + type=float) + p.add_argument('--inc_x', required=False, + help='X-coordinate cell dimension (meters). Corresponds to the XCELL parameter of the ' + + 'GRIDDESC file.', type=float) + p.add_argument('--inc_y', required=False, + help='Y-coordinate cell dimension (meters). 
Corresponds to the YCELL parameter of the ' + + 'GRIDDESC file.', type=float) + p.add_argument('--x_0', required=False, + help='X-coordinate origin of grid (meters). Corresponds to the XORIG parameter of the ' + + 'GRIDDESC file.', type=float) + p.add_argument('--y_0', required=False, + help='Y-coordinate origin of grid (meters). Corresponds to the YORIG parameter of the ' + + 'GRIDDESC file.', type=float) + + # Mercator + p.add_argument('--lat_ts', required=False, help='...', type=float) + + p.add_argument('--cross_table', required=True, + help='Path to the file that contains the information of the datasets to use.') + p.add_argument('--p_vertical', required=True, + help='Path to the file that contains all the needed vertical profiles.') + p.add_argument('--p_month', required=True, + help='Path to the file that contains all the needed monthly profiles.') + p.add_argument('--p_day', required=True, help='Path to the file that contains all the needed daily profiles.') + p.add_argument('--p_hour', required=True, help='Path to the file that contains all the needed hourly profiles.') + p.add_argument('--p_speciation', required=True, + help='Path to the file that contains all the needed speciation profiles.') + p.add_argument('--molecular_weights', required=True, + help='Path to the file that contains the molecular weights of the input pollutants.') + p.add_argument('--world_info', required=True, + help='Path to the file that contains the world information like timezones, ISO codes, ...') + + options = p.parse_args() + for item in vars(options): + is_str = False + exec ("is_str = str == type(options.{0})".format(item)) + if is_str: + exec("options.{0} = options.{0}.replace('', options.input_dir)".format(item)) + exec("options.{0} = options.{0}.replace('', options.domain_type)".format(item)) + if options.domain_type == 'global': + exec("options.{0} = options.{0}.replace('', '{1}_{2}')".format( + item, options.inc_lat, options.inc_lon)) + elif options.domain_type == 
'rotated': + exec("options.{0} = options.{0}.replace('<resolution>', '{1}_{2}')".format( + item, options.inc_rlat, options.inc_rlon)) + elif options.domain_type == 'lcc' or options.domain_type == 'mercator': + exec("options.{0} = options.{0}.replace('<resolution>', '{1}_{2}')".format( + item, options.inc_x, options.inc_y)) + + options.start_date = self._parse_start_date(options.start_date) + options.end_date = self._parse_end_date(options.end_date, options.start_date) + + self.create_dir(options.output_dir) + self.create_dir(options.auxiliar_files_path) + + return options + + def get_output_name(self, date): + """ + Generates the full path of the output replacing <date> by YYYYMMDDHH, YYYYMMDD, YYYYMM or YYYY depending on the + output_timestep_type. + + :param date: Date of the day to simulate. + :type date: datetime.datetime + + :return: Complete path to the output file. + :rtype: str + """ + import os + if self.options.output_timestep_type == 'hourly': + file_name = self.options.output_name.replace('<date>', date.strftime('%Y%m%d%H')) + elif self.options.output_timestep_type == 'daily': + file_name = self.options.output_name.replace('<date>', date.strftime('%Y%m%d')) + elif self.options.output_timestep_type == 'monthly': + file_name = self.options.output_name.replace('<date>', date.strftime('%Y%m')) + elif self.options.output_timestep_type == 'yearly': + file_name = self.options.output_name.replace('<date>', date.strftime('%Y')) + else: + file_name = self.options.output_name + full_path = os.path.join(self.options.output_dir, file_name) + return full_path + + @staticmethod + def create_dir(path): + """ + Create the given folder if it is not created yet. + + :param path: Path to create. + :type path: str + """ + import os + from mpi4py import MPI + icomm = MPI.COMM_WORLD + comm = icomm.Split(color=0, key=0) + rank = comm.Get_rank() + + if rank == 0: + if not os.path.exists(path): + os.makedirs(path) + + comm.Barrier() + + @staticmethod + def _parse_bool(str_bool): + """ + Parse the given string into a boolean. 
+ The accepted options for a True value are: 'True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y', '1' + The accepted options for a False value are: 'False', 'false', 'F', 'f', 'No', 'no', 'N', 'n', '0' + + If the string is not in the options it will release a WARNING and the return value will be False. + + :param str_bool: String to convert to boolean. + :return: bool + """ + true_options = ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y', '1', 1, True] + false_options = ['False', 'false', 'F', 'f', 'No', 'no', 'N', 'n', '0', 0, False, None] + + if str_bool in true_options: + return True + elif str_bool in false_options: + return False + else: + print 'WARNING: Boolean value not contemplated use {0} for True values and {1} for the False ones'.format( + true_options, false_options + ) + print '\t Using False as default' + return False + + @staticmethod + def _parse_start_date(str_date): + """ + Parse the date from string to datetime. + It accepts several ways to introduce the date: + YYYYMMDD, YYYY/MM/DD, YYYYMMDDhh, YYYYMMDD.hh, YYYY/MM/DD_hh:mm:ss, YYYY-MM-DD_hh:mm:ss, + YYYY/MM/DD hh:mm:ss, YYYY-MM-DD hh:mm:ss, YYYY/MM/DD_hh, YYYY-MM-DD_hh. + + :param str_date: Date to the day to simulate in string format. + :type str_date: str + + :return: Date to the day to simulate in datetime format. + :rtype: datetime.datetime + """ + from datetime import datetime + format_types = ['%Y%m%d', '%Y%m%d%H', '%Y%m%d.%H', '%Y/%m/%d_%H:%M:%S', '%Y-%m-%d_%H:%M:%S', + '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d_%H', '%Y-%m-%d_%H', '%Y/%m/%d'] + + date = None + for date_format in format_types: + try: + date = datetime.strptime(str_date, date_format) + break + except ValueError as e: + if e.message == 'day is out of range for month': + raise ValueError(e) + + if date is None: + raise ValueError("Date format '{0}' not contemplated. Use one of this: {1}".format(str_date, format_types)) + + return date + + def _parse_end_date(self, end_date, start_date): + """ + Parse the end date. 
+ If it's not defined it will be the same date as start_date (to do only one day). + + :param end_date: Date to the last day to simulate in string format. + :type end_date: str + + :param start_date: Date to the first day to simulate. + :type start_date: datetime.datetime + + :return: Date to the last day to simulate in datetime format. + :rtype: datetime.datetime + """ + if end_date is None: + return start_date + else: + return self._parse_start_date(end_date) + + def set_log_level(self): + """ + Defines the log_level using the common script settings. + """ + import settings + settings.define_global_vars(self.options.log_level) + + +if __name__ == '__main__': + config = Config() + print config.options diff --git a/hermesv3_gr/config/settings.py b/hermesv3_gr/config/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..1b93cfa1182ba517531a30737ae119b8c81f5942 --- /dev/null +++ b/hermesv3_gr/config/settings.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see <http://www.gnu.org/licenses/>. 
+ + +import os +import numpy as np + +global refresh_log + +global precision +precision = np.float64 + +global writing_serial +writing_serial = False + +global compressed_netcdf +compressed_netcdf = True + +if not writing_serial: + compressed_netcdf = False + +global icomm +global comm +global rank +global size + +global log_level +global log_file +global df_times + + +def define_global_vars(in_log_level): + # TODO Documentation + from mpi4py import MPI + + global icomm + global comm + global rank + global size + + icomm = MPI.COMM_WORLD + comm = icomm.Split(color=0, key=0) + rank = comm.Get_rank() + size = comm.Get_size() + + global log_level + log_level = in_log_level + + +def define_log_file(log_path, date): + # TODO Documentation + log_path = os.path.join(log_path, 'logs') + if not os.path.exists(log_path): + if rank == 0: + os.makedirs(log_path) + comm.Barrier() + log_path = os.path.join(log_path, 'HERMESv3_{0}_Rank{1}_Procs{2}.log'.format( + date.strftime('%Y%m%d%H'), str(rank).zfill(4), str(size).zfill(4))) + if os.path.exists(log_path): + os.remove(log_path) + + global log_file + + log_file = open(log_path, mode='w') + + +def define_times_file(): + # TODO Documentation + import pandas as pd + global df_times + + df_times = pd.DataFrame(columns=['Class', 'Function', rank]) + + +def write_log(msg, level=1): + # TODO Documentation + if log_level >= level: + log_file.write(msg + '\n') + log_file.flush() + + +def write_time(module, func, time, level=1): + # TODO Documentation + global df_times + if log_level >= level: + df_times = df_times.append({'Class': module, 'Function': func, rank: time}, ignore_index=True) + + +def finish_logs(output_dir, date): + # TODO Documentation + import pandas as pd + from functools import reduce + log_file.close() + + global df_times + df_times = df_times.groupby(['Class', 'Function']).sum().reset_index() + data_frames = comm.gather(df_times, root=0) + if rank == 0: + times_path = os.path.join(output_dir, 'logs', 
'HERMESv3_{0}_times_Procs{1}.csv'.format( + date.strftime('%Y%m%d%H'), str(size).zfill(4))) + if os.path.exists(times_path): + os.remove(times_path) + df_merged = reduce(lambda left, right: pd.merge(left, right, on=['Class', 'Function'], how='outer'), + data_frames) + df_merged['min'] = df_merged.loc[:, range(size)].min(axis=1) + df_merged['max'] = df_merged.loc[:, range(size)].max(axis=1) + df_merged['mean'] = df_merged.loc[:, range(size)].mean(axis=1) + + df_merged.to_csv(times_path) + comm.Barrier() diff --git a/hermesv3_gr/hermes.py b/hermesv3_gr/hermes.py new file mode 100755 index 0000000000000000000000000000000000000000..f094111a0b08295c87d3c21d873e2e816251ca66 --- /dev/null +++ b/hermesv3_gr/hermes.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import timeit +from hermesv3_gr.config import settings +from hermesv3_gr.config.config import Config +from hermesv3_gr.modules.emision_inventories.emission_inventory import EmissionInventory +from hermesv3_gr.modules.vertical.vertical import VerticalDistribution + +from hermesv3_gr.tools.netcdf_tools import * +# import pyextrae.sequential as pyextrae + +global full_time + + +class Hermes(object): + """ + Interface class for HERMESv3. 
+ """ + def __init__(self, config, new_date=None): + from hermesv3_gr.modules.grids.grid import Grid + from hermesv3_gr.modules.temporal.temporal import TemporalDistribution + from hermesv3_gr.modules.writing.writer import Writer + global full_time + st_time = full_time = timeit.default_timer() + + self.config = config + self.options = config.options + + # Updating starting date + if new_date is not None: + self.options.start_date = new_date + + config.set_log_level() + settings.define_log_file(self.options.output_dir, self.options.start_date) + settings.define_times_file() + + settings.write_log('Starting HERMESv3 initialization:') + + if self.options.output_model in ['CMAQ', 'WRF_CHEM'] and self.options.domain_type == 'global': + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Global domain is not aviable for {0} output model.'.format( + self.options.output_model)) + sys.exit(1) + + self.levels = VerticalDistribution.get_vertical_output_profile(self.options.vertical_description) + + self.grid = Grid.select_grid( + self.options.domain_type, self.options.vertical_description, self.options.output_timestep_num, + self.options.auxiliar_files_path, self.options.inc_lat, self.options.inc_lon, self.options.centre_lat, + self.options.centre_lon, self.options.west_boundary, self.options.south_boundary, self.options.inc_rlat, + self.options.inc_rlon, self.options.lat_1, self.options.lat_2, self.options.lon_0, self.options.lat_0, + self.options.nx, self.options.ny, self.options.inc_x, self.options.inc_y, self.options.x_0, + self.options.y_0, self.options.lat_ts) + + self.emission_list = EmissionInventory.make_emission_list(self.options, self.grid, self.levels, + self.options.start_date) + + self.delta_hours = TemporalDistribution.calculate_delta_hours( + self.options.start_date, self.options.output_timestep_type, self.options.output_timestep_num, + self.options.output_timestep_freq) + + self.writer = 
Writer.get_writer( + self.options.output_model, self.config.get_output_name(self.options.start_date), self.grid, + self.levels, self.options.start_date, self.delta_hours, self.options.output_attributes, + compress=settings.compressed_netcdf, + parallel=not settings.writing_serial) + + settings.write_log('End of HERMESv3 initialization.') + settings.write_time('HERMES', 'Init', timeit.default_timer() - st_time, level=1) + + # @profile + def main(self): + """ + Main functionality of the model. + """ + from datetime import timedelta + + st_time = timeit.default_timer() + settings.write_log('') + settings.write_log('***** Starting HERMESv3 *****') + num = 1 + for ei in self.emission_list: + settings.write_log('Processing emission inventory {0} for the sector {1} ({2}/{3}):'.format( + ei.inventory_name, ei.sector, num, len(self.emission_list))) + num += 1 + + ei.do_regrid() + + if ei.vertical is not None: + settings.write_log("\tCalculating vertical distribution.", level=2) + if ei.source_type == 'area': + ei.vertical_factors = ei.vertical.calculate_weights() + elif ei.source_type == 'point': + ei.calculate_altitudes(self.options.vertical_description) + ei.point_source_by_cell() + # To avoid use point source as area source when is going to apply vertical factors while writing. 
+ ei.vertical = None + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('Unrecognized emission source type {0}'.format(ei.source_type)) + sys.exit(1) + + if ei.temporal is not None: + ei.temporal_factors = ei.temporal.calculate_3d_temporal_factors() + if ei.speciation is not None: + ei.emissions = ei.speciation.do_speciation(ei.emissions) + + self.writer.write(self.emission_list) + + settings.write_log("***** HERMESv3 simulation finished successfully *****") + settings.write_time('HERMES', 'main', timeit.default_timer() - st_time) + settings.write_time('HERMES', 'TOTAL', timeit.default_timer() - full_time) + settings.finish_logs(self.options.output_dir, self.options.start_date) + + if self.options.start_date < self.options.end_date: + return self.options.start_date + timedelta(days=1) + + return None + + +def run(): + date = Hermes(Config()).main() + while date is not None: + date = Hermes(Config(), new_date=date).main() + sys.exit(0) + + +if __name__ == '__main__': + run() diff --git a/hermesv3_gr/modules/__init__.py b/hermesv3_gr/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/emision_inventories/__init__.py b/hermesv3_gr/modules/emision_inventories/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/emision_inventories/emission_inventory.py b/hermesv3_gr/modules/emision_inventories/emission_inventory.py new file mode 100644 index 0000000000000000000000000000000000000000..3101a2fccdef07220c15f2b448d56b2971c302d3 --- /dev/null +++ b/hermesv3_gr/modules/emision_inventories/emission_inventory.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + +import os +import sys +import timeit + +import hermesv3_gr.config.settings as settings +from hermesv3_gr.modules.regrid.regrid_conservative import ConservativeRegrid +from hermesv3_gr.modules.vertical.vertical import VerticalDistribution +from hermesv3_gr.modules.temporal.temporal import TemporalDistribution +from hermesv3_gr.modules.speciation.speciation import Speciation + + +class EmissionInventory(object): + """ + Class that defines the content and the methodology for the area emission inventories + + :param current_date: Date of the day to simulate. + :type current_date: datetime.datetime + + :param inventory_name: Name of the inventory to use. + :type inventory_name: str + + :param sector: Name of the sector of the inventory to use. + :type sector: str + + :param pollutants: List of the pollutant name to take into account. + :type pollutants: list of str + + :param inputs_path: Path where are stored all the datasets to use. eg: /esarchive/recon/jrc/htapv2/monthly_mean + :type inputs_path: str + + :param input_frequency: Frequency of the inputs. [yearly, monthly, daily] + :type input_frequency: str + + :param reference_year: year of reference of the information of the dataset. + :type reference_year: int + + :param factors: Description of the scale factors per country. (e.g. SPN 1.5, CHN 3.) 
+ :type factors: str + + :param regrid_mask: Description of the masking countries (adding e.g. + SPN AND) (subtracting e.g. - SPN) + :type regrid_mask: str + + :param p_vertical: ID of the vertical profile to use. + :type p_vertical: str + + :param p_month: ID of the temporal monthly profile to use. + :type p_month: str + + :param p_day: ID of the temporal daily profile to use. + :type p_day: str + + :param p_hour: ID of the temporal hourly profile to use. + :type p_hour: str + + :param p_speciation: ID of the speciation profile to use. + :type p_speciation: str + """ + def __init__(self, options, grid, current_date, inventory_name, source_type, sector, pollutants, inputs_path, + input_frequency, vertical_output_profile, reference_year=2010, factors=None, regrid_mask=None, + p_vertical=None, p_month=None, p_day=None, p_hour=None, p_speciation=None): + from hermesv3_gr.modules.masking.masking import Masking + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating area source emission inventory.', level=3) + + # Emission Inventory parameters + self.source_type = source_type + self.date = current_date + self.inventory_name = inventory_name + self.sector = sector + self.reference_year = reference_year + self.inputs_path = inputs_path + self.input_frequency = input_frequency + self.grid = grid + + # Profiles + p_vertical = self.get_profile(p_vertical) + p_month = self.get_profile(p_month) + p_day = self.get_profile(p_day) + p_hour = self.get_profile(p_hour) + p_speciation = self.get_profile(p_speciation) + + # Creating Masking Object + # It will also create the WoldMasks necessaries + self.masking = Masking( + options.world_info, factors, regrid_mask, grid, + world_mask_file=os.path.join(os.path.dirname(options.auxiliar_files_path), + '{0}_WorldMask.nc'.format(inventory_name))) + + self.pollutant_dicts = self.create_pollutants_dicts(pollutants) + + self.masking.check_regrid_mask(self.pollutant_dicts[0]['path']) + + # Creating Regrid Object + # It will 
also create the WoldMasks necessaries + if self.source_type == 'area': + self.regrid = ConservativeRegrid( + self.pollutant_dicts, + os.path.join(options.auxiliar_files_path, + "Weight_Matrix_{0}_{1}.nc".format(self.inventory_name, settings.size)), + grid, masking=self.masking) + + # Creating Vertical Object + if p_vertical is not None: + self.vertical = VerticalDistribution( + self.get_profile(p_vertical), vertical_profile_path=options.p_vertical, + vertical_output_profile=vertical_output_profile) + else: + self.vertical = None + settings.write_log('\t\tNone vertical profile set.', level=2) + self.vertical_factors = None + + # Creating Temporal Object + # It will also create the necessaries timezone files + if not((p_month is None) and (p_day is None) and (p_hour is None)): + self.temporal = TemporalDistribution( + current_date, options.output_timestep_type, options.output_timestep_num, options.output_timestep_freq, + options.p_month, p_month, options.p_day, p_day, options.p_hour, p_hour, options.world_info, + options.auxiliar_files_path, grid) + else: + self.temporal = None + settings.write_log('\t\tNone temporal profile set.', level=2) + self.temporal_factors = None + + # Creating Speciation Object + if p_speciation is not None: + self.speciation = Speciation(p_speciation, options.p_speciation, options.molecular_weights) + else: + self.speciation = None + settings.write_log('\t\tNone speciation profile set.', level=2) + + self.vertical_weights = None + + self.emissions = [] + + settings.write_time('EmissionInventory', 'Init', timeit.default_timer() - st_time, level=3) + + def create_pollutants_dicts(self, pollutants): + """ + Create a list of dictionaries with the information of the name, paht and Dataset of each pollutant + + :param pollutants: List of pollutants names + :type pollutants: list + + :return: List of dictionaries + :rtype: list + """ + + pollutant_list = [] + + for pollutant_name in pollutants: + pollutant_list.append( + {'name': pollutant_name, + 
'path': self.get_input_path(pollutant_name), + 'Dataset': "{0}_{1}".format(self.inventory_name, self.sector)} + ) + return pollutant_list + + @staticmethod + def get_profile(id_aux): + """ + Parse the id of the profiles. + + :param id_aux: ID of the profile. + :type id_aux: str + + :return: ID of the profile parsed. + :rtype: str + """ + import pandas as pd + + if pd.isnull(id_aux): + return None + else: + return id_aux + + def get_input_path(self, pollutant=None): + """ + Completes the path of the input file that contains the needed information of the given pollutant. + + :param pollutant: Name of the pollutant. + :type pollutant: str + + :return: Full path of the needed file. + :rtype: str + """ + import pandas as pd + + if self.source_type == 'area': + extension = 'nc' + elif self.source_type == 'point': + extension = 'csv' + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Unknown source type {0}'.format(self.source_type)) + sys.exit(1) + + # Finding upper folder + if pd.isnull(self.sector): + upper_folder = '{0}'.format(pollutant) + else: + upper_folder = '{0}_{1}'.format(pollutant, self.sector) + + # Finding pollutant folder and filename. + if self.input_frequency == 'yearly': + file_name = "{0}_{1}.{2}".format(pollutant, self.reference_year, extension) + elif self.input_frequency == 'monthly': + file_name = "{0}_{1}{2}.{3}".format(pollutant, self.reference_year, self.date.strftime("%m"), extension) + elif self.input_frequency == 'daily': + file_name = "{0}_{1}{2}{3}.{4}".format(pollutant, self.reference_year, self.date.strftime("%m"), + self.date.strftime("%D"), extension) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise ValueError( + "ERROR: frequency {0} not implemented. 
Use yearly, monthly or daily.".format(self.input_frequency)) + sys.exit(1) + + # Filename + file_path = os.path.join(self.inputs_path, upper_folder, file_name) + + # Checking input file + if not os.path.exists(file_path): + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise IOError('ERROR: File {0} not found.'.format(file_path)) + sys.exit(1) + + return file_path + + def do_regrid(self): + + st_time = timeit.default_timer() + + settings.write_log("\tRegridding", level=2) + regridded_emissions = self.regrid.start_regridding() + for emission in regridded_emissions: + dict_aux = {'name': emission['name'], 'data': emission['data'], 'units': 'm'} + self.emissions.append(dict_aux) + settings.write_time('EmissionInventory', 'do_regrid', timeit.default_timer() - st_time, level=2) + + @staticmethod + def make_emission_list(options, grid, vertical_output_profile, date): + """ + Extract the information of the cross table to read all the needed emissions. + + :param options: Full list of parameters given by passing argument or in the configuration file. + :type options: Namespace + + :param grid: Grid to use. + :type grid: Grid + + :param vertical_output_profile: Path to eht file that contains the vertical profile. + :type vertical_output_profile: str + + :param date: Date to the day to simulate. + :type date: datetime.datetime + + :return: List of Emission inventories already loaded. 
+ :rtype: list of EmissionInventory + """ + import pandas as pd + import re + from gfas_emission_inventory import GfasEmissionInventory + from point_source_emission_inventory import PointSourceEmissionInventory + + st_time = timeit.default_timer() + settings.write_log('Loading emissions') + + path = options.cross_table + df = pd.read_csv(path, sep=';', index_col=False) + for column in ['ei', 'sector', 'ref_year', 'active', 'factor_mask', 'regrid_mask', 'pollutants', 'path', + 'frequency', 'source_type', 'p_vertical', 'p_month', 'p_day', 'p_hour', 'p_speciation']: + df_cols = list(df.columns.values) + if column not in df_cols: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Column {0} is not in the {1} file.'.format(column, path)) + sys.exit(1) + df = df[df['active'] == 1] + num = 1 + emission_inventory_list = [] + for i, emission_inventory in df.iterrows(): + settings.write_log('\tLoading emission {0}/{1} (Inventory: {2}; Sector: {3})'.format( + num, len(df), emission_inventory.ei, emission_inventory.sector), level=1) + num += 1 + pollutants = list(map(str, re.split(', |,|; |;| ', emission_inventory.pollutants))) + + try: + # gridded temporal profile + p_month = emission_inventory.p_month.replace('<input_dir>', options.input_dir) + except AttributeError: + p_month = emission_inventory.p_month + + emission_inventory_path = emission_inventory.path.replace('<data_path>', options.data_path) + emission_inventory_path = emission_inventory_path.replace('<input_dir>', options.input_dir) + + if emission_inventory.source_type == 'area': + if emission_inventory.ei == 'GFASv12': + emission_inventory_list.append( + GfasEmissionInventory(options, grid, date, emission_inventory.ei, + emission_inventory.source_type, emission_inventory.sector, pollutants, + emission_inventory_path, + emission_inventory.frequency, vertical_output_profile, + reference_year=emission_inventory.ref_year, + factors=emission_inventory.factor_mask, + 
regrid_mask=emission_inventory.regrid_mask, + p_vertical=emission_inventory.p_vertical, + p_month=p_month, + p_day=emission_inventory.p_day, + p_hour=emission_inventory.p_hour, + p_speciation=emission_inventory.p_speciation)) + else: + emission_inventory_list.append( + EmissionInventory(options, grid, date, emission_inventory.ei, emission_inventory.source_type, + emission_inventory.sector, pollutants, + emission_inventory_path, + emission_inventory.frequency, vertical_output_profile, + reference_year=emission_inventory.ref_year, + factors=emission_inventory.factor_mask, + regrid_mask=emission_inventory.regrid_mask, + p_vertical=emission_inventory.p_vertical, + p_month=p_month, + p_day=emission_inventory.p_day, + p_hour=emission_inventory.p_hour, + p_speciation=emission_inventory.p_speciation)) + elif emission_inventory.source_type == 'point': + emission_inventory_list.append( + PointSourceEmissionInventory(options, grid, date, emission_inventory.ei, + emission_inventory.source_type, emission_inventory.sector, pollutants, + emission_inventory_path, + emission_inventory.frequency, vertical_output_profile, + reference_year=emission_inventory.ref_year, + factors=emission_inventory.factor_mask, + regrid_mask=emission_inventory.regrid_mask, + p_vertical=emission_inventory.p_vertical, + p_month=p_month, + p_day=emission_inventory.p_day, + p_hour=emission_inventory.p_hour, + p_speciation=emission_inventory.p_speciation)) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise ValueError("ERROR: The emission inventory source type '{0}'".format( + emission_inventory.source_type) + + " is not implemented. 
Use 'area' or 'point'") + sys.exit(1) + settings.write_log('', level=2) + settings.write_time('EmissionInventory', 'make_emission_list', timeit.default_timer() - st_time, level=3) + + return emission_inventory_list + + +if __name__ == "__main__": + pass diff --git a/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py b/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py new file mode 100755 index 0000000000000000000000000000000000000000..a03cc27933ae585340d12e27ccf3de4779df948e --- /dev/null +++ b/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + +import os +import timeit + +import hermesv3_gr.config.settings as settings +from emission_inventory import EmissionInventory + + +class GfasEmissionInventory(EmissionInventory): + """ + Class that defines the content and the methodology for the GFAS emission inventories + + :param current_date: Date of the day to simulate. + :type current_date: datetime.datetime + + :param inventory_name: Name of the inventory to use. + :type inventory_name: str + + :param sector: Name of the sector of the inventory to use. + :type sector: str + + :param pollutants: List of the pollutant name to take into account. 
+ :type pollutants: list of str + + :param frequency: Frequency of the inputs. [yearly, monthly, daily] + :type frequency: str + + :param reference_year: year of reference of the information of the dataset. + :type reference_year: int + + :param factors: NOT IMPLEMENTED YET + :type factors: NOT IMPLEMENTED YET + + :param regrid_mask: NOT IMPLEMENTED YET + :type regrid_mask: NOT IMPLEMENTED YET + + :param p_vertical: ID of the vertical profile to use. + :type p_vertical: str + + :param p_month: ID of the temporal monthly profile to use. + :type p_month: str + + :param p_day: ID of the temporal daily profile to use. + :type p_day: str + + :param p_hour: ID of the temporal hourly profile to use. + :type p_hour: str + + :param p_speciation: ID of the speciation profile to use. + :type p_speciation: str + """ + + def __init__(self, options, grid, current_date, inventory_name, source_type, sector, pollutants, inputs_path, + frequency, vertical_output_profile, + reference_year=2010, factors=None, regrid_mask=None, p_vertical=None, p_month=None, p_day=None, + p_hour=None, p_speciation=None): + from hermesv3_gr.modules.vertical.vertical_gfas import GfasVerticalDistribution + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating GFAS emission inventory.', level=3) + + super(GfasEmissionInventory, self).__init__( + options, grid, current_date, inventory_name, source_type, sector, pollutants, inputs_path, frequency, + vertical_output_profile, + reference_year=reference_year, factors=factors, regrid_mask=regrid_mask, p_vertical=None, + p_month=p_month, p_day=p_day, p_hour=p_hour, p_speciation=p_speciation) + + self.approach = self.get_approach(p_vertical) + self.method = self.get_method(p_vertical) + + self.altitude = self.get_altitude() + + self.vertical = GfasVerticalDistribution(vertical_output_profile, self.approach, self.get_altitude()) + + settings.write_time('GFAS_EmissionInventory', 'Init', timeit.default_timer() - st_time, level=3) + + def 
get_input_path(self, pollutant=None, extension='nc'): + """ + Completes the path of the NetCDF that contains the needed information of the given pollutant. + + :param pollutant: Name of the pollutant of the NetCDF. + :type pollutant: str + + :param extension: Extension of the input file. + :type: str + + :return: Full path of the needed NetCDF. + :rtype: str + """ + st_time = timeit.default_timer() + + netcdf_path = os.path.join(self.inputs_path, 'multivar', 'ga_{0}.{1}'.format( + self.date.strftime('%Y%m%d'), extension)) + + settings.write_time('GfasEmissionInventory', 'get_input_path', timeit.default_timer() - st_time, level=3) + + return netcdf_path + + def get_altitude(self): + """ + Extract the altitude values depending on the choosen method. + + :return: Array with the alittude of each fire. + :rtype: numpy.array + """ + from hermesv3_gr.tools.netcdf_tools import extract_vars + + st_time = timeit.default_timer() + + if self.method == 'sovief': + alt_var = 'apt' + elif self.method == 'prm': + alt_var = 'mami' + else: + alt_var = None + + print "ERROR: Only 'sovief' and 'prm' methods are accepted." + + [alt] = extract_vars(self.get_input_path(), [alt_var]) + + alt = alt['data'] + + settings.write_time('GfasEmissionInventory', 'get_altitude', timeit.default_timer() - st_time, level=3) + return alt + + @ staticmethod + def get_approach(p_vertical): + """ + Extract the given approach value. + + :return: Approach value + :rtype: str + """ + import re + + st_time = timeit.default_timer() + + return_value = None + aux_list = re.split(', |,| , | ,', p_vertical) + for element in aux_list: + aux_value = re.split('=| =|= | = ', element) + if aux_value[0] == 'approach': + return_value = aux_value[1] + + settings.write_time('GfasEmissionInventory', 'get_approach', timeit.default_timer() - st_time, level=3) + + return return_value + + @ staticmethod + def get_method(p_vertical): + """ + Extract the given method value. 
+ + :return: Method value + :rtype: str + """ + import re + + st_time = timeit.default_timer() + + return_value = None + aux_list = re.split(', |,| , | ,', p_vertical) + for element in aux_list: + aux_value = re.split('=| =|= | = ', element) + if aux_value[0] == 'method': + return_value = aux_value[1] + + settings.write_time('GfasEmissionInventory', 'get_method', timeit.default_timer() - st_time, level=3) + + return return_value + + def do_vertical_allocation(self, values): + """ + Allocates the fire emissions on their top level. + + :param values: 2D array with the fire emissions + :type values: numpy.array + + :return: Emissions already allocated on the top altitude of each fire. + :rtype: numpy.array + """ + st_time = timeit.default_timer() + + return_value = self.vertical.do_vertical_interpolation_allocation(values, self.altitude) + + settings.write_time('GfasEmissionInventory', 'do_vertical_allocation', timeit.default_timer() - st_time, + level=3) + + return return_value + + def do_regrid(self): + + st_time = timeit.default_timer() + settings.write_log("\tRegridding", level=2) + + for i in xrange(len(self.emissions)): + self.emissions[i]["data"] = self.do_vertical_allocation(self.emissions[i]["data"]) + + regridded_emissions = self.regrid.start_regridding(gfas=True, vertical=self.vertical) + + for emission in regridded_emissions: + dict_aux = {'name': emission['name'], 'data': emission['data'], 'units': 'm'} + # dict_aux['data'] = dict_aux['data'].reshape((1,) + dict_aux['data'].shape) + self.emissions.append(dict_aux) + self.vertical = None + + settings.write_time('GfasEmissionInventory', 'do_regrid', timeit.default_timer() - st_time, level=2) + + +if __name__ == "__main__": + pass diff --git a/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py b/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py new file mode 100755 index 0000000000000000000000000000000000000000..8c22e114aa539794cd8a57109f9cb83820ab6e0d --- 
/dev/null +++ b/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import timeit +import hermesv3_gr.config.settings as settings +from emission_inventory import EmissionInventory + + +class PointSourceEmissionInventory(EmissionInventory): + """ + Class that defines the content and the methodology for the Point Source emission inventories + + :param options: Place where are stored all the arguments. + :type options: namespace + + :param grid: Destination grid object. + :type grid: Grid + + :param current_date: Date of the day to simulate. + :type current_date: datetime.datetime + + :param inventory_name: Name of the inventory to use. + :type inventory_name: str + + :param sector: Name of the sector of the inventory to use. + :type sector: str + + :param pollutants: List of the pollutant name to take into account. + :type pollutants: list of str + + :param frequency: Frequency of the inputs. [yearly, monthly, daily] + :type frequency: str + + :param reference_year: year of reference of the information of the dataset. + :type reference_year: int + + :param factors: Description of the scale factors per country. (e.g. Spain 1.5, China 3.) 
+ :type factors: str + + :param regrid_mask: Description of the masking countries (adding e.g. + Spain Andorra) (subtracting e.g. - Spain) + :type regrid_mask: str + + :param p_vertical: ID of the vertical profile to use. + :type p_vertical: str + + :param p_month: ID of the temporal monthly profile to use. + :type p_month: str + + :param p_day: ID of the temporal daily profile to use. + :type p_day: str + + :param p_hour: ID of the temporal hourly profile to use. + :type p_hour: str + + :param p_speciation: ID of the speciation profile to use. + :type p_speciation: str + """ + + def __init__(self, options, grid, current_date, inventory_name, source_type, sector, pollutants, inputs_path, + frequency, vertical_output_profile, reference_year=2010, factors=None, regrid_mask=None, + p_vertical=None, p_month=None, p_day=None, p_hour=None, p_speciation=None): + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating point source emission inventory.', level=3) + + super(PointSourceEmissionInventory, self).__init__( + options, grid, current_date, inventory_name, source_type, sector, pollutants, inputs_path, frequency, + vertical_output_profile, reference_year=reference_year, factors=factors, regrid_mask=regrid_mask, + p_vertical=p_vertical, p_month=p_month, p_day=p_day, p_hour=p_hour, p_speciation=p_speciation) + + self.crs = {'init': 'epsg:4326'} + self.location = None + self.area = None + self.vertical = 'custom' + + settings.write_time('PointSourceEmissionInventory', 'Init', timeit.default_timer() - st_time, level=3) + + def do_regrid(self): + """ + Allocates the point source emission on the correspondent cell (getting the ID of the cell). + + :return: True when everything is correct. 
+ :rtype: bool + """ + import pandas as pd + import geopandas as gpd + from shapely.geometry import Point + + st_time = timeit.default_timer() + settings.write_log("\tAllocating point sources on grid:", level=2) + + num = 1 + for pollutant in self.pollutant_dicts: + if self.location is None: + grid_shape = self.grid.to_shapefile(full_grid=False) + + settings.write_log('\t\tPollutant {0} ({1}/{2})'.format( + pollutant['name'], num, len(self.pollutant_dicts)), level=3) + num += 1 + + df = pd.read_csv(pollutant['path']) + + geometry = [Point(xy) for xy in zip(df.Lon, df.Lat)] + df = gpd.GeoDataFrame(df.loc[:, ['Emis', 'Alt_Injection']], crs=self.crs, geometry=geometry) + + df = df.to_crs(grid_shape.crs) + df = gpd.sjoin(df, grid_shape, how="inner", op='intersects') + + # Drops duplicates when the point source is on the boundary of the cell + df = df[~df.index.duplicated(keep='first')] + + if self.location is None: + self.location = df.loc[:, ['Alt_Injection', 'FID']] + self.area = self.grid.cell_area.flatten()[self.location['FID'].values] + + dict_aux = { + 'name': pollutant['name'], + 'units': '...', + 'data': df.loc[:, 'Emis'].values / self.area + } + + self.emissions.append(dict_aux) + settings.write_time('PointSourceEmissionInventory', 'do_regrid', timeit.default_timer() - st_time, level=2) + return True + + def calculate_altitudes(self, vertical_description_path): + """ + Calculate the number layer to allocate the point source. 
+ + :param vertical_description_path: Path to the file that contains the vertical description + :type vertical_description_path: str + + :return: True + :rtype: bool + """ + import pandas as pd + + st_time = timeit.default_timer() + settings.write_log("\t\tCalculating vertical allocation.", level=3) + df = pd.read_csv(vertical_description_path, sep=';') + # df.sort_values(by='height_magl', ascending=False, inplace=True) + self.location['layer'] = None + + for i, line in df.iterrows(): + self.location.loc[self.location['Alt_Injection'] <= line['height_magl'], 'layer'] = line['Ilayer'] - 1 + self.location.loc[self.location['Alt_Injection'] <= line['height_magl'], 'Alt_Injection'] = None + del self.location['Alt_Injection'] + + settings.write_time('PointSourceEmissionInventory', 'calculate_altitudes', timeit.default_timer() - st_time, + level=2) + + return True + + def point_source_by_cell(self): + """ + Sums the different emissions that are allocated in the same cell and layer. + + :return: None + """ + st_time = timeit.default_timer() + + aux_df = None + for emission in self.emissions: + + aux_df = self.location.copy() + aux_df['Emis'] = emission['data'] + aux_df = aux_df.groupby(['FID', 'layer']).sum() + aux_df.reset_index(inplace=True) + emission['data'] = aux_df['Emis'] + + self.location = aux_df.loc[:, ['FID', 'layer']] + + settings.write_time('PointSourceEmissionInventory', 'Init', timeit.default_timer() - st_time, level=3) + + return None + + +if __name__ == "__main__": + pass diff --git a/hermesv3_gr/modules/grids/__init__.py b/hermesv3_gr/modules/grids/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/grids/grid.py b/hermesv3_gr/modules/grids/grid.py new file mode 100644 index 0000000000000000000000000000000000000000..0c424d1ecd90aa27f068164d514c93216535a649 --- /dev/null +++ b/hermesv3_gr/modules/grids/grid.py @@ -0,0 +1,549 @@ +#!/usr/bin/env python + 
+# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import sys +import timeit +import numpy as np +import ESMF +import hermesv3_gr.config.settings as settings + + +class Grid(object): + """ + Grid object that contains the information of the output grid. + + :param grid_type: Type of the output grid [global, rotated, lcc, mercator]. + :type grid_type: str + + :param vertical_description_path: Path to the file that contains the vertical description. + :type vertical_description_path: str + + :param temporal_path: Path to the temporal folder. 
+ :type temporal_path: str + """ + + def __init__(self, grid_type, vertical_description_path, temporal_path): + st_time = timeit.default_timer() + # settings.write_log('Creating Grid...', level=1) + + # Defining class atributes + self.procs_array = None + self.nrows = 0 + self.ncols = 0 + + self.grid_type = grid_type + self.vertical_description = self.set_vertical_levels(vertical_description_path) + self.center_latitudes = None + self.center_longitudes = None + self.boundary_latitudes = None + self.boundary_longitudes = None + + self.cell_area = None + if settings.rank == 0: + if not os.path.exists(os.path.join(temporal_path)): + os.makedirs(os.path.join(temporal_path)) + settings.comm.Barrier() + + self.coords_netcdf_file = os.path.join(temporal_path, 'temporal_coords.nc') + self.temporal_path = temporal_path + self.shapefile_path = None + + self.esmf_grid = None + self.x_lower_bound = None + self.x_upper_bound = None + self.y_lower_bound = None + self.y_upper_bound = None + self.shape = None + + self.crs = None + + settings.write_time('Grid', 'Init', timeit.default_timer() - st_time, level=1) + + @staticmethod + def create_esmf_grid_from_file(file_name, sphere=True): + import ESMF + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating ESMF grid from file {0}'.format(file_name), level=3) + + # ESMF.Manager(debug=True) + + grid = ESMF.Grid(filename=file_name, filetype=ESMF.FileFormat.GRIDSPEC, is_sphere=sphere, + add_corner_stagger=True) + + settings.write_time('Grid', 'create_esmf_grid_from_file', timeit.default_timer() - st_time, level=3) + return grid + + @staticmethod + def select_grid(grid_type, vertical_description_path, timestep_num, temporal_path, inc_lat, inc_lon, + centre_lat, centre_lon, west_boundary, south_boundary, inc_rlat, inc_rlon, + lat_1, lat_2, lon_0, lat_0, nx, ny, inc_x, inc_y, x_0, y_0, lat_ts): + # TODO describe better the rotated parameters + """ + Create a Grid object depending on the grid type. 
+ + :param grid_type: type of grid to create [global, rotated, lcc, mercator] + :type grid_type: str + + :param vertical_description_path: Path to the file that contains the vertical description. + :type vertical_description_path: str + + :param timestep_num: Number of timesteps. + :type timestep_num: int + + :param temporal_path: Path to the temporal folder. + :type temporal_path: str + + :param inc_lat: [global] Increment between latitude centroids (degrees). + :type inc_lat: float + + :param inc_lon: [global] Increment between longitude centroids (degrees). + :type inc_lon: float + + :param centre_lat: [rotated] + :type centre_lat: float + + :param centre_lon: [rotated] + :type centre_lon: float + + :param west_boundary: [rotated] + :type west_boundary: float + + :param south_boundary: [rotated] + :type south_boundary: float + + :param inc_rlat: [rotated] Increment between rotated latitude centroids (degrees). + :type inc_rlat: float + + :param inc_rlon: [rotated] Increment between rotated longitude centroids (degrees). + :type inc_rlon: float + + :param lat_ts: [mercator] + :type lat_ts: float + + :param lat_1: [lcc] Value of the Lat1 for the LCC grid type. + :type lat_1: float + + :param lat_2: [lcc] Value of the Lat2 for the LCC grid type. + :type lat_2: float + + :param lon_0: [lcc, mercator] Value of the Lon0 for the LCC grid type. + :type lon_0: float + + :param lat_0: [lcc] Value of the Lat0 for the LCC grid type. + :type lat_0: float + + :param nx: [lcc, mercator] Number of cells on the x dimension. + :type nx: int + + :param ny: [lcc, mercator] Number of cells on the y dimension. + :type ny: int + + :param inc_x: [lcc, mercator] Increment between x dimensions cell centroids (metres). + :type inc_x: int + + :param inc_y: [lcc, mercator] Increment between y dimensions cell centroids (metres). + :type inc_y: int + + :param x_0: [lcc, mercator] Value of the X0 for the LCC grid type. 
+ :type x_0: float + + :param y_0: [lcc, mercator] Value of the Y0 for the LCC grid type. + :type y_0: float + + :return: Grid object. It will return a GlobalGrid, RotatedGrid or LccGrid depending on the type. + :rtype: Grid + """ + + st_time = timeit.default_timer() + settings.write_log('Selecting grid', level=1) + + # Creating a different object depending on the grid type + if grid_type == 'global': + from hermesv3_gr.modules.grids.grid_global import GlobalGrid + grid = GlobalGrid(grid_type, vertical_description_path, timestep_num, temporal_path, inc_lat, inc_lon) + + elif grid_type == 'rotated': + from hermesv3_gr.modules.grids.grid_rotated import RotatedGrid + grid = RotatedGrid(grid_type, vertical_description_path, timestep_num, temporal_path, + centre_lat, centre_lon, west_boundary, south_boundary, inc_rlat, inc_rlon) + + elif grid_type == 'lcc': + from hermesv3_gr.modules.grids.grid_lcc import LccGrid + grid = LccGrid(grid_type, vertical_description_path, timestep_num, temporal_path, lat_1, lat_2, lon_0, + lat_0, nx, ny, inc_x, inc_y, x_0, y_0) + + elif grid_type == 'mercator': + from hermesv3_gr.modules.grids.grid_mercator import MercatorGrid + grid = MercatorGrid(grid_type, vertical_description_path, timestep_num, temporal_path, lat_ts, lon_0, + nx, ny, inc_x, inc_y, x_0, y_0) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise NotImplementedError("The grid type {0} is not implemented.".format(grid_type) + + " Use 'global', 'rotated' or 'lcc'.") + sys.exit(1) + + settings.write_time('Grid', 'select_grid', timeit.default_timer() - st_time, level=3) + + return grid + + @staticmethod + def set_vertical_levels(vertical_description_path): + """ + Extract the vertical levels. + + :param vertical_description_path: path to the file that contain the vertical description of the required output + file. + :type vertical_description_path: str + + :return: Vertical levels. 
+ :rtype: list of int + """ + import pandas as pd + + st_time = timeit.default_timer() + settings.write_log('\t\tSetting vertical levels', level=3) + + df = pd.read_csv(vertical_description_path, sep=';') + + heights = df.height_magl.values + + settings.write_time('Grid', 'set_vertical_levels', timeit.default_timer() - st_time, level=3) + + return heights + + def write_coords_netcdf(self): + """ + Writes the temporal file with the coordinates of the output needed to generate the weight matrix. + If it is already well created it will only add the cell_area parameter. + """ + # TODO Not to write two NetCDF. Open one and modify it. + from hermesv3_gr.tools.netcdf_tools import write_netcdf + + st_time = timeit.default_timer() + settings.write_log('\twrite_coords_netcdf', level=3) + + if not self.chech_coords_file(): + # Writes an auxiliary empty NetCDF only with the coordinates and an empty variable. + write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, boundary_longitudes=self.boundary_longitudes, + regular_latlon=True) + + # Calculate the cell area of the auxiliary NetCDF file + self.cell_area = self.get_cell_area() + + # Re-writes the NetCDF adding the cell area + write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + cell_area=self.cell_area, boundary_latitudes=self.boundary_latitudes, + boundary_longitudes=self.boundary_longitudes, regular_latlon=True) + else: + self.cell_area = self.get_cell_area() + + settings.write_time('Grid', 'write_coords_netcdf', timeit.default_timer() - st_time, level=3) + + def get_cell_area(self): + """ + Calculate the cell area of the grid. + + :return: Area of each cell of the grid. 
+ :rtype: numpy.array + """ + from cdo import Cdo + from netCDF4 import Dataset + + st_time = timeit.default_timer() + settings.write_log('\t\tGetting cell area from {0}'.format(self.coords_netcdf_file), level=3) + + # Initialises the CDO + cdo = Cdo() + # Create a temporal file 's' with the cell area + s = cdo.gridarea(input=self.coords_netcdf_file) + # Get the cell area of the temporal file + nc_aux = Dataset(s, mode='r') + cell_area = nc_aux.variables['cell_area'][:] + nc_aux.close() + + settings.write_time('Grid', 'get_cell_area', timeit.default_timer() - st_time, level=3) + + return cell_area + + @staticmethod + def create_regular_grid_1d_array(center, inc, boundary): + """ + Create a regular grid giving the center, boundary and increment. + + :param center: Center of the coordinates. + :type center: float + + :param inc: Resolution: Increment between cells. + :type inc: float + + :param boundary: Limit of the coordinates: Distance between the first cell and the center. + :type boundary: float + + :return: 1D array with the coordinates. + :rtype: numpy.array + """ + + st_time = timeit.default_timer() + + # Calculate first center point. + origin = center - abs(boundary) + # Calculate the quantity of cells. + n = (abs(boundary) / inc) * 2 + # Calculate all the values + values = np.arange(origin + inc, origin + (n * inc) - inc + inc / 2, inc, dtype=np.float) + + settings.write_time('Grid', 'create_regular_grid_1d_array', timeit.default_timer() - st_time, level=3) + + return values + + @staticmethod + def create_bounds(coords, inc, number_vertices=2, inverse=False): + """ + Calculate the vertices coordinates. + + :param coords: Coordinates in degrees (latitude or longitude) + :type coords: numpy.array + + :param inc: Increment between center values. + :type inc: float + + :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the + boundaries (by default 2). 
+ :type number_vertices: int + + :param inverse: For some grid latitudes. + :type inverse: bool + + :return: Array with as many elements as vertices for each value of coords. + :rtype: numpy.array + """ + st_time = timeit.default_timer() + settings.write_log('\t\t\tCreating boundaries.', level=3) + + # Create new arrays moving the centers half increment less and more. + coords_left = coords - inc / 2 + coords_right = coords + inc / 2 + + # Defining the number of corners needed. 2 to regular grids and 4 for irregular ones. + if number_vertices == 2: + # Create an array of N arrays of 2 elements to store the floor and the ceil values for each cell + bound_coords = np.dstack((coords_left, coords_right)) + bound_coords = bound_coords.reshape((len(coords), number_vertices)) + elif number_vertices == 4: + # Create an array of N arrays of 4 elements to store the corner values for each cell + # It can be stored in clockwise starting form the left-top element, or in inverse mode. + if inverse: + bound_coords = np.dstack((coords_left, coords_left, coords_right, coords_right)) + + else: + bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left)) + else: + if settings.rank == 0: + raise ValueError('ERROR: The number of vertices of the boundaries must be 2 or 4.') + settings.write_log('ERROR: Check the .err file to get more info.') + sys.exit(1) + + settings.write_time('Grid', 'create_bounds', timeit.default_timer() - st_time, level=3) + + return bound_coords + + def get_coordinates_2d(self): + """ + Returns the coordinates but in a 2D format. + + A regular grid only needs two 1D arrays (latitudes and longitudes) to define a grid. + This method is to convert this two 1D arrays into 2D arrays replicating the info of each value. + + :return: Tuple with 2 fields, the first the 2D latitude coordinate, and the second for the 2D longitude + coordinate. 
+ :rtype: tuple + """ + st_time = timeit.default_timer() + settings.write_log('\t\tGetting 2D coordinates from ESMPy Grid', level=3) + + lat = self.esmf_grid.get_coords(1, ESMF.StaggerLoc.CENTER).T + lon = self.esmf_grid.get_coords(0, ESMF.StaggerLoc.CENTER).T + + settings.write_time('Grid', 'get_coordinates_2d', timeit.default_timer() - st_time, level=3) + + return lat, lon + + def is_shapefile(self): + return os.path.exists(self.shapefile_path) + + def to_shapefile(self, full_grid=True): + import geopandas as gpd + import pandas as pd + from shapely.geometry import Polygon + + st_time = timeit.default_timer() + # settings.write_log('\t\tGetting grid shapefile', level=3) + + if full_grid: + self.shapefile_path = os.path.join(self.temporal_path, 'shapefile') + else: + self.shapefile_path = os.path.join(self.temporal_path, 'shapefiles_n{0}'.format(settings.size)) + + if settings.rank == 0: + if not os.path.exists(self.shapefile_path): + os.makedirs(self.shapefile_path) + if full_grid: + self.shapefile_path = os.path.join(self.shapefile_path, 'grid_shapefile.shp') + else: + self.shapefile_path = os.path.join(self.shapefile_path, 'grid_shapefile_{0}.shp'.format(settings.rank)) + + done = self.is_shapefile() + + if not done: + settings.write_log('\t\tGrid shapefile not done. 
Lets try to create it.', level=3) + # Create Shapefile + + # Use the meters coordiantes to create the shapefile + + y = self.boundary_latitudes + x = self.boundary_longitudes + # sys.exit() + + if self.grid_type == 'global': + x = x.reshape((x.shape[1], x.shape[2])) + y = y.reshape((y.shape[1], y.shape[2])) + + # x_aux = np.empty((x.shape[0], y.shape[0], 4)) + # x_aux[:, :, 0] = x[:, np.newaxis, 0] + # x_aux[:, :, 1] = x[:, np.newaxis, 1] + # x_aux[:, :, 2] = x[:, np.newaxis, 1] + # x_aux[:, :, 3] = x[:, np.newaxis, 0] + aux_shape = (y.shape[0], x.shape[0], 4) + x_aux = np.empty(aux_shape) + x_aux[:, :, 0] = x[np.newaxis, :, 0] + x_aux[:, :, 1] = x[np.newaxis, :, 1] + x_aux[:, :, 2] = x[np.newaxis, :, 1] + x_aux[:, :, 3] = x[np.newaxis, :, 0] + + x = x_aux + # print x + del x_aux + + # y_aux = np.empty((x.shape[0], y.shape[0], 4)) + # y_aux[:, :, 0] = y[np.newaxis, :, 0] + # y_aux[:, :, 1] = y[np.newaxis, :, 0] + # y_aux[:, :, 2] = y[np.newaxis, :, 1] + # y_aux[:, :, 3] = y[np.newaxis, :, 1] + + y_aux = np.empty(aux_shape) + y_aux[:, :, 0] = y[:, np.newaxis, 0] + y_aux[:, :, 1] = y[:, np.newaxis, 0] + y_aux[:, :, 2] = y[:, np.newaxis, 1] + y_aux[:, :, 3] = y[:, np.newaxis, 1] + + # print y_aux + y = y_aux + del y_aux + + # exit() + + if not full_grid: + y = y[self.x_lower_bound:self.x_upper_bound, self.y_lower_bound:self.y_upper_bound, :] + x = x[self.x_lower_bound:self.x_upper_bound, self.y_lower_bound:self.y_upper_bound, :] + + aux_b_lats = y.reshape((y.shape[0] * y.shape[1], y.shape[2])) + aux_b_lons = x.reshape((x.shape[0] * x.shape[1], x.shape[2])) + + # The regular lat-lon projection has only 2 (laterals) points for each cell instead of 4 (corners) + # if aux_b_lats.shape[1] == 2: + # aux_b = np.empty((aux_b_lats.shape[0], 4)) + # aux_b[:, 0] = aux_b_lats[:, 0] + # aux_b[:, 1] = aux_b_lats[:, 0] + # aux_b[:, 2] = aux_b_lats[:, 1] + # aux_b[:, 3] = aux_b_lats[:, 1] + # aux_b_lats = aux_b + # + # if aux_b_lons.shape[1] == 2: + # aux_b = 
np.empty((aux_b_lons.shape[0], 4)) + # aux_b[:, 0] = aux_b_lons[:, 0] + # aux_b[:, 1] = aux_b_lons[:, 1] + # aux_b[:, 2] = aux_b_lons[:, 1] + # aux_b[:, 3] = aux_b_lons[:, 0] + # aux_b_lons = aux_b + + # Create one dataframe with 8 columns, 4 points with two coordinates each one + df_lats = pd.DataFrame(aux_b_lats, columns=['b_lat_1', 'b_lat_2', 'b_lat_3', 'b_lat_4']) + df_lons = pd.DataFrame(aux_b_lons, columns=['b_lon_1', 'b_lon_2', 'b_lon_3', 'b_lon_4']) + df = pd.concat([df_lats, df_lons], axis=1) + + # Substituate 8 columns by 4 with the two coordinates + df['p1'] = zip(df.b_lon_1, df.b_lat_1) + del df['b_lat_1'], df['b_lon_1'] + df['p2'] = zip(df.b_lon_2, df.b_lat_2) + del df['b_lat_2'], df['b_lon_2'] + df['p3'] = zip(df.b_lon_3, df.b_lat_3) + del df['b_lat_3'], df['b_lon_3'] + df['p4'] = zip(df.b_lon_4, df.b_lat_4) + del df['b_lat_4'], df['b_lon_4'] + + # Make a list of list of tuples + # [[(point_1.1), (point_1.2), (point_1.3), (point_1.4)], + # [(point_2.1), (point_2.2), (point_2.3), (point_2.4)], ...] + list_points = df.as_matrix() + del df['p1'], df['p2'], df['p3'], df['p4'] + + # List of polygons from the list of points + geometry = [Polygon(list(points)) for points in list_points] + # geometry = [] + # for point in list_points: + # print point + # geometry.append(Polygon(list(point))) + # print geometry[0] + # sys.exit() + # print len(geometry), len(df), + + gdf = gpd.GeoDataFrame(df, crs={'init': 'epsg:4326'}, geometry=geometry) + gdf = gdf.to_crs(self.crs) + + gdf['FID'] = gdf.index + + gdf.to_file(self.shapefile_path) + else: + settings.write_log('\t\tGrid shapefile already done. Lets try to read it.', level=3) + gdf = gpd.read_file(self.shapefile_path) + + settings.write_time('Grid', 'to_shapefile', timeit.default_timer() - st_time, level=1) + + return gdf + + def chech_coords_file(self): + """ + Checks if the auxiliary coordinates file is created well. + + :return: True: if it is well created. 
+ :rtype: bool + """ + # TODO better check by partition size + return os.path.exists(self.coords_netcdf_file) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/grids/grid_global.py b/hermesv3_gr/modules/grids/grid_global.py new file mode 100644 index 0000000000000000000000000000000000000000..ebbd97ec7df515ef37bbaf9f2b04b032d99011ae --- /dev/null +++ b/hermesv3_gr/modules/grids/grid_global.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import sys +import timeit + +import hermesv3_gr.config.settings as settings +from grid import Grid + + +class GlobalGrid(Grid): + """ + Global grid object that contains all the information to do a global output. + + :param grid_type: Type of the output grid [global, rotated, lcc, mercator]. + :type grid_type: str + + :param vertical_description_path: Path to the file that contains the vertical description. + :type vertical_description_path: str + + :param timestep_num: Number of timesteps. + :type timestep_num: int + + :param temporal_path: Path to the temporal folder. + :type temporal_path: str + + :param inc_lat: Increment between latitude centroids. + :type inc_lat: float + + :param inc_lon: Increment between longitude centroids. 
+ :type inc_lon: float + + :param center_longitude: Location of the longitude of the center cell. + Default = 0 + :type center_longitude: float + """ + + def __init__(self, grid_type, vertical_description_path, timestep_num, temporal_path, inc_lat, inc_lon, + center_longitude=float(0)): + import ESMF + + st_time = timeit.default_timer() + settings.write_log('\tCreating Global grid.', level=2) + + # Initialize the class using parent + super(GlobalGrid, self).__init__(grid_type, vertical_description_path, temporal_path) + + self.center_lat = float(0) + self.center_lon = center_longitude + self.inc_lat = inc_lat + self.inc_lon = inc_lon + + self.crs = {'init': 'epsg:4326'} + self.create_coords() + + if not os.path.exists(self.coords_netcdf_file): + if settings.rank == 0: + super(GlobalGrid, self).write_coords_netcdf() + settings.comm.Barrier() + + self.esmf_grid = super(GlobalGrid, self).create_esmf_grid_from_file(self.coords_netcdf_file) + + self.x_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][1] + self.x_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][1] + self.y_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][0] + self.y_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][0] + + self.shape = (timestep_num, len(self.vertical_description), self.x_upper_bound-self.x_lower_bound, + self.y_upper_bound-self.y_lower_bound) + + self.cell_area = self.get_cell_area()[self.x_lower_bound:self.x_upper_bound, + self.y_lower_bound:self.y_upper_bound] + + settings.write_time('GlobalGrid', 'Init', timeit.default_timer() - st_time, level=1) + + def create_coords(self): + """ + Create the coordinates for a global domain. 
+ """ + import numpy as np + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating global coordinates', level=3) + + self.center_latitudes = self.create_regular_grid_1d_array(self.center_lat, self.inc_lat, -90) + self.boundary_latitudes = self.create_bounds(self.center_latitudes, self.inc_lat) + + # ===== Longitudes ===== + self.center_longitudes = self.create_regular_grid_1d_array(self.center_lon, self.inc_lon, -180) + if len(self.center_longitudes)//2 < settings.size: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("ERROR: Maximum number of processors exceeded. " + + "It has to be less or equal than {0}.".format(len(self.center_longitudes)//2)) + sys.exit(1) + self.boundary_longitudes = self.create_bounds(self.center_longitudes, self.inc_lon) + + # Creating special cells with half cell on le left and right border + lat_origin = self.center_lat - abs(-90) + lon_origin = self.center_lon - abs(-180) + n_lat = (abs(-90) / self.inc_lat) * 2 + n_lon = (abs(-180) / self.inc_lon) * 2 + self.center_latitudes = np.concatenate([ + [lat_origin + self.inc_lat / 2 - self.inc_lat / 4], self.center_latitudes, + [lat_origin + (n_lat * self.inc_lat) - self.inc_lat / 2 + self.inc_lat / 4]]) + + self.center_longitudes = np.concatenate([ + [lon_origin + self.inc_lon / 2 - self.inc_lon / 4], self.center_longitudes, + [lon_origin + (n_lon * self.inc_lon) - self.inc_lon / 2 + self.inc_lon / 4]]) + + self.boundary_latitudes = np.concatenate([ + [[lat_origin, lat_origin + self.inc_lat / 2]], self.boundary_latitudes, + [[lat_origin + (n_lat * self.inc_lat) - self.inc_lat / 2, lat_origin + (n_lat * self.inc_lat)]]]) + + self.boundary_longitudes = np.concatenate([ + [[lon_origin, lon_origin + self.inc_lon / 2]], self.boundary_longitudes, + [[lon_origin + (n_lon * self.inc_lon) - self.inc_lon / 2, lon_origin + (n_lon * self.inc_lon)]]],) + + self.boundary_latitudes = self.boundary_latitudes.reshape((1,) + 
self.boundary_latitudes.shape) + self.boundary_longitudes = self.boundary_longitudes.reshape((1,) + self.boundary_longitudes.shape) + + settings.write_time('GlobalGrid', 'create_coords', timeit.default_timer() - st_time, level=2) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/grids/grid_lcc.py b/hermesv3_gr/modules/grids/grid_lcc.py new file mode 100644 index 0000000000000000000000000000000000000000..96ea0ec75b35862e4a462aa98438d26eed3ddb51 --- /dev/null +++ b/hermesv3_gr/modules/grids/grid_lcc.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import sys +import timeit +import hermesv3_gr.config.settings as settings +from grid import Grid + + +class LccGrid(Grid): + """ + Lambert Conformal Conic (LCC) grid object that contains all the information to do a lcc output. + + :param grid_type: Type of the output grid [global, rotated, lcc, mercator]. + :type grid_type: str + + :param vertical_description_path: Path to the file that contains the vertical description. + :type vertical_description_path: str + + :param timestep_num: Number of timesteps. + :type timestep_num: int + + :param temporal_path: Path to the temporal folder. + :type temporal_path: str + + :param lat_1: Value of the Lat1 for the LCC grid type. 
+ :type lat_1: float + + :param lat_2: Value of the Lat2 for the LCC grid type. + :type lat_2: float + + :param lon_0: Value of the Lon0 for the LCC grid type. + :type lon_0: float + + :param lat_0: Value of the Lat0 for the LCC grid type. + :type lat_0: float + + :param nx: Number of cells on the x dimension. + :type nx: int + + :param ny: Number of cells on the y dimension. + :type ny: int + + :param inc_x: Increment between x dimensions cell centroids (metres). + :type inc_x: int + + :param inc_y: Increment between y dimensions cell centroids (metres). + :type inc_y: int + + :param x_0: Value of the X0 for the LCC grid type. + :type x_0: float + + :param y_0: Value of the Y0 for the LCC grid type. + :type y_0: float + + :param earth_radius: Radius of the Earth (metres). + Default = 6370000.000 + :type earth_radius: float + """ + + def __init__(self, grid_type, vertical_description_path, timestep_num, temporal_path, lat_1, lat_2, lon_0, lat_0, + nx, ny, inc_x, inc_y, x_0, y_0, earth_radius=6370000.000): + import ESMF + st_time = timeit.default_timer() + settings.write_log('\tCreating Lambert Conformal Conic (LCC) grid.', level=2) + + # Initialises with parent class + super(LccGrid, self).__init__(grid_type, vertical_description_path, temporal_path) + + # Setting parameters + self.lat_1 = lat_1 + self.lat_2 = lat_2 + self.lon_0 = lon_0 + self.lat_0 = lat_0 + self.nx = nx + self.ny = ny + self.inc_x = inc_x + self.inc_y = inc_y + self.x_0 = x_0 + (inc_x / 2) + self.y_0 = y_0 + (inc_y / 2) + self.earth_radius = earth_radius + + # UTM coordinates + self.x = None + self.y = None + + # Creating coordinates + self.crs = "+proj=lcc +lat_1={0} +lat_2={1} +lat_0={2} +lon_0={3} +x_0={4} +y_0={5} +datum=WGS84".format( + self.lat_1, self.lat_2, self.lat_0, self.lon_0, 0, 0) + " +units=m" + self.create_coords() + + if not os.path.exists(self.coords_netcdf_file): + if settings.rank == 0: + # super(LccGrid, self).write_coords_netcdf() + self.write_coords_netcdf() + 
settings.comm.Barrier() + + self.esmf_grid = super(LccGrid, self).create_esmf_grid_from_file(self.coords_netcdf_file, sphere=False) + # + self.x_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][1] + self.x_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][1] + self.y_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][0] + self.y_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][0] + + self.shape = (timestep_num, len(self.vertical_description), self.x_upper_bound-self.x_lower_bound, + self.y_upper_bound-self.y_lower_bound) + # print 'Rank {0} _3_\n'.format(settings.rank) + settings.comm.Barrier() + # print 'Rank {0} _4_\n'.format(settings.rank) + self.cell_area = self.get_cell_area()[self.x_lower_bound:self.x_upper_bound, + self.y_lower_bound:self.y_upper_bound] + + settings.write_time('LccGrid', 'Init', timeit.default_timer() - st_time, level=1) + + def write_coords_netcdf(self): + """ + Writes the temporal file with the coordinates of the output needed to generate the weight matrix. + If it is already well created it will only add the cell_area parameter. + """ + from hermesv3_gr.tools.netcdf_tools import write_netcdf + + st_time = timeit.default_timer() + settings.write_log('\tWriting {0} file.'.format(self.coords_netcdf_file), level=3) + + if not self.chech_coords_file(): + # Writes an auxiliary empty NetCDF only with the coordinates and an empty variable. 
+ write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, boundary_longitudes=self.boundary_longitudes, + lcc=True, lcc_x=self.x, lcc_y=self.y, + lat_1_2="{0}, {1}".format(self.lat_1, self.lat_2), lon_0=self.lon_0, lat_0=self.lat_0) + + # Calculate the cell area of the auxiliary NetCDF file + self.cell_area = self.get_cell_area() + + # Re-writes the NetCDF adding the cell area + write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, boundary_longitudes=self.boundary_longitudes, + cell_area=self.cell_area, + lcc=True, lcc_x=self.x, lcc_y=self.y, + lat_1_2="{0}, {1}".format(self.lat_1, self.lat_2), lon_0=self.lon_0, lat_0=self.lat_0) + else: + self.cell_area = self.get_cell_area() + + settings.write_time('LccGrid', 'write_coords_netcdf', timeit.default_timer() - st_time, level=3) + + def create_coords(self): + """ + Create the coordinates for a lambert conformal conic domain. + """ + import numpy as np + from pyproj import Proj + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating lcc coordinates', level=3) + + # Create a regular grid in metres (Two 1D arrays) + self.x = np.arange(self.x_0, self.x_0 + self.inc_x * self.nx, self.inc_x, dtype=np.float) + if len(self.x)//2 < settings.size: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("ERROR: Maximum number of processors exceeded. 
" + + "It has to be less or equal than {0}.".format(len(self.x)//2)) + sys.exit(1) + self.y = np.arange(self.y_0, self.y_0 + self.inc_y * self.ny, self.inc_y, dtype=np.float) + + # 1D to 2D + x = np.array([self.x] * len(self.y)) + y = np.array([self.y] * len(self.x)).T + + # Create UTM bounds + y_b = super(LccGrid, self).create_bounds(y, self.inc_y, number_vertices=4, inverse=True) + x_b = super(LccGrid, self).create_bounds(x, self.inc_x, number_vertices=4) + + # Create the LCC projection + projection = Proj( + proj='lcc', + ellps='WGS84', + R=self.earth_radius, + lat_1=self.lat_1, + lat_2=self.lat_2, + lon_0=self.lon_0, + lat_0=self.lat_0, + to_meter=1, + x_0=0, + y_0=0, + a=self.earth_radius, + k_0=1.0) + + # UTM to LCC + self.center_longitudes, self.center_latitudes = projection(x, y, inverse=True) + self.boundary_longitudes, self.boundary_latitudes = projection(x_b, y_b, inverse=True) + + settings.write_time('LccGrid', 'create_coords', timeit.default_timer() - st_time, level=2) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/grids/grid_mercator.py b/hermesv3_gr/modules/grids/grid_mercator.py new file mode 100644 index 0000000000000000000000000000000000000000..f3104fbf6c2ba50452c63219260c457aef06986b --- /dev/null +++ b/hermesv3_gr/modules/grids/grid_mercator.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with HERMESv3_GR. If not, see .
+
+
+import os
+import sys
+import timeit
+import hermesv3_gr.config.settings as settings
+from grid import Grid
+
+
+class MercatorGrid(Grid):
+    """
+    Mercator grid object that contains all the information to do a mercator output.
+
+    :param grid_type: Type of the output grid [global, rotated, lcc, mercator].
+    :type grid_type: str
+
+    :param vertical_description_path: Path to the file that contains the vertical description.
+    :type vertical_description_path: str
+
+    :param timestep_num: Number of timesteps.
+    :type timestep_num: int
+
+    :param temporal_path: Path to the temporal folder.
+    :type temporal_path: str
+
+    :param lon_0: Value of the Lon0 for the Mercator grid type.
+    :type lon_0: float
+
+    :param nx: Number of cells on the x dimension.
+    :type nx: int
+
+    :param ny: Number of cells on the y dimension.
+    :type ny: int
+
+    :param inc_x: Increment between x dimensions cell centroids (metres).
+    :type inc_x: int
+
+    :param inc_y: Increment between y dimensions cell centroids (metres).
+    :type inc_y: int
+
+    :param x_0: Value of the X0 for the Mercator grid type.
+    :type x_0: float
+
+    :param y_0: Value of the Y0 for the Mercator grid type.
+    :type y_0: float
+
+    :param earth_radius: Radius of the Earth (metres).
+ Default = 6370000.000 + :type earth_radius: float + """ + + def __init__(self, grid_type, vertical_description_path, timestep_num, temporal_path, lat_ts, lon_0, + nx, ny, inc_x, inc_y, x_0, y_0, earth_radius=6370000.000): + import ESMF + st_time = timeit.default_timer() + settings.write_log('\tCreating Mercator grid.', level=2) + + # Initialises with parent class + super(MercatorGrid, self).__init__(grid_type, vertical_description_path, temporal_path) + + # Setting parameters + self.lat_ts = lat_ts + self.lon_0 = lon_0 + self.nx = nx + self.ny = ny + self.inc_x = inc_x + self.inc_y = inc_y + self.x_0 = x_0 + (inc_x / 2) + self.y_0 = y_0 + (inc_y / 2) + self.earth_radius = earth_radius + + # UTM coordinates + self.x = None + self.y = None + + # Creating coordinates + self.crs = "+proj=merc +a={2} +b={2} +lat_ts={0} +lon_0={1}".format(self.lat_ts, self.lon_0, earth_radius) + + self.create_coords() + + if not os.path.exists(self.coords_netcdf_file): + if settings.rank == 0: + self.write_coords_netcdf() + settings.comm.Barrier() + + self.esmf_grid = super(MercatorGrid, self).create_esmf_grid_from_file(self.coords_netcdf_file, sphere=False) + # + self.x_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][1] + self.x_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][1] + self.y_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][0] + self.y_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][0] + + self.shape = (timestep_num, len(self.vertical_description), self.x_upper_bound-self.x_lower_bound, + self.y_upper_bound-self.y_lower_bound) + # print 'Rank {0} _3_\n'.format(settings.rank) + settings.comm.Barrier() + # print 'Rank {0} _4_\n'.format(settings.rank) + self.cell_area = self.get_cell_area()[self.x_lower_bound:self.x_upper_bound, + self.y_lower_bound:self.y_upper_bound] + + settings.write_time('MercatorGrid', 'Init', timeit.default_timer() - st_time, level=1) + + def write_coords_netcdf(self): + """ + 
Writes the temporal file with the coordinates of the output needed to generate the weight matrix. + If it is already well created it will only add the cell_area parameter. + """ + from hermesv3_gr.tools.netcdf_tools import write_netcdf + + st_time = timeit.default_timer() + + if not self.chech_coords_file(): + # Writes an auxiliary empty NetCDF only with the coordinates and an empty variable. + write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, boundary_longitudes=self.boundary_longitudes, + mercator=True, lcc_x=self.x, lcc_y=self.y, lon_0=self.lon_0, lat_ts=self.lat_ts) + + # Calculate the cell area of the auxiliary NetCDF file + self.cell_area = self.get_cell_area() + + # Re-writes the NetCDF adding the cell area + write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [ + {'name': 'var_aux', + 'units': '', + 'data': 0} + ], + boundary_latitudes=self.boundary_latitudes, boundary_longitudes=self.boundary_longitudes, + cell_area=self.cell_area, + mercator=True, lcc_x=self.x, lcc_y=self.y, lon_0=self.lon_0, lat_ts=self.lat_ts) + else: + self.cell_area = self.get_cell_area() + + settings.write_time('MercatorGrid', 'write_coords_netcdf', timeit.default_timer() - st_time, level=3) + + def create_coords(self): + """ + Create the coordinates for a lambert conformal conic domain. + """ + import numpy as np + from pyproj import Proj + + st_time = timeit.default_timer() + + # Create a regular grid in metres (Two 1D arrays) + self.x = np.arange(self.x_0, self.x_0 + self.inc_x * self.nx, self.inc_x, dtype=np.float) + if len(self.x)//2 < settings.size: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("ERROR: Maximum number of processors exceeded. 
" + + "It has to be less or equal than {0}.".format(len(self.x)//2)) + sys.exit(1) + self.y = np.arange(self.y_0, self.y_0 + self.inc_y * self.ny, self.inc_y, dtype=np.float) + + # 1D to 2D + x = np.array([self.x] * len(self.y)) + y = np.array([self.y] * len(self.x)).T + + # Create UTM bounds + y_b = super(MercatorGrid, self).create_bounds(y, self.inc_y, number_vertices=4, inverse=True) + x_b = super(MercatorGrid, self).create_bounds(x, self.inc_x, number_vertices=4) + + # Create the LCC projection + projection = Proj(self.crs) + + # UTM to Mercator + self.center_longitudes, self.center_latitudes = projection(x, y, inverse=True) + self.boundary_longitudes, self.boundary_latitudes = projection(x_b, y_b, inverse=True) + + settings.write_time('MercatorGrid', 'create_coords', timeit.default_timer() - st_time, level=3) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/grids/grid_rotated.py b/hermesv3_gr/modules/grids/grid_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..856630075c7ed241f39dbe6818bf0dae9fe8b18c --- /dev/null +++ b/hermesv3_gr/modules/grids/grid_rotated.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +import sys +import os +import timeit +import hermesv3_gr.config.settings as settings +from grid import Grid + + +class RotatedGrid(Grid): + # TODO Rotated options description + """ + :param grid_type: Type of the output grid [global, rotated, lcc, mercator]. + :type grid_type: str + + :param vertical_description_path: Path to the file that contains the vertical description. + :type vertical_description_path: str + + + :param timestep_num: Number of timesteps. + :type timestep_num: int + """ + + def __init__(self, grid_type, vertical_description_path, timestep_num, temporal_path, centre_lat, centre_lon, + west_boundary, south_boundary, inc_rlat, inc_rlon): + import ESMF + + st_time = timeit.default_timer() + settings.write_log('\tCreating Rotated grid.', level=2) + + # Initialises with parent class + super(RotatedGrid, self).__init__(grid_type, vertical_description_path, temporal_path) + + # Setting parameters + self.new_pole_longitude_degrees = -180 + centre_lon + self.new_pole_latitude_degrees = centre_lat # 90 - centre_lat + self.centre_lat = centre_lat + self.centre_lon = centre_lon + self.west_boundary = west_boundary # + inc_rlon #/ 2 + self.south_boundary = south_boundary # + inc_rlat #/ 2 + self.inc_rlat = inc_rlat + self.inc_rlon = inc_rlon + self.n_lat = int((abs(south_boundary) / inc_rlat) * 2 + 1) + self.n_lon = int((abs(west_boundary) / inc_rlon) * 2 + 1) + + # Rotated coordinates + self.rlat = None + self.rlon = None + + # Create coordinates + self.crs = {'init': 'epsg:4326'} + self.create_coords() + + if not os.path.exists(self.coords_netcdf_file): + if settings.rank == 0: + # super(RotatedGrid, self).write_coords_netcdf() + self.write_coords_netcdf() + settings.comm.Barrier() + + # self.write_coords_netcdf() + + self.esmf_grid = super(RotatedGrid, self).create_esmf_grid_from_file(self.coords_netcdf_file, sphere=False) + + self.x_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][1] + self.x_upper_bound = 
self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][1] + self.y_lower_bound = self.esmf_grid.lower_bounds[ESMF.StaggerLoc.CENTER][0] + self.y_upper_bound = self.esmf_grid.upper_bounds[ESMF.StaggerLoc.CENTER][0] + + self.shape = (timestep_num, len(self.vertical_description), self.x_upper_bound-self.x_lower_bound, + self.y_upper_bound-self.y_lower_bound) + + self.cell_area = self.get_cell_area()[self.x_lower_bound:self.x_upper_bound, + self.y_lower_bound:self.y_upper_bound] + + settings.write_time('RotatedGrid', 'Init', timeit.default_timer() - st_time, level=1) + + def create_coords(self): + """ + Create the coordinates for a rotated domain. + """ + from hermesv3_gr.tools.coordinates_tools import create_regular_rotated + import numpy as np + + st_time = timeit.default_timer() + settings.write_log('\t\tCreating rotated coordinates.', level=3) + + # Create rotated coordinates + (self.rlat, self.rlon, br_lats_single, br_lons_single) = create_regular_rotated( + self.south_boundary, self.west_boundary, self.inc_rlat, self.inc_rlon, self.n_lat, self.n_lon) + if len(self.rlon)//2 < settings.size: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("ERROR: Maximum number of processors exceeded. 
" + + "It has to be less or equal than {0}.".format(len(self.rlon)//2)) + sys.exit(1) + # 1D to 2D + c_lats = np.array([self.rlat] * len(self.rlon)).T + c_lons = np.array([self.rlon] * len(self.rlat)) + + # Create rotated boundary coordinates + b_lats = super(RotatedGrid, self).create_bounds(c_lats, self.inc_rlat, number_vertices=4, inverse=True) + b_lons = super(RotatedGrid, self).create_bounds(c_lons, self.inc_rlon, number_vertices=4) + + # Rotated to Lat-Lon + self.boundary_longitudes, self.boundary_latitudes = self.rotated2latlon(b_lons, b_lats) + self.center_longitudes, self.center_latitudes = self.rotated2latlon(c_lons, c_lats) + + settings.write_time('RotatedGrid', 'create_coords', timeit.default_timer() - st_time, level=2) + + def rotated2latlon(self, lon_deg, lat_deg, lon_min=-180): + """ + Calculate the unrotated coordinates using the rotated ones. + + :param lon_deg: Rotated longitude coordinate. + :type lon_deg: numpy.array + + :param lat_deg: Rotated latitude coordinate. + :type lat_deg: numpy.array + + :param lon_min: Minimum value for the longitudes: -180 (-180 to 180) or 0 (0 to 360) + :type lon_min: float + + :return: Unrotated coordinates. Longitudes, Latitudes + :rtype: tuple(numpy.array, numpy.array) + """ + import numpy as np + import math + + st_time = timeit.default_timer() + settings.write_log('\t\t\tTransforming rotated coordinates to latitude, longitude coordinates.', level=3) + + # TODO Document this function + degrees_to_radians = math.pi / 180. + # radians_to_degrees = 180. 
/ math.pi + + # Positive east to negative east + # self.new_pole_longitude_degrees -= 180 + + tph0 = self.new_pole_latitude_degrees * degrees_to_radians + tlm = lon_deg * degrees_to_radians + tph = lat_deg * degrees_to_radians + tlm0d = self.new_pole_longitude_degrees + ctph0 = np.cos(tph0) + stph0 = np.sin(tph0) + + stlm = np.sin(tlm) + ctlm = np.cos(tlm) + stph = np.sin(tph) + ctph = np.cos(tph) + + # Latitude + sph = (ctph0 * stph) + (stph0 * ctph * ctlm) + # if sph > 1.: + # sph = 1. + # if sph < -1.: + # sph = -1. + # print type(sph) + sph[sph > 1.] = 1. + sph[sph < -1.] = -1. + + aph = np.arcsin(sph) + aphd = aph / degrees_to_radians + + # Longitude + anum = ctph * stlm + denom = (ctlm * ctph - stph0 * sph) / ctph0 + relm = np.arctan2(anum, denom) - math.pi + almd = relm / degrees_to_radians + tlm0d + + # if almd < min_lon: + # almd += 360 + # elif almd > max_lon: + # almd -= 360 + almd[almd > (lon_min + 360)] -= 360 + almd[almd < lon_min] += 360 + + settings.write_time('RotatedGrid', 'rotated2latlon', timeit.default_timer() - st_time, level=3) + + return almd, aphd + + def write_coords_netcdf(self): + """ + Writes the temporal file with the coordinates of the output needed to generate the weight matrix. + If it is already well created it will only add the cell_area parameter. + """ + from hermesv3_gr.modules.writing.writer import Writer + + st_time = timeit.default_timer() + settings.write_log('\tWriting {0} file.'.format(self.coords_netcdf_file), level=3) + + if not self.chech_coords_file(): + # Writes an auxiliary empty NetCDF only with the coordinates and an empty variable. 
+ Writer.write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, + boundary_longitudes=self.boundary_longitudes, + roated=True, rotated_lats=self.rlat, rotated_lons=self.rlon, + north_pole_lat=self.new_pole_latitude_degrees, + north_pole_lon=self.new_pole_longitude_degrees) + + # Calculate the cell area of the auxiliary NetCDF file + self.cell_area = self.get_cell_area() + + # Re-writes the NetCDF adding the cell area + Writer.write_netcdf(self.coords_netcdf_file, self.center_latitudes, self.center_longitudes, + [{'name': 'var_aux', 'units': '', 'data': 0}], + boundary_latitudes=self.boundary_latitudes, + boundary_longitudes=self.boundary_longitudes, cell_area=self.cell_area, + roated=True, rotated_lats=self.rlat, rotated_lons=self.rlon, + north_pole_lat=self.new_pole_latitude_degrees, + north_pole_lon=self.new_pole_longitude_degrees) + else: + self.cell_area = self.get_cell_area() + + settings.write_time('RotatedGrid', 'write_coords_netcdf', timeit.default_timer() - st_time, level=3) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/masking/__init__.py b/hermesv3_gr/modules/masking/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/masking/masking.py b/hermesv3_gr/modules/masking/masking.py new file mode 100644 index 0000000000000000000000000000000000000000..36b1c93f85a709e9e5e7c771e9c39b9a3248ae09 --- /dev/null +++ b/hermesv3_gr/modules/masking/masking.py @@ -0,0 +1,301 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import timeit +from warnings import warn as warning +import hermesv3_gr.config.settings as settings + + +class Masking(object): + """ + Masking object to apply simple mask or factor mask. + + :param world_info: Path to the file that contains the ISO Codes and other relevant information. + :type world_info: str + + :param factors_mask_values: List of the factor mask values. + :type factors_mask_values: list + + :param regrid_mask_values: List of the mask values. + :type regrid_mask_values: list + + :param grid: Grid. 
+    :type grid: Grid
+
+    :param world_mask_file: Path to the NetCDF file that contains the world mask (country-code grid).
+    :type world_mask_file: str
+    """
+
+    def __init__(self, world_info, factors_mask_values, regrid_mask_values, grid, world_mask_file=None):
+        from timezonefinder import TimezoneFinder
+
+        st_time = timeit.default_timer()
+        settings.write_log('\t\tCreating mask.', level=2)
+
+        self.adding = None
+        self.world_info = world_info
+        self.country_codes = self.get_country_codes()
+        self.world_mask_file = world_mask_file
+        self.factors_mask_values = self.parse_factor_values(factors_mask_values)
+        self.regrid_mask_values = self.parse_masking_values(regrid_mask_values)
+        self.regrid_mask = None
+        self.scale_mask = None
+        self.timezonefinder = TimezoneFinder()
+
+        self.grid = grid
+
+        settings.write_time('Masking', 'Init', timeit.default_timer() - st_time, level=3)
+
+    def get_country_codes(self):
+        """
+        Get the country code information.
+
+        :return: Dictionary of country codes.
+        :rtype: dict
+        """
+        import pandas as pd
+
+        st_time = timeit.default_timer()
+
+        dataframe = pd.read_csv(self.world_info, sep=';')
+        del dataframe['time_zone'], dataframe['time_zone_code']
+        dataframe = dataframe.drop_duplicates().dropna()
+        dataframe = dataframe.set_index('country_code_alpha')
+        countries_dict = dataframe.to_dict()
+        countries_dict = countries_dict['country_code']
+
+        settings.write_time('Masking', 'get_country_codes', timeit.default_timer() - st_time, level=3)
+        return countries_dict
+
+    @staticmethod
+    def partlst(lst, num):
+        """
+        Split an array into N balanced arrays.
+
+        :param lst: Array to split.
+        :type lst: numpy.array
+
+        :param num: Number of mini arrays.
+ :type num: int + + :return: Array + :type: numpy.array + """ + import itertools + # Partition @lst in @n balanced parts, in given order + parts, rest = divmod(len(lst), num) + lstiter = iter(lst) + for j in xrange(num): + plen = len(lst) / num + (1 if rest > 0 else 0) + rest -= 1 + yield list(itertools.islice(lstiter, plen)) + + def create_country_iso(self, in_nc): + import numpy as np + from hermesv3_gr.tools.netcdf_tools import extract_vars + from hermesv3_gr.modules.writing.writer import Writer + + st_time = timeit.default_timer() + settings.write_log('\t\t\tCreating {0} file.'.format(self.world_mask_file), level=2) + # output_path = os.path.join(output_dir, 'iso.nc') + + lat_o, lon_o = extract_vars(in_nc, ['lat', 'lon']) + lon = np.array([lon_o['data']] * len(lat_o['data'])) + lat = np.array([lat_o['data']] * len(lon_o['data'])).T + + dst_var = [] + num = 0 + points = np.array(zip(lat.flatten(), lon.flatten())) + + points_list = list(self.partlst(points, settings.size)) + + for lat_aux, lon_aux in points_list[settings.rank]: + num += 1 + + settings.write_log("\t\t\t\tlat:{0}, lon:{1} ({2}/{3})".format( + lat_aux, lon_aux, num, len(points_list[settings.rank])), level=3) + + tz = self.find_timezone(lat_aux, lon_aux) + tz_id = self.get_iso_code_from_tz(tz) + dst_var.append(tz_id) + dst_var = np.array(dst_var) + dst_var = settings.comm.gather(dst_var, root=0) + + if settings.rank == 0: + dst_var = np.concatenate(dst_var) + dst_var = dst_var.reshape((1,) + lat.shape) + data = [{ + 'name': 'timezone_id', + 'units': '', + 'data': dst_var, + }] + Writer.write_netcdf(self.world_mask_file, lat, lon, data, regular_latlon=True) + settings.comm.Barrier() + + settings.write_time('Masking', 'create_country_iso', timeit.default_timer() - st_time, level=3) + + return True + + def find_timezone(self, latitude, longitude): + + st_time = timeit.default_timer() + + if longitude < -180: + longitude += 360 + elif longitude > +180: + longitude -= 360 + + tz = 
self.timezonefinder.timezone_at(lng=longitude, lat=latitude) + + settings.write_time('Masking', 'find_timezone', timeit.default_timer() - st_time, level=3) + + return tz + + def get_iso_code_from_tz(self, tz): + import pandas as pd + + st_time = timeit.default_timer() + + zero_values = [None, ] + if tz in zero_values: + return 0 + + df = pd.read_csv(self.world_info, sep=';') + code = df.country_code[df.time_zone == tz].values + + settings.write_time('Masking', 'get_iso_code_from_tz', timeit.default_timer() - st_time, level=3) + + return code[0] + + def parse_factor_values(self, values): + """ + + :param values: + :return: + :rtype: dict + """ + import re + + st_time = timeit.default_timer() + + if type(values) != str: + return None + values = list(map(str, re.split(' , |, | ,|,', values))) + scale_dict = {} + for element in values: + element = list(map(str, re.split("{0}{0}|{0}".format(' '), element))) + scale_dict[int(self.country_codes[element[0]])] = element[1] + + settings.write_log('\t\t\tApplying scaling factors for {0}.'.format(values), level=3) + settings.write_time('Masking', 'parse_factor_values', timeit.default_timer() - st_time, level=3) + + return scale_dict + + def parse_masking_values(self, values): + """ + + :param values: + :return: + :rtype: list + """ + import re + + st_time = timeit.default_timer() + + if type(values) != str: + return None + values = list(map(str, re.split(' , |, | ,|,| ', values))) + if values[0] == '+': + self.adding = True + elif values[0] == '-': + self.adding = False + else: + if len(values) > 0: + settings.write_log('WARNING: Check the .err file to get more info. Ignoring mask') + if settings.rank == 0: + warning("WARNING: The list of masking does not start with '+' or '-'. 
Ignoring mask.") + return None + code_list = [] + for country in values[1:]: + code_list.append(int(self.country_codes[country])) + + if self.adding: + settings.write_log("\t\t\tCreating mask to do {0} countries.".format(values[1:]), level=3) + else: + settings.write_log("\t\t\tCreating mask to avoid {0} countries.".format(values[1:]), level=3) + settings.write_time('Masking', 'parse_masking_values', timeit.default_timer() - st_time, level=3) + + return code_list + + def check_regrid_mask(self, input_file): + + if self.regrid_mask_values is not None: + if not os.path.exists(self.world_mask_file): + self.create_country_iso(input_file) + self.regrid_mask = self.custom_regrid_mask() + if self.factors_mask_values is not None: + if not os.path.exists(self.world_mask_file): + self.create_country_iso(input_file) + self.scale_mask = self.custom_scale_mask() + + def custom_regrid_mask(self): + import numpy as np + from netCDF4 import Dataset + + st_time = timeit.default_timer() + + netcdf = Dataset(self.world_mask_file, mode='r') + values = netcdf.variables['timezone_id'][:] + netcdf.close() + + if self.adding: + mask = np.zeros(values.shape) + for code in self.regrid_mask_values: + mask[values == code] = 1 + else: + mask = np.ones(values.shape) + for code in self.regrid_mask_values: + mask[values == code] = 0 + + settings.write_time('Masking', 'custom_regrid_mask', timeit.default_timer() - st_time, level=3) + + return mask + + def custom_scale_mask(self): + import numpy as np + from hermesv3_gr.tools.netcdf_tools import extract_vars + + st_time = timeit.default_timer() + + [values] = extract_vars(self.world_mask_file, ['timezone_id']) + + values = values['data'] + mask = np.ones(values.shape) + for code, factor in self.factors_mask_values.iteritems(): + mask[values == code] = factor + + settings.write_time('Masking', 'custom_scale_mask', timeit.default_timer() - st_time, level=3) + + return mask + + +if __name__ == '__main__': + pass diff --git 
a/hermesv3_gr/modules/regrid/__init__.py b/hermesv3_gr/modules/regrid/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/regrid/regrid.py b/hermesv3_gr/modules/regrid/regrid.py new file mode 100644 index 0000000000000000000000000000000000000000..9d224a9b07148e36015bce72b4d3ae688d632c11 --- /dev/null +++ b/hermesv3_gr/modules/regrid/regrid.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + +import os +import numpy as np +import timeit +import hermesv3_gr.config.settings as settings + + +class Regrid(object): + # TODO Documentation + def __init__(self, pollutant_dicts, weight_matrix_file, grid, masking=None): + st_time = timeit.default_timer() + settings.write_log('\t\t\tInitializing Regrid.', level=3) + + self.grid = grid + self.pollutant_dicts = pollutant_dicts + self.weight_matrix_file = weight_matrix_file + self.masking = masking + + if not self.is_created_weight_matrix(erase=False): + settings.write_log("\t\t\tWeight matrix {0} is not created. 
".format(weight_matrix_file) + + "Trying to create it", level=1) + settings.comm.Barrier() + self.create_weight_matrix() + + settings.write_time('Regrid', 'Init', round(timeit.default_timer() - st_time), level=3) + + def create_weight_matrix(self): + """ + This function is not used because all the child classes have to implement it. + """ + pass + # implemented on inner class + + def apply_weights(self, values): + """ + Calculate the regridded values using the ESMF algorithm for a 3D array. + + :param values: Input values to regrid + :type values: numpy.array + + :return: Values already regridded. + :rtype: numpy.array + """ + from netCDF4 import Dataset + + st_time = timeit.default_timer() + + # Read weight matrix + nc_weights = Dataset(self.weight_matrix_file, mode='r') + + src_indices = nc_weights.variables['src_indices'][:] + max_index = nc_weights.variables['dst_indices'][:].max() + 1 + dst_indices_counts = nc_weights.variables['dst_indices_count'][:] + weights = nc_weights.variables['weights'][:] + + nc_weights.close() + + # Do masking + if self.masking.regrid_mask is not None: + values = np.multiply(values, self.masking.regrid_mask) + # Do scalling + if self.masking.scale_mask is not None: + values = np.multiply(values, self.masking.scale_mask) + values = values.reshape(values.shape[1], values.shape[2] * values.shape[3]) + + # Expand src values + src_aux = np.take(values, src_indices, axis=1) + + # Apply weights + dst_field_aux = np.multiply(src_aux, weights) + + # Reduce dst values + dst_field = self.reduce_dst_field(dst_field_aux, dst_indices_counts, max_index) + + settings.write_time('Regrid', 'apply_weights', timeit.default_timer() - st_time, level=3) + + return dst_field + + @staticmethod + def reduce_dst_field(dst_field_extended, dst_indices, max_index): + """ + Reduces the values of the regridded data. 
+ eg: + dst_field_extended = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + goes to: 0 0 0 1 1 1 2 2 2 2 + + dst_indices = [3, 6, 10] + + result = [0+1+2, 3+4+5+, 6+7+8+9] + result = [3, 12, 30] + + :param dst_field_extended: Array with as many elements as interconnections between src and dst with the dst + values to be gathered. + :type dst_field_extended: numpy.array + + :param dst_indices: Array with the last element index to + :type dst_indices: numpy.array + + :param max_index: + :type max_index: int + + :return: + :rtype: numpy.array + """ + st_time = timeit.default_timer() + + # Create new + dst_field = np.zeros((dst_field_extended.shape[0], max_index), dtype=settings.precision) + # dst_field = np.zeros((dst_field_extended.shape[0], self.grid.shape[-1] * self.grid.shape[-2])) + + previous = 0 + count = 0 + for i in dst_indices: + try: + dst_field[:, count] = dst_field_extended[:, previous:i].sum(axis=1, dtype=settings.precision) + except ValueError: + pass + count += 1 + previous = i + + settings.write_time('Regrid', 'reduce_dst_field', timeit.default_timer() - st_time, level=3) + + return dst_field + + def is_created_weight_matrix(self, erase=False): + """ + Checks if the weight matrix is created + + :return: Boolean that indicates if the weight matrix is already created. + :rtype: bool + """ + if erase and settings.rank == 0: + if os.path.exists(self.weight_matrix_file): + os.remove(self.weight_matrix_file) + + return os.path.exists(self.weight_matrix_file) + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/regrid/regrid_conservative.py b/hermesv3_gr/modules/regrid/regrid_conservative.py new file mode 100644 index 0000000000000000000000000000000000000000..f2de4d1ed0b96bac1bd6ea35928daf0d7be1e7bf --- /dev/null +++ b/hermesv3_gr/modules/regrid/regrid_conservative.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + +import os +import numpy as np +import timeit +import ESMF + +import hermesv3_gr.config.settings as settings +from regrid import Regrid + + +class ConservativeRegrid(Regrid): + # TODO Documentation + def __init__(self, pollutant_dicts, weight_matrix_file, grid, masking=None): + st_time = timeit.default_timer() + settings.write_log('\t\tInitializing Conservative regrid.', level=2) + + super(ConservativeRegrid, self).__init__(pollutant_dicts, weight_matrix_file, grid, masking=masking) + + settings.write_time('ConservativeRegrid', 'Init', timeit.default_timer() - st_time, level=2) + + def create_weight_matrix(self): + """ + Calls to ESMF_RegridWeightGen to generate the weight matrix. 
+ """ + + st_time = timeit.default_timer() + + src_grid = self.grid.create_esmf_grid_from_file(self.pollutant_dicts[0]['path']) + src_field = ESMF.Field(src_grid, name='my input field') + src_field.read(filename=self.pollutant_dicts[0]['path'], variable=self.pollutant_dicts[0]['name'], + timeslice=0) + + dst_grid = self.grid.esmf_grid + dst_field = ESMF.Field(dst_grid, name='my outut field') + ESMF.Regrid(src_field, dst_field, filename=self.weight_matrix_file, regrid_method=ESMF.RegridMethod.CONSERVE,) + + settings.write_time('ConservativeRegrid', 'create_weight_matrix', timeit.default_timer() - st_time, level=1) + + def start_regridding(self, gfas=False, vertical=None): + # TODO Documentation + from hermesv3_gr.tools.netcdf_tools import extract_vars + + st_time = timeit.default_timer() + + weights = self.read_weight_matrix() + + dst_field_list = [] + num = 1 + for pollutant_single_dict in self.pollutant_dicts: + settings.write_log('\t\tPollutant {0} ({1}/{2})'.format( + pollutant_single_dict['name'], num, len(self.pollutant_dicts)), level=3) + num += 1 + + [values] = extract_vars(pollutant_single_dict['path'], [pollutant_single_dict['name']]) + values = values['data'] + if gfas: + values = vertical.do_vertical_interpolation_allocation(values, vertical.altitude) + # Do masking + if self.masking.regrid_mask is not None: + values = np.multiply(values, self.masking.regrid_mask) + # Do scalling + if self.masking.scale_mask is not None: + values = np.multiply(values, self.masking.scale_mask) + if gfas: + values = values.reshape((values.shape[-3], values.shape[-2] * values.shape[-1],)) + else: + values = values.reshape((1, values.shape[-2] * values.shape[-1],)) + + unique, counts = np.unique(weights['row'], return_counts=True) + new_dst_indices = np.cumsum(counts) + + # Expand src values + src_aux = np.take(values, weights['col'], axis=1) + + # Apply weights + dst_field_aux = np.multiply(src_aux, weights['S']) + + # Reduce dst values + dst_field = 
self.reduce_dst_field(dst_field_aux, new_dst_indices, self.grid.shape[-1] * self.grid.shape[-2]) + + if gfas: + dst_field = vertical.do_vertical_interpolation(dst_field) + dst_field = dst_field.reshape((self.grid.shape[-3], self.grid.shape[-2], self.grid.shape[-1],)) + else: + dst_field = dst_field.reshape((self.grid.shape[-2], self.grid.shape[-1],)) + + dst_field_list.append({'data': dst_field, 'name': pollutant_single_dict['name']}) + + settings.write_time('ConservativeRegrid', 'start_regridding', timeit.default_timer() - st_time, level=3) + return dst_field_list + + def read_weight_matrix(self): + from netCDF4 import Dataset + dict_aux = {} + nc = Dataset(self.weight_matrix_file, mode='r') + + dict_aux['col'] = nc.variables['col'][:] + dict_aux['row'] = nc.variables['row'][:] + dict_aux['S'] = nc.variables['S'][:] + nc.close() + dict_aux['max'] = dict_aux['row'].max() + + dict_aux['col'] -= 1 + dict_aux['row'] -= 1 + + if settings.size != 1: + inc = dict_aux['row'][:-1] - dict_aux['row'][1:] + index = np.where(inc > inc.max() * 0.5)[0] + index = np.concatenate([[0], index]) + + try: + if settings.rank != 0: + dict_aux['col'] = dict_aux['col'][index[settings.rank] + 1: index[settings.rank + 1] + 1] + dict_aux['row'] = dict_aux['row'][index[settings.rank] + 1: index[settings.rank + 1] + 1] + dict_aux['S'] = dict_aux['S'][index[settings.rank] + 1: index[settings.rank + 1] + 1] + else: + dict_aux['col'] = dict_aux['col'][: index[settings.rank + 1] + 1] + dict_aux['row'] = dict_aux['row'][: index[settings.rank + 1] + 1] + dict_aux['S'] = dict_aux['S'][: index[settings.rank + 1] + 1] + except IndexError: + dict_aux['col'] = dict_aux['col'][index[settings.rank] + 1:] + dict_aux['row'] = dict_aux['row'][index[settings.rank] + 1:] + dict_aux['S'] = dict_aux['S'][index[settings.rank] + 1:] + + return dict_aux + + def wait_to_weightmatrix(self): + import time + + find = False + + while not find: + if os.path.exists(self.weight_matrix_file): + pre_size = 0 + post_size = 1 + 
print "I'm {0}".format(settings.rank), 'Writing Weight Matrix {0}'.format(self.weight_matrix_file) + # find = True + while pre_size != post_size: + print "I'm {0}".format(settings.rank), pre_size, post_size + pre_size = post_size + post_size = os.path.getsize(self.weight_matrix_file) + time.sleep(1) + find = True + print "I'm {0}".format(settings.rank), 'FINISHED' + else: + time.sleep(5) + print "I'm {0}".format(settings.rank), 'Waiting Weight Matrix' + + def apply_weights(self, values): + """ + Calculate the regridded values using the ESMF algorithm for a 3D array specifically for a conservative regrid. + + :param values: Input values to regrid. + :type values: numpy.array + + :return: Values already regridded. + :rtype: numpy.array + """ + + dst_field = super(ConservativeRegrid, self).apply_weights(values) + + return dst_field + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/speciation/__init__.py b/hermesv3_gr/modules/speciation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/speciation/speciation.py b/hermesv3_gr/modules/speciation/speciation.py new file mode 100644 index 0000000000000000000000000000000000000000..6084c44978eb0db59489bd7fbf3862592a61fdd9 --- /dev/null +++ b/hermesv3_gr/modules/speciation/speciation.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import hermesv3_gr.config.settings as settings +from warnings import warn as warning + + +class Speciation(object): + """ + Speciation class that contains all the needed information to do the speciation. + + :param speciation_id: ID of the speciation profile that have to be in the speciation profile file. + :type speciation_id: str + + :param speciation_profile_path: Path to the file that contains all the speciation profiles. + :type speciation_profile_path: str + + :param molecular_weights_path: Path to the file that contains all the needed molecular weights. + :type molecular_weights_path: str + """ + def __init__(self, speciation_id, speciation_profile_path, molecular_weights_path): + st_time = timeit.default_timer() + settings.write_log('\t\tInitializing Speciation.', level=2) + + self.id = speciation_id + self.speciation_profile = self.get_speciation_profile(speciation_profile_path) + self.molecular_weights_path = molecular_weights_path + self.molecular_weights = self.extract_molecular_weights(molecular_weights_path) + + settings.write_time('Speciation', 'Init', timeit.default_timer() - st_time, level=2) + + def get_speciation_profile(self, speciation_profile_path): + """ + Extract the speciation information as a dictionary with the destiny pollutant as key and the formula as value. + + :param speciation_profile_path: + :type speciation_profile_path: + + :return: List of dictionaries. Each dictionary has the keys 'name', 'formula', 'units' and 'long_name. 
+ :rtype: list + """ + import pandas as pd + + st_time = timeit.default_timer() + settings.write_log("\t\t\tGetting speciation profile id '{0}' from {1} .".format( + self.id, speciation_profile_path), level=3) + + df = pd.read_csv(speciation_profile_path, sep=';') + + try: + formulas_dict = df.loc[df[df.ID == self.id].index[0]].to_dict() + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Speciation profile ID {0} is not in the {1} file.'.format( + self.id, speciation_profile_path)) + sys.exit(1) + formulas_dict.pop('ID', None) + units_dict = df.loc[df[df.ID == 'units'].index[0]].to_dict() + + units_dict.pop('ID', None) + long_name_dict = df.loc[df[df.ID == 'short_description'].index[0]].to_dict() + long_name_dict.pop('ID', None) + profile_list = [] + for key in formulas_dict.iterkeys(): + profile_list.append({ + 'name': key, + 'formula': formulas_dict[key], + 'units': units_dict[key], + 'long_name': long_name_dict[key] + }) + + settings.write_time('Speciation', 'get_speciation_profile', timeit.default_timer() - st_time, level=3) + return profile_list + + @staticmethod + def extract_molecular_weights(molecular_weights_path): + """ + Extract the molecular weights for each pollutant as a dictionary with the name of the pollutant as key and the + molecular weight as value. + + :param molecular_weights_path: Path to the CSV that contains all the molecular weights. + :type molecular_weights_path: str + + :return: Dictionary with the name of the pollutant as key and the molecular weight as value. 
+ :rtype: dict + """ + import pandas as pd + + st_time = timeit.default_timer() + + df = pd.read_csv(molecular_weights_path, sep=';') + + dict_aux = {} + + for i, element in df.iterrows(): + dict_aux.update({element.Specie: element.MW}) + + settings.write_time('Speciation', 'extract_molecular_weights', timeit.default_timer() - st_time, level=3) + + return dict_aux + + def do_speciation(self, emission_list): + """ + Manages all the process to speciate the emissions. + + :param emission_list: List of emissions to speciate. + :type emission_list: list + + :return: List of emissions already speciated. + :rtype: list + """ + import numpy as np + + st_time = timeit.default_timer() + settings.write_log("\tSpeciating", level=2) + + input_pollutants = [] + # Apply conversion factor to the input pollutants + for emission in emission_list: + try: + emission['data'] = np.array(emission['data'] / self.molecular_weights[emission['name']], + dtype=settings.precision) + except KeyError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise KeyError('ERROR: {0} pollutant is not in the molecular weights file {1} .'.format( + emission['name'], self.molecular_weights_path)) + sys.exit(1) + exec ("{0} = np.array(emission['data'], dtype=settings.precision)".format(emission['name'])) + emission['units'] = '' + input_pollutants.append(emission['name']) + + del emission_list + + speciated_emissions = [] + num = 0 + + for pollutant in self.speciation_profile: + formula = str(pollutant['formula']) + used_poll = [] + for in_p in input_pollutants: + if in_p in formula: + used_poll.append(in_p) + for poll_rem in used_poll: + input_pollutants.remove(poll_rem) + num += 1 + if formula != 'nan': + settings.write_log("\t\tPollutant {0} using the formula {0}={3} ({1}/{2})".format( + pollutant['name'], num, len(self.speciation_profile), formula), level=3) + + dict_aux = {'name': pollutant['name'], + 'units': pollutant['units'], + 'long_name': 
pollutant['long_name']} + if formula is '0' or formula is 0: + dict_aux.update({'data': 0}) + else: + try: + dict_aux.update({'data': np.array(eval(formula), dtype=settings.precision)}) + except NameError as e: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError( + "Error in speciation profile {0}: ".format(self.id) + + "The output specie {0} cannot be calculated ".format(pollutant['name']) + + "with the expression {0} because{1}".format(formula, e.message)) + else: + sys.exit(1) + speciated_emissions.append(dict_aux) + else: + settings.write_log("\t\tPollutant {0} does not have formula. Ignoring. ({1}/{2})".format( + pollutant['name'], num, len(self.speciation_profile)), level=3) + if len(input_pollutants) > 0: + settings.write_log("WARNING: The input pollutants {0} do not appear in the speciation profile {1}.".format( + input_pollutants, self.id)) + if settings.rank == 0: + warning("WARNING: The input pollutants {0} do not appear in the speciation profile {1}.".format( + input_pollutants, self.id)) + settings.write_time('Speciation', 'do_speciation', timeit.default_timer() - st_time, level=3) + + return speciated_emissions + + def get_long_name(self, name): + + st_time = timeit.default_timer() + value = '' + for pollutant in self.speciation_profile: + if pollutant['name'] == name: + value = pollutant['long_name'] + + settings.write_time('Speciation', 'get_long_name', timeit.default_timer() - st_time, level=3) + return value + + def get_units(self, name): + + st_time = timeit.default_timer() + value = '' + + for pollutant in self.speciation_profile: + if pollutant['name'] == name: + value = pollutant['units'] + + settings.write_time('Speciation', 'get_units', timeit.default_timer() - st_time, level=3) + return value + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/temporal/__init__.py b/hermesv3_gr/modules/temporal/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/temporal/temporal.py b/hermesv3_gr/modules/temporal/temporal.py new file mode 100644 index 0000000000000000000000000000000000000000..82e1e36e9f3316bf4bb004d97596bcbd8bde974e --- /dev/null +++ b/hermesv3_gr/modules/temporal/temporal.py @@ -0,0 +1,815 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import sys +import timeit +import hermesv3_gr.config.settings as settings +import numpy as np + + +class TemporalDistribution(object): + """ + TemporalDistribution class that contains all the information for the temporal disaggregation. + + :param starting_date: Date of the first timestep. + :type starting_date: datetime.datetime + + :param timestep_type: Relation between timesteps. It can be hourly, monthly or yearly. + :type timestep_type: str + + :param timestep_num: Quantity of timesteps. + :type timestep_num: int + + :param timestep_freq: Quantity of timestep_type between timesteps. + eg: If timestep_type = hourly; timestep_freq = 2; The difference between time of each timestep is 2 hours. + :type timestep_freq: int + + :param monthly_profile_path: Path to the file that contains all the monthly profiles. 
+ :type monthly_profile_path: str + + :param month_profile_id: ID of the monthly profile to use. + :type month_profile_id: str + + :param daily_profile_path: Path to the file that contains all the daily profiles. + :type daily_profile_path: str + + :param daily_profile_id: ID of the daily profile to use. + :type daily_profile_id: str + + :param hourly_profile_path: Path to the file that contains all the hourly profiles. + :type hourly_profile_path: str + + :param hourly_profile_id: ID of the hourly profile to use. + :type hourly_profile_id: str + + :param world_info_path: Path to the file that contains the necessary information to do the NetCDF of timezones. + :type world_info_path: str + + :param auxiliar_files_dir: Path to the directory where will be all the needed auxiliar files like the NetCDf of + timezones. + :type auxiliar_files_dir: str + """ + def __init__(self, starting_date, timestep_type, timestep_num, timestep_freq, monthly_profile_path, + month_profile_id, daily_profile_path, daily_profile_id, hourly_profile_path, hourly_profile_id, + world_info_path, auxiliar_files_dir, grid): + from timezonefinder import TimezoneFinder + + import pandas as pd + + st_time = timeit.default_timer() + settings.write_log('\t\tInitializing Temporal.', level=2) + + self.grid = grid + + self.starting_date = starting_date + + self.timestep_type = timestep_type + self.timestep_num = timestep_num + self.timestep_freq = timestep_freq + + self.ending_date = self.calculate_ending_date() + if month_profile_id is not None: + if len(month_profile_id) > 4: + if os.path.exists(month_profile_id): + self.monthly_profile = self.read_gridded_profile(month_profile_id, 'Fmonth') + self.monthly_profile = self.monthly_profile.reshape( + (self.monthly_profile.shape[0], self.monthly_profile.shape[1] * self.monthly_profile.shape[2])) + + self.monthly_profile_path = month_profile_id + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise 
IOError('ERROR: Gridded monthly profile file not found: {0}'.format(month_profile_id)) + sys.exit(1) + else: + self.monthly_profile = self.get_temporal_monthly_profile(monthly_profile_path, month_profile_id) + else: + self.monthly_profile = None + settings.write_log("\t\t\tNo temporal monthly profile is set.", level=3) + + self.daily_profile_id = daily_profile_id + self.daily_profile_path = daily_profile_path + if daily_profile_id is not None: + settings.write_log("\t\t\tGetting temporal daily profile id '{0}' from {1} .".format( + daily_profile_id, daily_profile_path), level=3) + else: + settings.write_log("\t\t\tNo temporal daily profile is set.", level=3) + + self.hourly_profile_path = hourly_profile_path + if hourly_profile_id is not None and len(hourly_profile_id) is 4: + self.hourly_profile = self.get_temporal_hourly_profile(hourly_profile_id) + settings.write_log("\t\t\tGetting temporal hourly profile id '{0}' from {1} .".format( + hourly_profile_id, hourly_profile_path), level=3) + else: + if hourly_profile_id is None: + settings.write_log("\t\t\tNo temporal hourly profile is set.", level=3) + else: + settings.write_log("\t\t\tGetting temporal hourly profile ids {0} from {1} .".format( + hourly_profile_id, hourly_profile_path), level=3) + self.hourly_profile = hourly_profile_id + + self.world_info = world_info_path + self.netcdf_timezones = os.path.join(auxiliar_files_dir, 'timezones.nc') + + self.hours_since = [] + + self.world_info_df = pd.read_csv(self.world_info, sep=';') + self.tf = TimezoneFinder() + + if not self.is_created_netcdf_timezones(): + settings.write_log("\t\tTimezones netCDF is not created. Lets try to create it.", level=1) + self.create_netcdf_timezones(grid) + self.timezones_array = self.calculate_timezones() + + settings.write_time('TemporalDistribution', 'Init', timeit.default_timer() - st_time, level=2) + + def calculate_ending_date(self): + """ + Calculate the date of the last timestep. 
+ + :return: Date of the last timestep + :rtype: datetime.datetime + """ + from datetime import timedelta + + st_time = timeit.default_timer() + + if self.timestep_type == 'hourly': + end_date = self.starting_date + (self.timestep_num - 1) * timedelta(hours=self.timestep_freq) + elif self.timestep_type == 'daily': + end_date = self.starting_date + (self.timestep_num - 1) * timedelta(hours=self.timestep_freq * 24) + elif self.timestep_type == 'monthly': + delta_year = (self.timestep_num - 1) * self.timestep_freq // 12 + delta_month = (self.timestep_num - 1) * self.timestep_freq % 12 + end_date = self.starting_date.replace(year=self.starting_date.year + delta_year, + month=self.starting_date.month + delta_month) + elif self.timestep_type == 'yearly': + delta_year = (self.timestep_num - 1) * self.timestep_freq + end_date = self.starting_date.replace(year=self.starting_date.year + delta_year) + else: + end_date = self.starting_date + + settings.write_time('TemporalDistribution', 'calculate_ending_date', timeit.default_timer() - st_time, level=3) + + return end_date + + def calculate_timedelta(self, date): + """ + Calculate the difference of time to the next timestep. + + :param date: Date of the current timestep. + :type date: datetime.datetime + + :return: Difference of time to the next timestep. 
+ :rtype: datetime.timedelta + """ + from datetime import timedelta + from calendar import monthrange, isleap + + st_time = timeit.default_timer() + + if self.timestep_type == 'hourly': + delta = timedelta(hours=self.timestep_freq) + elif self.timestep_type == 'daily': + delta = timedelta(hours=self.timestep_freq * 24) + elif self.timestep_type == 'monthly': + days = monthrange(date.year, date.month)[1] + delta = timedelta(hours=days * 24) + elif self.timestep_type == 'yearly': + if isleap(date.year): + delta = timedelta(hours=366 * 24) + else: + delta = timedelta(hours=365 * 24) + else: + delta = None + + settings.write_time('TemporalDistribution', 'calculate_ending_date', timeit.default_timer() - st_time, level=3) + + return delta + + def get_tz_from_id(self, tz_id): + """ + Extract the timezone (string format) for the given id (int). + + :param tz_id: ID of the timezone. + :type tz_id: int + + :return: Timezone + :rtype: str + """ + tz = self.world_info_df.time_zone[self.world_info_df.time_zone_code == tz_id].values + + return tz[0] + + def get_id_from_tz(self, tz): + """ + Extract the id (int) for the given timezone (string format). + + :param tz: Timezone of the ID. + :type tz: str + + :return: ID + :rtype: int + """ + tz_id = self.world_info_df.time_zone_code[self.world_info_df.time_zone == tz].values + + try: + tz_id = tz_id[0] + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise ValueError("ERROR: The timezone '{0}' is not in the {1} file. ".format(tz, self.world_info) + + "Please add it as a new line with an unique time_zone_code " + + "and the corresponding country and country_code.") + sys.exit(1) + + return tz_id + + @staticmethod + def parse_tz(timezone): + """ + Parse the timezone (string format). + + It is needed because some libraries have more timezones than others and it + tries to simplify setting the strange ones into the nearest common one. 
+ Examples: + 'America/Punta_Arenas': 'America/Santiago', + 'Europe/Astrakhan': 'Europe/Moscow', + 'Asia/Atyrau': 'Asia/Aqtau', + 'Asia/Barnaul': 'Asia/Almaty', + 'Europe/Saratov': 'Europe/Moscow', + 'Europe/Ulyanovsk': 'Europe/Moscow', + 'Europe/Kirov': 'Europe/Moscow', + 'Asia/Tomsk': 'Asia/Novokuznetsk', + 'America/Fort_Nelson': 'America/Vancouver' + + :param timezone: Not parsed timezone. + :type timezone: str + + :return: Parsed timezone + :rtype: str + """ + tz_dict = { + 'America/Punta_Arenas': 'America/Santiago', + 'Europe/Astrakhan': 'Europe/Moscow', + 'Asia/Atyrau': 'Asia/Aqtau', + 'Asia/Barnaul': 'Asia/Almaty', + 'Europe/Saratov': 'Europe/Moscow', + 'Europe/Ulyanovsk': 'Europe/Moscow', + 'Europe/Kirov': 'Europe/Moscow', + 'Asia/Tomsk': 'Asia/Novokuznetsk', + 'America/Fort_Nelson': 'America/Vancouver', + 'Asia/Famagusta': 'Asia/Nicosia', + } + + if timezone in tz_dict.iterkeys(): + timezone = tz_dict[timezone] + + return timezone + + def find_closest_timezone(self, latitude, longitude): + """ + Find the closest timezone for the given coordinates. + + :param latitude: Latitude coordinate to find timezone. + :type latitude: float + + :param longitude: Longitude coordinate fo find the timezone. + :type longitude: float + + :return: Nearest timezone of the given coordinates. + :rtype: str + """ + st_time = timeit.default_timer() + + degrees = 0 + timezone = None + while timezone is None: + timezone = self.tf.closest_timezone_at(lng=longitude, lat=latitude, delta_degree=degrees) + degrees += 1 + + settings.write_time('TemporalDistribution', 'find_closest_timezone', timeit.default_timer() - st_time, level=3) + + return timezone + + def is_created_netcdf_timezones(self): + """ + Check if the NetCDF of timezones is created + + :return: True if it is already created. + :rtype: bool + """ + return os.path.exists(self.netcdf_timezones) + + def create_netcdf_timezones(self, grid): + """ + Create a NetCDF with the timezones in the resolution of the given grid. 
+ + :param grid: Grid object with the coordinates. + :type grid: Grid + + :return: True if it is created. + :rtype: bool + """ + from hermesv3_gr.tools.netcdf_tools import write_netcdf + + st_time = timeit.default_timer() + settings.write_log("\t\tCreating {0} file.".format(self.netcdf_timezones), level=2) + + lat, lon = grid.get_coordinates_2d() + total_lat = settings.comm.gather(lat, root=0) + total_lon = settings.comm.gather(lon, root=0) + + dst_var = [] + + num = 0 + points = zip(lat.flatten(), lon.flatten()) + + for lat_aux, lon_aux in points: + num += 1 + settings.write_log("\t\t\tlat:{0}, lon:{1} ({2}/{3})".format(lat_aux, lon_aux, num, len(points)), level=3) + timezone = self.find_closest_timezone(lat_aux, lon_aux) + tz_id = self.get_id_from_tz(timezone) + dst_var.append(tz_id) + dst_var = np.array(dst_var) + dst_var = dst_var.reshape((1,) + lat.shape) + dst_var = settings.comm.gather(dst_var, root=0) + if settings.rank == 0: + total_lat = np.concatenate(total_lat, axis=1) + total_lon = np.concatenate(total_lon, axis=1) + dst_var = np.concatenate(dst_var, axis=2) + data = [{'name': 'timezone_id', 'units': '', 'data': dst_var}] + + write_netcdf(self.netcdf_timezones, total_lat, total_lon, data, regular_latlon=True) + settings.comm.Barrier() + + settings.write_time('TemporalDistribution', 'create_netcdf_timezones', timeit.default_timer() - st_time, + level=2) + + return True + + def read_gridded_profile(self, path, value): + # TODO Documentation + """ + + :param path: + :param value: + :return: + """ + from netCDF4 import Dataset + + st_time = timeit.default_timer() + + settings.write_log('\t\t\tGetting gridded temporal monthly profile from {0} .'.format(path), level=3) + + nc_in = Dataset(path) + profile = nc_in.variables[value][:, self.grid.x_lower_bound:self.grid.x_upper_bound, + self.grid.y_lower_bound:self.grid.y_upper_bound] + nc_in.close() + + profile[profile <= 0] = 1 + + settings.write_time('TemporalDistribution', 'read_gridded_profile', 
timeit.default_timer() - st_time, level=3) + + return profile + + def calculate_timezones(self): + """ + Calculate the timezones ID's from the NetCDF and convert them to the timezone (str). + + :return: Array with the timezone of each cell. + :rtype: numpy.chararray + """ + from netCDF4 import Dataset + + st_time = timeit.default_timer() + + nc_in = Dataset(self.netcdf_timezones) + timezones = nc_in.variables['timezone_id'][:, self.grid.x_lower_bound:self.grid.x_upper_bound, + self.grid.y_lower_bound:self.grid.y_upper_bound].astype(int) + + nc_in.close() + tz_list = np.chararray(timezones.shape, itemsize=32) + for id_aux in xrange(timezones.min(), timezones.max() + 1): + try: + timezone = self.get_tz_from_id(id_aux) + tz_list[timezones == id_aux] = timezone + except IndexError: + pass + settings.write_time('TemporalDistribution', 'calculate_timezones', timeit.default_timer() - st_time, level=3) + + return tz_list + + def calculate_2d_temporal_factors(self, date): + """ + Calculate the temporal factor to correct the input data of the given date for each cell. + + :param date: Date of the current timestep. + :type date: datetime.datetime + + :return: 2D array with the factors to correct the input data to the date of this timestep. 
+ :rtype: numpy.array + """ + import pytz + import pandas as pd + + st_time = timeit.default_timer() + + df = pd.DataFrame(self.timezones_array.flatten(), columns=['tz']) + df['i'] = df.index + + df['utc'] = pd.to_datetime(date) + try: + df['local'] = df.groupby('tz')['utc'].apply( + lambda x: pd.to_datetime(x).dt.tz_localize(pytz.utc).dt.tz_convert(x.name).dt.tz_localize(None)) + except pytz.exceptions.UnknownTimeZoneError: + df['local'] = df.groupby('tz')['utc'].apply( + lambda x: pd.to_datetime(x).dt.tz_localize(pytz.utc).dt.tz_convert( + self.parse_tz(x.name)).dt.tz_localize(None)) + df.set_index('local', inplace=True) + + df['month'] = df.index.month + df['day'] = df.index.weekday + df['hour'] = df.index.hour + + if self.hourly_profile is not None: + if isinstance(self.hourly_profile, dict): + df['hour_factor'] = df['hour'].map(self.hourly_profile) + else: + profile_ids = self.parse_hourly_profile_id() + weekday_profile = self.get_temporal_hourly_profile(profile_ids['weekday']) + saturday_profile = self.get_temporal_hourly_profile(profile_ids['saturday']) + sunday_profile = self.get_temporal_hourly_profile(profile_ids['sunday']) + df['weekday'] = df['hour'].map(weekday_profile) + df['saturday'] = df['hour'].map(saturday_profile) + df['sunday'] = df['hour'].map(sunday_profile) + + del df['tz'], df['utc'] + df['hour_factor'] = 0 + + df.loc[df['day'] <= 4, 'hour_factor'] = df['weekday'][df['day'] <= 4].values + df.loc[df['day'] == 5, 'hour_factor'] = df['saturday'][df['day'] == 5].values + df.loc[df['day'] == 6, 'hour_factor'] = df['sunday'][df['day'] == 6].values + + del df['weekday'], df['saturday'], df['sunday'] + else: + df['hour_factor'] = 1 + del df['hour'] + + if self.daily_profile_id is not None: + daily_profile = self.get_temporal_daily_profile(date) + df['day_factor'] = df['day'].map(daily_profile) + else: + df['day_factor'] = 1 + del df['day'] + + if self.monthly_profile is None: + df['month_factor'] = 1 + elif isinstance(self.monthly_profile, dict): + 
df['month_factor'] = df['month'].map(self.monthly_profile) + elif isinstance(self.monthly_profile, np.ndarray): + for m, df_aux in df.groupby('month'): + try: + df.loc[df['month'] == m, 'month_factor'] = \ + self.monthly_profile[m - 1, df.loc[df['month'] == m, 'i'].values] + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise IOError("The gridded temporal profile {0} ".format(self.monthly_profile_path) + + "is not on the output grid resolution.") + sys.exit(1) + else: + df['month_factor'] = 1 + del df['month'] + + df['factor'] = df['month_factor'] * df['day_factor'] * df['hour_factor'] + + # TODO make timezones_aray 2D + factors = np.array(df['factor'].values).reshape((self.timezones_array.shape[1], self.timezones_array.shape[2])) + del df + + settings.write_time('TemporalDistribution', 'calculate_2d_temporal_factors', timeit.default_timer() - st_time, + level=3) + + return factors + + def calculate_3d_temporal_factors(self): + """ + Calculate the temporal factor to correct the input data of the given date for each cell. + + :return: 3D array with the factors to correct the input data to the date of this timestep. 
+ :rtype: numpy.array + """ + st_time = timeit.default_timer() + settings.write_log("\tCalculating temporal factors.", level=2) + + factors = [] + date = self.starting_date + count = 0 + + while date <= self.ending_date: + count += 1 + settings.write_log("\t\t{0} temporal factor ({1}/{2}).".format( + date.strftime('%Y/%m/%d %H:%M:%S'), count, self.timestep_num), level=3) + + factors.append(self.calculate_2d_temporal_factors(date)) + + date_aux = date - self.starting_date + self.hours_since.append(date_aux.seconds / 3600 + date_aux.days * 24) # 3600 seconds per hour + date = date + self.calculate_timedelta(date) + + factors = np.array(factors) + + settings.write_time('TemporalDistribution', 'calculate_3d_temporal_factors', timeit.default_timer() - st_time, + level=3) + return factors + + def parse_hourly_profile_id(self): + """ + Parse the hourly profile ID to get a dictionary with the ID for "weekday", "saturday" and "sunday" + + :return: + """ + import re + + dict_aux = {} + list_aux = list(map(str, re.split(' , | ,|, |,| ', self.hourly_profile))) + for element in list_aux: + key_value_list = list(map(str, re.split(':| :|: | : |=| =|= | = ', element))) + dict_aux[key_value_list[0]] = key_value_list[1] + + return dict_aux + + def get_temporal_hourly_profile(self, profile_id, date=None): + """ + Extract the hourly profile of the given ID in a dictionary format. + + The hour (0 to 23) is the key (int) and the value (float) is the factor. + + :param profile_id: ID of the hourly profile to use. + :type profile_id: str + + :param date: Date of the timestep to simulate. Not necessary for a single ID. + :type date: datetime.datetime + + :return: Hourly profile where the hour (0 to 23) is the key (int) and the value (float) is the factor. 
+ :rtype: dict + """ + import pandas as pd + + st_time = timeit.default_timer() + if date is None: + df = pd.read_csv(self.hourly_profile_path) + try: + profile = df.loc[df[df.TP_H == profile_id].index[0]].to_dict() + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Hourly profile ID {0} is not in the {1} file.'.format( + profile_id, self.hourly_profile_path)) + sys.exit(1) + profile.pop('TP_H', None) + profile = {int(k): float(v) for k, v in profile.items()} + else: + profile = None + settings.write_time('TemporalDistribution', 'get_temporal_hourly_profile', timeit.default_timer() - st_time, + level=3) + + return profile + + def get_temporal_daily_profile(self, date): + """ + Extract the daily profile of the given ID in a dictionary format. + + The weekday (0 to 6) is the key (int) and the value (float) is the factor. + + :param date: Date of the timestep to simulate. + :type date: datetime.datetime + + :return: Daily profile where the weekday (0 to 6) is the key (int) and the value (float) is the factor. 
+ :rtype: dict + """ + import pandas as pd + + st_time = timeit.default_timer() + + if self.daily_profile_id is not None: + df = pd.read_csv(self.daily_profile_path) + try: + profile = df.loc[df[df.TP_D == self.daily_profile_id].index[0]].to_dict() + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Daily profile ID {0} is not in the {1} file.'.format( + self.daily_profile_id, self.daily_profile_path)) + sys.exit(1) + profile.pop('TP_D', None) + profile_aux = {int(k): float(v) for k, v in profile.items()} + rebalance_factor = self.calculate_rebalance_factor(profile_aux, date) + profile = {int(k): float(v) + rebalance_factor for k, v in profile.items()} + else: + profile = None + + settings.write_time('TemporalDistribution', 'get_temporal_daily_profile', timeit.default_timer() - st_time, + level=3) + + return profile + + def calculate_rebalance_factor(self, profile, date): + """ + Calculate the necessary factor make consistent the full month data. + + This is needed for the months that if you sum the daily factor of each day of the month it doesn't sum as + the number of days of the month. + + :param profile: Daily profile. + :type profile: dict + + :param date: Date of the timestep to simulate. + :type date: datetime.datetime + + :return: Rebalance factor to be sum to the daily factor. + :rtype: float + """ + st_time = timeit.default_timer() + + weekdays = self.calculate_weekdays(date) + rebalance_factor = self.calculate_weekday_factor_full_month(profile, weekdays) + + settings.write_time('TemporalDistribution', 'calculate_rebalance_factor', timeit.default_timer() - st_time, + level=3) + + return rebalance_factor + + @staticmethod + def calculate_weekday_factor_full_month(profile, weekdays): + # TODO Documentation + """ + Operate with all the days of the month to get the sum of daily factors of the full month. 
+ + :param profile: + :param weekdays: + :return: + """ + st_time = timeit.default_timer() + + weekdays_factors = 0 + num_days = 0 + for day in xrange(7): + weekdays_factors += profile[day] * weekdays[day] + num_days += weekdays[day] + + settings.write_time('TemporalDistribution', 'calculate_weekday_factor_full_month', + timeit.default_timer() - st_time, level=3) + + return (num_days - weekdays_factors) / num_days + + @staticmethod + def calculate_weekdays(date): + # TODO Documentation + """ + + :param date: + :return: + """ + from calendar import monthrange, weekday, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY + + st_time = timeit.default_timer() + + weekdays = [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY] + days = [weekday(date.year, date.month, d + 1) for d in xrange(monthrange(date.year, date.month)[1])] + weekdays_dict = {} + count = 0 + for day in weekdays: + weekdays_dict[count] = days.count(day) + + count += 1 + + settings.write_time('TemporalDistribution', 'calculate_weekdays', timeit.default_timer() - st_time, level=3) + return weekdays_dict + + @staticmethod + def get_temporal_monthly_profile(profile_path, profile_id): + """ + Extract the monthly profile of the given ID in a dictionary format. + + The month (1 to 12) is the key (int) and the value (float) is the factor. + + :param profile_path: Path to the file that contains all the monthly profiles. + :type profile_path: str + + :param profile_id: ID of the monthly profile to use. + :type profile_id: str + + :return: Monthly profile where the month (1 to 12) is the key (int) and the value (float) is the factor. 
+ :rtype: dict + """ + import pandas as pd + + st_time = timeit.default_timer() + + settings.write_log("\t\t\tGetting temporal monthly profile id '{0}' from {1} .".format( + profile_id, profile_path), level=3) + + if profile_id is not None: + df = pd.read_csv(profile_path) + try: + profile = df.loc[df[df.TP_M == profile_id].index[0]].to_dict() + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Monthly profile ID {0} is not in the {1} file.'.format( + profile_id, profile_path)) + sys.exit(1) + profile.pop('TP_M', None) + profile = {int(k): float(v) for k, v in profile.items()} + else: + profile = None + + settings.write_time('TemporalDistribution', 'get_temporal_monthly_profile', timeit.default_timer() - st_time, + level=2) + + return profile + + @staticmethod + def calculate_delta_hours(st_date, time_step_type, time_step_num, time_step_freq): + # TODO Documentation + """ + + :param st_date: + :param time_step_type: + :param time_step_num: + :param time_step_freq: + :return: + """ + from datetime import timedelta + from calendar import monthrange, isleap + + st_time = timeit.default_timer() + + settings.write_log('Calculating time array of {0} time steps starting from {1}.'.format( + time_step_num, st_date.strftime('%Y/%m/%d %H:%M:%S'))) + + if time_step_type == 'hourly': + end_date = st_date + (time_step_num - 1) * timedelta(hours=time_step_freq) + elif time_step_type == 'daily': + end_date = st_date + (time_step_num - 1) * timedelta(hours=time_step_freq * 24) + elif time_step_type == 'monthly': + delta_year = (time_step_num - 1) * time_step_freq // 12 + delta_month = (time_step_num - 1) * time_step_freq % 12 + end_date = st_date.replace(year=st_date.year + delta_year, + month=st_date.month + delta_month) + elif time_step_type == 'yearly': + delta_year = (time_step_num - 1) * time_step_freq + end_date = st_date.replace(year=st_date.year + delta_year) + else: + end_date = 
st_date + + date_aux = st_date + hours_since = [] + while date_aux <= end_date: + d = date_aux - st_date + hours_since.append(d.seconds / 3600 + d.days * 24) # 3600 seconds per hour + + if time_step_type == 'hourly': + delta = timedelta(hours=time_step_freq) + elif time_step_type == 'daily': + delta = timedelta(hours=time_step_freq * 24) + elif time_step_type == 'monthly': + days = monthrange(date_aux.year, date_aux.month)[1] + delta = timedelta(hours=days * 24) + elif time_step_type == 'yearly': + if isleap(date_aux.year): + delta = timedelta(hours=366 * 24) + else: + delta = timedelta(hours=365 * 24) + else: + delta = None + + date_aux = date_aux + delta + + settings.write_time('TemporalDistribution', 'calculate_delta_hours', timeit.default_timer() - st_time, level=2) + + return hours_since + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/vertical/__init__.py b/hermesv3_gr/modules/vertical/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/vertical/vertical.py b/hermesv3_gr/modules/vertical/vertical.py new file mode 100644 index 0000000000000000000000000000000000000000..1b309ea4be040924ab3f08aa4078ac1ceb1aed42 --- /dev/null +++ b/hermesv3_gr/modules/vertical/vertical.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import hermesv3_gr.config.settings as settings + + +class VerticalDistribution(object): + """ + VerticalDistribution class that contains all the information to do the vertical distribution. + + :param vertical_id: ID of the vertical profile that appears in the vertical profile file. + :type vertical_id: str + + :param vertical_profile_path: Path to the file that contains all the vertical profiles. + :type vertical_profile_path: str + + :param vertical_output_profile: path to the file that contain the vertical description of the required output + file. + :type vertical_output_profile: str + """ + def __init__(self, vertical_id, vertical_profile_path, vertical_output_profile): + st_time = timeit.default_timer() + settings.write_log('\t\tInitializing Vertical.', level=2) + + self.id = vertical_id + + self.output_heights = vertical_output_profile + self.vertical_profile = self.get_vertical_profile(vertical_profile_path) + + settings.write_time('VerticalDistribution', 'Init', timeit.default_timer() - st_time, level=2) + + def get_vertical_profile(self, path): + """ + Extract the vertical v_profile from the vertical v_profile file. + + :param path: Path to the file that contains all the vertical profiles. + :type path: str + + :return: List of tuples of two values. Te first value of the tuple is the height of the layer and the second + value is the quantity (%) of pollutant that goes into this layer. 
+ :rtype: list of tuple + """ + import pandas as pd + import re + + st_time = timeit.default_timer() + settings.write_log("\t\t\tGetting vertical profile id '{0}' from {1} .".format(self.id, path), level=3) + + df = pd.read_csv(path, sep=';') + try: + v_profile = df.loc[df[df.ID == self.id].index[0]].to_dict() + except IndexError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError('ERROR: Vertical profile ID {0} is not in the {1} file.'.format(self.id, path)) + sys.exit(1) + v_profile.pop('ID', None) + v_profile['layers'] = list(map(int, re.split(', |,|; |;| ', v_profile['layers']))) + v_profile['weights'] = list(map(float, re.split(', |,|; |;| ', v_profile['weights']))) + + if len(v_profile['layers']) != len(v_profile['weights']): + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("ERROR: The number of layers and numbers os weight have to have the same length." + + " The v_profile '{0}' of the '{1}' file doesn't match.".format(self.id, path)) + sys.exit(1) + else: + return_value = zip(v_profile['layers'], v_profile['weights']) + + settings.write_time('VerticalDistribution', 'get_vertical_profile', timeit.default_timer() - st_time, level=3) + + return return_value + + @staticmethod + def get_vertical_output_profile(path): + """ + Extract the vertical description of the desired output. + + :param path: Path to the file that contains the output vertical description. + :type path: str + + :return: Heights of the output vertical layers. 
+ :rtype: list + """ + import pandas as pd + + st_time = timeit.default_timer() + settings.write_log('Calculating vertical levels from {0} .'.format(path)) + + df = pd.read_csv(path, sep=';') + + heights = df.height_magl.values + + settings.write_time('VerticalDistribution', 'get_vertical_output_profile', timeit.default_timer() - st_time, + level=3) + + return heights + + @staticmethod + def get_weights(prev_layer, layer, in_weight, output_vertical_profile): + """ + Calculate the weights for the given layer. + + :param prev_layer: Altitude of the low layer. 0 if it's the first. + :type prev_layer: float + + :param layer: Altitude of the current layer. + :type layer: float + + :param in_weight: Weights + :param output_vertical_profile: + :return: + """ + st_time = timeit.default_timer() + + output_vertical_profile_aux = [s for s in output_vertical_profile if s >= prev_layer] + output_vertical_profile_aux = [s for s in output_vertical_profile_aux if s < layer] + + output_vertical_profile_aux = [prev_layer] + output_vertical_profile_aux + [layer] + + index = len([s for s in output_vertical_profile if s < prev_layer]) + origin_diff_factor = in_weight / (layer - prev_layer) + weight_list = [] + for i in xrange(len(output_vertical_profile_aux) - 1): + weight = (abs(output_vertical_profile_aux[i] - output_vertical_profile_aux[i + 1])) * origin_diff_factor + weight_list.append({'index': index, 'weight': weight}) + index += 1 + + settings.write_time('VerticalDistribution', 'get_weights', timeit.default_timer() - st_time, level=3) + + return weight_list + + def calculate_weights(self): + """ + Calculate the weights for all the vertical layers. + + :return: Weights that goes to each layer. 
+ :rtype: list of float + """ + import numpy as np + + st_time = timeit.default_timer() + settings.write_log("\t\tCalculating vertical weights.", level=3) + + weights = np.zeros(len(self.output_heights)) + prev_layer = 0 + for layer, weight in self.vertical_profile: + if weight != float(0): + for element in self.get_weights(prev_layer, layer, weight, self.output_heights): + weights[element['index']] += element['weight'] + + prev_layer = layer + + settings.write_time('VerticalDistribution', 'calculate_weights', timeit.default_timer() - st_time, level=3) + + return weights + + @staticmethod + def apply_weights(data, weights): + """ + Calculate the vertical distribution using the given data and weights. + + :param data: Emissions to be vertically distributed. + :type data: numpy.array + + :param weights: Weights of each layer. + :type weights: numpy.array + + :return: Emissions already vertically distributed. + :rtype: numpy.array + """ + import numpy as np + + st_time = timeit.default_timer() + + data_aux = np.multiply(weights.reshape(weights.shape + (1, 1)), data) + + settings.write_time('VerticalDistribution', 'apply_weights', timeit.default_timer() - st_time, level=3) + + return data_aux + + @staticmethod + def apply_weights_level(data, weight): + st_time = timeit.default_timer() + + for emi in data: + if emi['data'] is not 0: + emi['data'] = emi['data'] * weight + + settings.write_time('VerticalDistribution', 'apply_weights_level', timeit.default_timer() - st_time, level=3) + + return data + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/vertical/vertical_gfas.py b/hermesv3_gr/modules/vertical/vertical_gfas.py new file mode 100644 index 0000000000000000000000000000000000000000..600ab7f6fef3559a064f0f37ea209847ada65260 --- /dev/null +++ b/hermesv3_gr/modules/vertical/vertical_gfas.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import timeit +import hermesv3_gr.config.settings as settings +from vertical import VerticalDistribution + + +class GfasVerticalDistribution(VerticalDistribution): + """ + Class that contains all the needed information to vertically distribute the fire emissions. + + :param vertical_output_profile: Path to the file that contains the vertical description of the desired output. + :type vertical_output_profile: str + + :param approach: Approach to take into account. + :type approach: str + """ + def __init__(self, vertical_output_profile, approach, altitude): + st_time = timeit.default_timer() + + self.altitude = altitude + self.output_heights = vertical_output_profile + self.approach = approach + + settings.write_time('GfasVerticalDistribution', 'Init', timeit.default_timer() - st_time, level=3) + + @staticmethod + def calculate_widths(heights_list): + """ + Calculate the width of each vertical level. + + :param heights_list: List of the top altitude in meters of each level. + :type heights_list: list + + :return: List with the width of each vertical level. 
+ :rtype: list + """ + st_time = timeit.default_timer() + + widths = [] + for i in xrange(len(heights_list)): + if i == 0: + widths.append(heights_list[i]) + else: + widths.append(heights_list[i] - heights_list[i - 1]) + + settings.write_time('GfasVerticalDistribution', 'calculate_widths', timeit.default_timer() - st_time, level=3) + return widths + + def get_weights(self, heights_list): + """ + Calculate the proportion (%) of emission to put on each layer. + + :param heights_list: List with the width of each vertical level. + :type heights_list: list + + :return: List of the weight to apply to each layer. + :rtype: list + """ + st_time = timeit.default_timer() + + weights = [] + width_list = self.calculate_widths(heights_list) + if self.approach == 'uniform': + max_percent = 1. + elif self.approach == '50_top': + max_percent = 0.5 + width_list = width_list[0:-1] + else: + max_percent = 1. + + for width in width_list: + weights.append((width * max_percent) / sum(width_list)) + if self.approach == '50_top': + if len(heights_list) == 1: + weights.append(1.) + else: + weights.append(0.5) + + settings.write_time('GfasVerticalDistribution', 'get_weights', timeit.default_timer() - st_time, level=3) + return weights + + def apply_approach(self, top_fires): + """ + Scatters the emissions vertically. + + :param top_fires: 4D array (time, level, latitude, longitude) with all the emission on each top layer. + :type top_fires: numpy.array + + :return: 4D array (time, level, latitude, longitude) with all the emission distributed on all the involved + layers. 
+ :rtype: numpy.array + """ + import numpy as np + + st_time = timeit.default_timer() + + fires = np.zeros(top_fires.shape) + for i in xrange(len(self.output_heights)): + if top_fires[i].sum() != 0: + weight_list = self.get_weights(list(self.output_heights[0: i + 1])) + for i_weight in xrange(len(weight_list)): + fires[i_weight] += top_fires[i] * weight_list[i_weight] + + settings.write_time('GfasVerticalDistribution', 'apply_approach', timeit.default_timer() - st_time, level=3) + return fires + + def do_vertical_interpolation_allocation(self, values, altitude): + """ + Allocates the fire emissions on their top level. + + :param values: 2D array with the fire emissions + :type values: numpy.array + + :param altitude: 2D array with the altitude of the fires. + :type altitude: numpy.array + + :return: Emissions already allocated on the top altitude of each fire. + :rtype: numpy.array + """ + import numpy as np + + st_time = timeit.default_timer() + + fire_list = [] + aux_var = values + for height in self.output_heights: + aux_data = np.zeros(aux_var.shape) + ma = np.ma.masked_less_equal(altitude, height) + aux_data[ma.mask] += aux_var[ma.mask] + aux_var -= aux_data + fire_list.append(aux_data) + fire_list = np.array(fire_list).reshape((len(fire_list), values.shape[1], values.shape[2])) + + settings.write_time('GfasVerticalDistribution', 'do_vertical_interpolation_allocation', + timeit.default_timer() - st_time, level=3) + return fire_list + + def do_vertical_interpolation(self, values): + """ + Manages all the process to do the vertical distribution. + + :param values: Emissions to be vertically distributed. + :type values: numpy.array + + :return: Emissions already vertically distributed. 
+ :rtype: numpy.array + """ + st_time = timeit.default_timer() + + fire_list = self.apply_approach(values) + + settings.write_time('GfasVerticalDistribution', 'do_vertical_interpolation', timeit.default_timer() - st_time, + level=3) + return fire_list + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/modules/writing/__init__.py b/hermesv3_gr/modules/writing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/modules/writing/writer.py b/hermesv3_gr/modules/writing/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..06e6f34b4136aa34365b2f02fee263ce579477ed --- /dev/null +++ b/hermesv3_gr/modules/writing/writer.py @@ -0,0 +1,604 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import numpy as np +from mpi4py import MPI +from netCDF4 import Dataset +from hermesv3_gr.config import settings + + +class Writer(object): + """ + Class to Write the output file. + + :param path: Path to the destination file. + :type path: str + + :param grid: Grid of the destination file. + :type grid: Grid + + :param levels: List with the levels of the grid. 
+ :type levels: list + + :param date: Date of the output file + :type date: datetime.datetime + + :param hours: List with the timestamp hours. + :type hours: list. + + :param global_attributes_path: Path to the file that contains the static global attributes. + :type global_attributes_path: str + + :param compress: Indicates if you want to compress the netCDF variable data. + :type compress: bool + + :param parallel: Indicates if you want to write in parallel mode. + :type parallel. bool + """ + + def __init__(self, path, grid, levels, date, hours, global_attributes_path, compress=True, parallel=False): + + self.path = path + self.grid = grid + self.compress = compress + self.parallel = parallel + + self.variables_attributes = None + self.levels = levels + self.date = date + self.hours = hours + + self.global_attributes = None + + self.global_attributes_path = global_attributes_path + + def write(self, inventory_list): + """ + Write the netCDF4 file with the pollutants of the given list of inventories. + + :param inventory_list: List of inventories. + :type inventory_list: list + + :return: True at end + :rtype: bool + """ + st_time = timeit.default_timer() + settings.write_log('') + settings.write_log("Writing netCDF output file {0} .".format(self.path)) + + self.set_variable_attributes(inventory_list) + self.change_variable_attributes() + if self.parallel: + if settings.rank == 0: + self.create_parallel_netcdf() + settings.comm.Barrier() + self.write_parallel_netcdf(inventory_list) + else: + self.write_serial_netcdf(inventory_list) + + settings.write_time('Writer', 'write', timeit.default_timer() - st_time) + return True + + def change_variable_attributes(self): + pass + + def create_parallel_netcdf(self): + """ + Implemented on inner class. + """ + return None + + def write_parallel_netcdf(self, emission_list): + """ + Append the data to the netCDF4 file already created in parallel mode. + + :param emission_list: Data to append. 
+ :type emission_list: list + + :return: True at end. + :rtype: bool + """ + + st_time = timeit.default_timer() + + settings.write_log("\tAppending data to parallel NetCDF file.", level=2) + if settings.size > 1: + netcdf = Dataset(self.path, mode='a', format="NETCDF4", parallel=True, comm=settings.comm, info=MPI.Info()) + else: + netcdf = Dataset(self.path, mode='a', format="NETCDF4") + settings.write_log("\t\tParallel NetCDF file ready to write.", level=2) + index = 0 + # print "Rank {0} 2".format(rank) + for var_name in self.variables_attributes.iterkeys(): + + data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) + st_time = timeit.default_timer() + index += 1 + + var = netcdf.variables[var_name] + if settings.size > 1: + var.set_collective(True) + # Correcting NAN + if data is None: + data = 0 + var[:, :, self.grid.x_lower_bound:self.grid.x_upper_bound, + self.grid.y_lower_bound:self.grid.y_upper_bound] = data + + settings.write_log("\t\t\t'{0}' variable filled".format(var_name)) + + if 'cell_area' in netcdf.variables: + c_area = netcdf.variables['cell_area'] + c_area[self.grid.x_lower_bound:self.grid.x_upper_bound, + self.grid.y_lower_bound:self.grid.y_upper_bound] = self.grid.cell_area + + netcdf.close() + settings.write_time('Writer', 'write_parallel_netcdf', timeit.default_timer() - st_time, level=3) + return True + + def write_serial_netcdf(self, emission_list): + """ + Implemented on inner class. + """ + return None + + def set_variable_attributes(self, inventory_list): + """ + Change the variables_attribute parameter of the Writer class. + + :param inventory_list: list of invenotries. + :type inventory_list: list + + :return: True at end. 
+ :rtype: bool + """ + st_time = timeit.default_timer() + empty_dict = {} + for inventory in inventory_list: + for emi in inventory.emissions: + if not emi['name'] in empty_dict: + dict_aux = emi.copy() + dict_aux['data'] = None + empty_dict[emi['name']] = dict_aux + + self.variables_attributes = empty_dict.values() + + settings.write_time('Writer', 'set_variable_attributes', timeit.default_timer() - st_time, level=3) + + return True + + def calculate_data_by_var(self, variable, inventory_list, shape): + """ + Calculate the date of the given variable throw the inventory list. + + :param variable: Variable to calculate. + :type variable: str + + :param inventory_list: Inventory list + :type inventory_list: list + + :param shape: Output desired shape. + :type shape: tuple + + :return: Data of the given variable. + :rtype: numpy.array + """ + st_time = timeit.default_timer() + settings.write_log("\t\t\t\tGetting data for '{0}' pollutant.".format(variable), level=3) + + data = None + + for ei in inventory_list: + for emission in ei.emissions: + if emission['name'] == variable: + if emission['data'] is not 0: + vertical_time = timeit.default_timer() + if ei.source_type == 'area': + if ei.vertical_factors is not None: + aux_data = emission['data'][np.newaxis, :, :] * ei.vertical_factors[:, np.newaxis, + np.newaxis] + else: + if len(emission['data'].shape) != 3: + aux_data = np.zeros((shape[1], shape[2], shape[3])) + aux_data[0, :, :] = emission['data'] + else: + aux_data = emission['data'] + elif ei.source_type == 'point': + aux_data = np.zeros((shape[1], shape[2] * shape[3])) + aux_data[ei.location['layer'], ei.location['FID']] = emission['data'] + aux_data = aux_data.reshape((shape[1], shape[2], shape[3])) + else: + aux_data = None + + settings.write_time('VerticalDistribution', 'calculate_data_by_var', + timeit.default_timer() - vertical_time, level=2) + del emission['data'] + + temporal_time = timeit.default_timer() + if data is None: + data = np.zeros(shape) + if 
ei.temporal_factors is not None: + data += aux_data[np.newaxis, :, :, :] * ei.temporal_factors[:, np.newaxis, :, :] + else: + data += aux_data[np.newaxis, :, :, :] + settings.write_time('TemporalDistribution', 'calculate_data_by_var', + timeit.default_timer() - temporal_time, level=2) + # Unit changes + data = self.unit_change(variable, data) + if data is not None: + data[data < 0] = 0 + settings.write_time('Writer', 'calculate_data_by_var', timeit.default_timer() - st_time, level=3) + return data + + def unit_change(self, variable, data): + """ + Implement on inner class + """ + return np.array([0]) + + @staticmethod + def calculate_displacements(counts): + """ + Calculate the index position of all the ranks. + + :param counts: Number of elements for rank + :type counts: list + + :return: Displacements + :rtype: list + """ + st_time = timeit.default_timer() + + new_list = [0] + accum = 0 + for counter in counts[:-1]: + accum += counter + new_list.append(accum) + + settings.write_time('Writer', 'calculate_displacements', timeit.default_timer() - st_time, level=3) + return new_list + + @staticmethod + def tuple_to_index(tuple_list, bidimensional=False): + """ + Get the index for a list of shapes. + + :param tuple_list: List os shapes. + :type tuple_list: list + + :param bidimensional: Indicates if the tuple is bidimensional. + :type bidimensional: bool + + :return: List of index + :rtype: list + """ + from operator import mul + st_time = timeit.default_timer() + + new_list = [] + for my_tuple in tuple_list: + if bidimensional: + new_list.append(my_tuple[-1] * my_tuple[-2]) + else: + new_list.append(reduce(mul, my_tuple)) + settings.write_time('Writer', 'tuple_to_index', timeit.default_timer() - st_time, level=3) + return new_list + + @staticmethod + def get_writer(output_model, path, grid, levels, date, hours, global_attributes_path, compress, parallel): + """ + Choose between the different writers depending on the desired output model. 
+ + :param output_model: Name of the output model. Only accepted 'MONARCH, CMAQ or WRF_CHEM. + :type output_model: str + + :param path: Path to the destination file. + :type path: str + + :param grid: Grid of the destination file. + :type grid: Grid + + :param levels: List with the levels of the grid. + :type levels: list + + :param date: Date of the output file + :type date: datetime.datetime + + :param hours: List with the timestamp hours. + :type hours: list. + + :param global_attributes_path: Path to the file that contains the static global attributes. + :type global_attributes_path: str + + :param compress: Indicates if you want to compress the netCDF variable data. + :type compress: bool + + :param parallel: Indicates if you want to write in parallel mode. + :type parallel. bool + + :return: Writing object of the desired output model. + :rtype: Writer + """ + from hermesv3_gr.modules.writing.writer_cmaq import WriterCmaq + from hermesv3_gr.modules.writing.writer_monarch import WriterMonarch + from hermesv3_gr.modules.writing.writer_wrf_chem import WriterWrfChem + + settings.write_log('Selecting writing output type for {0}.'.format(output_model)) + if output_model.lower() == 'monarch': + return WriterMonarch(path, grid, levels, date, hours, global_attributes_path, compress, parallel) + elif output_model.lower() == 'cmaq': + return WriterCmaq(path, grid, levels, date, hours, global_attributes_path, compress, parallel) + elif output_model.lower() == 'wrf_chem': + return WriterWrfChem(path, grid, levels, date, hours, global_attributes_path, compress, parallel) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise AttributeError("The desired '{0}' output model is not available. 
".format(output_model) + + "Only accepted 'MONARCH, CMAQ or WRF_CHEM.") + sys.exit(1) + + @staticmethod + def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, + levels=None, date=None, hours=None, + boundary_latitudes=None, boundary_longitudes=None, cell_area=None, global_attributes=None, + regular_latlon=False, + roated=False, rotated_lats=None, rotated_lons=None, north_pole_lat=None, north_pole_lon=None, + lcc=False, lcc_x=None, lcc_y=None, lat_1_2=None, lon_0=None, lat_0=None, + mercator=False, lat_ts=None): + # TODO Deprecate + """ + Will be deprecated + """ + from netCDF4 import Dataset + from cf_units import Unit, encode_time + + if not (regular_latlon or lcc or roated or mercator): + regular_latlon = True + netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + if regular_latlon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(center_latitudes.shape) == 1: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lat',) + elif len(center_latitudes.shape) == 2: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lon', 'lat',) + else: + print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + sys.exit(1) + + # Longitude + if len(center_longitudes.shape) == 1: + netcdf.createDimension('lon', center_longitudes.shape[0]) + lon_dim = ('lon',) + elif len(center_longitudes.shape) == 2: + netcdf.createDimension('lon', center_longitudes.shape[1]) + lon_dim = ('lon', 'lat',) + else: + print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( + len(center_longitudes.shape)) + sys.exit(1) + elif roated: + var_dim = ('rlat', 'rlon',) + + # Rotated Latitude + if rotated_lats is None: + print 'ERROR: For rotated grids is needed the rotated latitudes.' 
+ sys.exit(1) + netcdf.createDimension('rlat', len(rotated_lats)) + lat_dim = ('rlat', 'rlon',) + + # Rotated Longitude + if rotated_lons is None: + print 'ERROR: For rotated grids is needed the rotated longitudes.' + sys.exit(1) + netcdf.createDimension('rlon', len(rotated_lons)) + lon_dim = ('rlat', 'rlon',) + elif lcc or mercator: + var_dim = ('y', 'x',) + + netcdf.createDimension('y', len(lcc_y)) + lat_dim = ('y', 'x',) + + netcdf.createDimension('x', len(lcc_x)) + lon_dim = ('y', 'x',) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if levels is not None: + netcdf.createDimension('lev', len(levels)) + + # Bounds + if boundary_latitudes is not None: + # print boundary_latitudes.shape + # print len(boundary_latitudes[0, 0]) + try: + netcdf.createDimension('nv', len(boundary_latitudes[0, 0])) + except TypeError: + netcdf.createDimension('nv', boundary_latitudes.shape[1]) + + # sys.exit() + + # Time + netcdf.createDimension('time', None) + + # ===== Variables ===== + # Time + if date is None: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] 
+ else: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + # print u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second)) + # Unit('hour since 1970-01-01 00:00:00.0000000 UTC') + time.units = str(Unit('hours').offset_by_time( + encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = hours + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=True) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + lats[:] = center_latitudes + + if boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=True) + # print lat_bnds[:].shape, boundary_latitudes.shape + lat_bnds[:] = boundary_latitudes + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=True) + + lons.units = "degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + # print 'lons:', lons[:].shape, center_longitudes.shape + lons[:] = center_longitudes + if boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=True) + # print lon_bnds[:].shape, boundary_longitudes.shape + lon_bnds[:] = boundary_longitudes + + if roated: + # Rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), zlib=True) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + rlat[:] = rotated_lats + + # Rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=True) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + rlon[:] = rotated_lons + if lcc or mercator: 
+ x_var = netcdf.createVariable('x', 'd', ('x',), zlib=True) + x_var.units = Unit("km").symbol + x_var.long_name = "x coordinate of projection" + x_var.standard_name = "projection_x_coordinate" + x_var[:] = lcc_x + + y_var = netcdf.createVariable('y', 'd', ('y',), zlib=True) + y_var.units = Unit("km").symbol + y_var.long_name = "y coordinate of projection" + y_var.standard_name = "projection_y_coordinate" + y_var[:] = lcc_y + + cell_area_dim = var_dim + # Levels + if levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=True) + lev.units = Unit("m").symbol + lev.positive = 'up' + lev[:] = levels + + # All variables + if len(data_list) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=True) + var[:] = 0 + for variable in data_list: + # print ('time',) + var_dim + var = netcdf.createVariable(variable['name'], 'f', ('time',) + var_dim, zlib=True) + var.units = Unit(variable['units']).symbol + if 'long_name' in variable: + var.long_name = str(variable['long_name']) + if 'standard_name' in variable: + var.standard_name = str(variable['standard_name']) + if 'cell_method' in variable: + var.cell_method = str(variable['cell_method']) + var.coordinates = "lat lon" + if cell_area is not None: + var.cell_measures = 'area: cell_area' + if regular_latlon: + var.grid_mapping = 'crs' + elif roated: + var.grid_mapping = 'rotated_pole' + elif lcc: + var.grid_mapping = 'Lambert_conformal' + elif mercator: + var.grid_mapping = 'mercator' + try: + var[:] = variable['data'] + except ValueError: + print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + + # Grid mapping + if regular_latlon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif roated: + # Rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + 
mapping.grid_mapping_name = 'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = north_pole_lat + mapping.grid_north_pole_longitude = north_pole_lon + elif lcc: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = lat_1_2 + mapping.longitude_of_central_meridian = lon_0 + mapping.latitude_of_projection_origin = lat_0 + elif mercator: + # Mercator + mapping = netcdf.createVariable('mercator', 'i') + mapping.grid_mapping_name = "mercator" + mapping.longitude_of_projection_origin = lon_0 + mapping.standard_parallel = lat_ts + + # Cell area + if cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + # print c_area[:].shape, cell_area.shape + c_area[:] = cell_area + + if global_attributes is not None: + netcdf.setncatts(global_attributes) + + netcdf.close() diff --git a/hermesv3_gr/modules/writing/writer_cmaq.py b/hermesv3_gr/modules/writing/writer_cmaq.py new file mode 100644 index 0000000000000000000000000000000000000000..7b3480c780f442559400412192c7367b0a61b0e6 --- /dev/null +++ b/hermesv3_gr/modules/writing/writer_cmaq.py @@ -0,0 +1,624 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import numpy as np +from netCDF4 import Dataset +from mpi4py import MPI +from hermesv3_gr.modules.writing.writer import Writer +from hermesv3_gr.config import settings + + +class WriterCmaq(Writer): + """ + Class to Write the output file for CMAQ Chemical Transport Model CCTM. + + :param path: Path to the destination file. + :type path: str + + :param grid: Grid of the destination file. + :type grid: Grid + + :param levels: List with the levels of the grid. + :type levels: list + + :param date: Date of the output file + :type date: datetime.datetime + + :param hours: List with the timestamp hours. + :type hours: list. + + :param global_attributes_path: Path to the file that contains the static global attributes. + :type global_attributes_path: str + + :param compress: Indicates if you want to compress the netCDF variable data. + :type compress: bool + + :param parallel: Indicates if you want to write in parallel mode. + :type parallel. 
    bool
    """

    def __init__(self, path, grid, levels, date, hours, global_attributes_path, compress=True, parallel=False):
        super(WriterCmaq, self).__init__(path, grid, levels, date, hours, global_attributes_path, compress, parallel)

        # Order in which the IOAPI global attributes must be written to the file.
        self.global_attributes_order = [
            'IOAPI_VERSION', 'EXEC_ID', 'FTYPE', 'CDATE', 'CTIME', 'WDATE', 'WTIME', 'SDATE', 'STIME', 'TSTEP', 'NTHIK',
            'NCOLS', 'NROWS', 'NLAYS', 'NVARS', 'GDTYP', 'P_ALP', 'P_BET', 'P_GAM', 'XCENT', 'YCENT', 'XORIG', 'YORIG',
            'XCELL', 'YCELL', 'VGTYP', 'VGTOP', 'VGLVLS', 'GDNAM', 'UPNAM', 'FILEDESC', 'HISTORY', 'VAR-LIST']

    def unit_change(self, variable, data):
        """
        Scale the given variable's data to the units expected by CMAQ.

        :param variable: Name of the pollutant whose data is converted.
        :type variable: str

        :param data: Data array to convert (or None, returned unchanged).

        :return: Converted data.
        """
        from cf_units import Unit

        if data is not None:
            # Look up the source units of this pollutant.
            units = None
            for var_name in self.variables_attributes:
                if var_name == variable:
                    units = self.variables_attributes[var_name]['units']
                    break

            # NOTE(review): both branches apply the same factor (1000 * cell
            # area), and the error message below names 'mol.s-1.m-2' /
            # 'kg.s-1.m-2' while the checks accept 'mol.s-1' / 'g.s-1' —
            # confirm the intended unit convention with the authors.
            if Unit(units).symbol == Unit('mol.s-1').symbol:
                data = data * 1000 * self.grid.cell_area
            elif Unit(units).symbol == Unit('g.s-1').symbol:
                data = data * 1000 * self.grid.cell_area
            else:
                settings.write_log('ERROR: Check the .err file to get more info.')
                if settings.rank == 0:
                    raise TypeError("The unit '{0}' of specie {1} is not defined correctly. ".format(units, variable) +
                                    "Should be 'mol.s-1.m-2' or 'kg.s-1.m-2'")
                sys.exit(1)
        return data

    def change_variable_attributes(self):
        """
        Modify the emission list to be consistent to use the output as input for CMAQ model.

        :return: Emission list ready for CMAQ
        :rtype: dict
        """
        from cf_units import Unit

        # IOAPI requires fixed-width attribute strings: units/long_name padded
        # to 16 chars, var_desc padded to 80 chars.
        new_variable_dict = {}
        for variable in self.variables_attributes:
            if Unit(variable['units']).symbol == Unit('mol.s-1').symbol:
                new_variable_dict[variable['name']] = {
                    'units': "{:<16}".format('mole/s'),
                    'var_desc': "{:<80}".format(variable['long_name']),
                    'long_name': "{:<16}".format(variable['name']),
                }
            elif Unit(variable['units']).symbol == Unit('g.s-1').symbol:
                new_variable_dict[variable['name']] = {
                    'units': "{:<16}".format('g/s'),
                    'var_desc': "{:<80}".format(variable['long_name']),
                    'long_name': "{:<16}".format(variable['name']),
                }
            else:
                settings.write_log('ERROR: Check the .err file to get more info.')
                if settings.rank == 0:
                    raise TypeError("The unit '{0}' of specie {1} is not ".format(variable['units'], variable['name']) +
                                    "defined correctly. Should be 'mol.s-1' or 'g.s-1'")
                sys.exit(1)

        # From here on variables_attributes is a dict keyed by pollutant name.
        self.variables_attributes = new_variable_dict

    @staticmethod
    def create_tflag(st_date, hours_array, num_vars):
        """
        Create the content of the CMAQ variable TFLAG

        :param st_date: Starting date
        :type st_date: datetime.datetime

        :param hours_array: Array with as elements as time steps. Each element has the delta hours from the starting
                date.
        :type hours_array: numpy.array

        :param num_vars: Number of variables that will contain the NetCDF.
        :type num_vars: int

        :return: Array with the content of TFLAG
        :rtype: numpy.array
        """
        from datetime import timedelta

        a = np.array([[[]]])

        # One (YYYYDDD, HHMMSS) pair per variable per time step.
        for inc_hours in hours_array:
            date = st_date + timedelta(hours=inc_hours)
            b = np.array([[int(date.strftime('%Y%j'))], [int(date.strftime('%H%M%S'))]] * num_vars)
            a = np.append(a, b)

        a.shape = (len(hours_array), 2, num_vars)
        return a

    @staticmethod
    def str_var_list(var_list):
        """
        Transform a list to a string with the elements with 16 white spaces.

        :param var_list: List of variables.
        :type var_list: list

        :return: List transformed on string.
        :rtype: str
        """
        str_var_list = ""
        for var in var_list:
            # Each name left-justified in a 16-character field (IOAPI VAR-LIST).
            str_var_list += "{:<16}".format(var)

        return str_var_list

    def read_global_attributes(self):
        """
        Read the static IOAPI global attributes from the configured CSV file.

        Attributes missing from the file (or when no file is configured) fall
        back to the defaults below, with a warning.

        :return: Dictionary of global attribute name -> value.
        :rtype: dict
        """
        import pandas as pd
        from warnings import warn as warning
        # Type of each configurable attribute, used to parse the CSV values.
        float_atts = ['VGTOP']
        int_atts = ['FTYPE', 'NTHIK', 'VGTYP']
        str_atts = ['EXEC_ID', 'GDNAM']
        list_float_atts = ['VGLVLS']

        # Defaults used when an attribute is absent from the CSV.
        atts_dict = {
            'EXEC_ID': "{:<80}".format('0.1alpha'),
            'FTYPE': np.int32(1),
            'NTHIK': np.int32(1),
            'VGTYP': np.int32(7),
            'VGTOP': np.float32(5000.),
            'VGLVLS': np.array([1., 0.], dtype=np.float32),
            'GDNAM': "{:<16}".format(''),
        }

        if self.global_attributes_path is not None:
            df = pd.read_csv(self.global_attributes_path)

            for att in atts_dict.iterkeys():
                try:
                    if att in int_atts:
                        atts_dict[att] = np.int32(df.loc[df['attribute'] == att, 'value'].item())
                    elif att in float_atts:
                        atts_dict[att] = np.float32(df.loc[df['attribute'] == att, 'value'].item())
                    elif att in str_atts:
                        atts_dict[att] = str(df.loc[df['attribute'] == att, 'value'].item())
                    elif att in list_float_atts:
                        atts_dict[att] = np.array(df.loc[df['attribute'] == att, 'value'].item().split(),
                                                  dtype=np.float32)
                except ValueError:
                    # Attribute not found in the CSV: keep the default.
                    settings.write_log('WARNING: The global attribute {0} is not defined;'.format(att) +
                                       ' Using default value {0}'.format(atts_dict[att]))
                    if settings.rank == 0:
                        warning('WARNING: The global attribute {0} is not defined; Using default value {1}'.format(
                            att, atts_dict[att]))

        else:
            settings.write_log('WARNING: Check the .err file to get more information.')
            message = 'WARNING: No output attributes defined, check the output_attributes'
            message += ' parameter of the configuration file.\nUsing default values:'
            for key, value in atts_dict.iteritems():
                message += '\n\t{0} = {1}'.format(key, value)
            if settings.rank == 0:
                warning(message)

        return atts_dict

    def create_global_attributes(self, var_list):
        """
        Create the global attributes and the order that they have to be filled.

        :param var_list: List of variables
        :type var_list: list

        :return: Dict of global attributes and a list with the keys ordered.
        :rtype: tuple
        """
        from datetime import datetime

        global_attributes = self.read_global_attributes()

        # TSTEP is encoded as HHMMSS (hours * 10000).
        if len(self.hours) > 1:
            tstep = (self.hours[1] - self.hours[0]) * 10000
        else:
            tstep = 1 * 10000

        now = datetime.now()
        global_attributes['IOAPI_VERSION'] = 'None: made only with NetCDF libraries'
        global_attributes['CDATE'] = np.int32(now.strftime('%Y%j'))
        global_attributes['CTIME'] = np.int32(now.strftime('%H%M%S'))
        global_attributes['WDATE'] = np.int32(now.strftime('%Y%j'))
        global_attributes['WTIME'] = np.int32(now.strftime('%H%M%S'))
        global_attributes['SDATE'] = np.int32(self.date.strftime('%Y%j'))
        global_attributes['STIME'] = np.int32(self.date.strftime('%H%M%S'))
        global_attributes['TSTEP'] = np.int32(tstep)
        global_attributes['NLAYS'] = np.int32(len(self.levels))
        global_attributes['NVARS'] = np.int32(len(var_list))
        global_attributes['UPNAM'] = "{:<16}".format('HERMESv3')
        global_attributes['FILEDESC'] = 'Emissions generated by HERMESv3_GR.'
        global_attributes['HISTORY'] = \
            'Code developed by Barcelona Supercomputing Center (BSC, https://www.bsc.es/).' + \
            'Developer: Carles Tena Medina (carles.tena@bsc.es)' + \
            'Reference: Guevara et al., 2018, GMD., in preparation.'
        global_attributes['VAR-LIST'] = self.str_var_list(var_list)

        # NOTE(review): only the 'lcc' grid type fills the projection
        # attributes (GDTYP, NCOLS, ...) — other grid types appear unsupported
        # here; confirm intended behaviour for non-LCC grids.
        if self.grid.grid_type == 'lcc':
            global_attributes['GDTYP'] = np.int32(2)
            global_attributes['NCOLS'] = np.int32(self.grid.nx)
            global_attributes['NROWS'] = np.int32(self.grid.ny)
            global_attributes['P_ALP'] = np.float(self.grid.lat_1)
            global_attributes['P_BET'] = np.float(self.grid.lat_2)
            global_attributes['P_GAM'] = np.float(self.grid.lon_0)
            global_attributes['XCENT'] = np.float(self.grid.lon_0)
            global_attributes['YCENT'] = np.float(self.grid.lat_0)
            global_attributes['XORIG'] = np.float(self.grid.x_0) - np.float(self.grid.inc_x) / 2
            global_attributes['YORIG'] = np.float(self.grid.y_0) - np.float(self.grid.inc_y) / 2
            global_attributes['XCELL'] = np.float(self.grid.inc_x)
            global_attributes['YCELL'] = np.float(self.grid.inc_y)

        return global_attributes

    @staticmethod
    def create_cmaq_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, levels=None, date=None,
                           hours=None, regular_lat_lon=False, rotated=False, nx=None, ny=None, lat_1=None, lat_2=None,
                           lon_0=None, lat_0=None, x_0=None, y_0=None, inc_x=None, inc_y=None):
        """
        Build a complete CMAQ netCDF file in one call (serial or parallel).

        NOTE(review): this method looks stale — it calls
        change_variable_attributes / create_global_attributes /
        write_serial_netcdf statically with signatures that do not match the
        instance methods above, and 'settings.writing_serial' is not set
        anywhere in this chunk.  Confirm whether it is still used before
        relying on it.

        :param netcdf_path: Path of the file to create.
        :param center_latitudes: Grid cell center latitudes.
        :param center_longitudes: Grid cell center longitudes.
        :param data_list: List of variable dictionaries to write.
        :param levels: Vertical levels of the grid.
        :param date: Starting date of the output.
        :param hours: Timestamp hours for each time step.
        :param regular_lat_lon: True for a regular lat-lon grid.
        :param rotated: True for a rotated-pole grid.
        :param nx: Number of columns of the grid.
        :param ny: Number of rows of the grid.
        :param lat_1: First standard parallel (LCC).
        :param lat_2: Second standard parallel (LCC).
        :param lon_0: Central meridian longitude.
        :param lat_0: Projection origin latitude.
        :param x_0: X origin of the grid.
        :param y_0: Y origin of the grid.
        :param inc_x: Cell size on the X axis.
        :param inc_y: Cell size on the Y axis.
        :return: None
        """

        data_list, var_list = WriterCmaq.change_variable_attributes(data_list)

        if settings.writing_serial:
            WriterCmaq.write_serial_netcdf(
                netcdf_path, center_latitudes, center_longitudes, data_list,
                levels=levels, date=date, hours=hours,
                global_attributes=WriterCmaq.create_global_attributes(date, nx, ny, len(levels), lat_1, lat_2, lon_0,
                                                                      lat_0, x_0, y_0, inc_x, inc_y, var_list),
                regular_lat_lon=regular_lat_lon,
                rotated=rotated, )
        else:
            # Parallel branch of create_cmaq_netcdf (continuation from above).
            WriterCmaq.write_parallel_netcdf(
                netcdf_path, center_latitudes, center_longitudes, data_list,
                levels=levels, date=date, hours=hours,
                global_attributes=WriterCmaq.create_global_attributes(date, nx, ny, len(levels), lat_1, lat_2, lon_0,
                                                                      lat_0, x_0, y_0, inc_x, inc_y, var_list),
                regular_lat_lon=regular_lat_lon,
                rotated=rotated, )

    @staticmethod
    def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, levels=None, date=None, hours=None,
                     global_attributes=None, regular_lat_lon=False, rotated=False):
        """
        Write a CMAQ (IOAPI-like) netCDF file for an LCC grid.

        Regular lat-lon and rotated grids are rejected with an error.

        :param netcdf_path: Path of the file to create.
        :param center_latitudes: Grid cell center latitudes (2D array).
        :param center_longitudes: Grid cell center longitudes (2D array).
        :param data_list: List of variable dictionaries ('name', 'units',
                'long_name', 'var_desc', 'data').
        :param levels: Vertical levels of the grid.
        :param date: Starting date of the output.
        :param hours: Timestamp hours for each time step.
        :param global_attributes: Tuple of (attributes dict, ordered key list).
        :param regular_lat_lon: True for a regular lat-lon grid (unsupported).
        :param rotated: True for a rotated-pole grid (unsupported).
        :return: None
        """
        if regular_lat_lon:
            settings.write_log('ERROR: Check the .err file to get more info.')
            if settings.rank == 0:
                raise TypeError('ERROR: Regular Lat Lon grid not implemented for CMAQ')
            sys.exit(1)

        elif rotated:
            settings.write_log('ERROR: Check the .err file to get more info.')
            if settings.rank == 0:
                raise TypeError('ERROR: Rotated grid not implemented for CMAQ')
            sys.exit(1)

        netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4")

        # ===== Dimensions =====
        netcdf.createDimension('TSTEP', len(hours))
        netcdf.createDimension('DATE-TIME', 2)
        netcdf.createDimension('LAY', len(levels))
        netcdf.createDimension('VAR', len(data_list))
        netcdf.createDimension('ROW', center_latitudes.shape[0])
        netcdf.createDimension('COL', center_longitudes.shape[1])

        # ===== Variables =====
        tflag = netcdf.createVariable('TFLAG', 'i', ('TSTEP', 'VAR', 'DATE-TIME',))
        tflag.setncatts({'units': "{:<16}".format(''), 'long_name': "{:<16}".format('TFLAG'),
                         'var_desc': "{:<80}".format('Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS')})
        tflag[:] = WriterCmaq.create_tflag(date, hours, len(data_list))

        # Rest of variables
        for variable in data_list:
            var = netcdf.createVariable(variable['name'], 'f', ('TSTEP', 'LAY', 'ROW', 'COL',), zlib=True)
            var.units = variable['units']
            var.long_name = str(variable['long_name'])
            var.var_desc = str(variable['var_desc'])
            var[:] = variable['data']

        # ===== Global attributes =====
        # NOTE(review): expects 'global_attributes' to be a (dict, order-list)
        # tuple — the instance method create_global_attributes above returns
        # only the dict; confirm what callers actually pass here.
        global_attributes, order = global_attributes
        for attribute in order:
            netcdf.setncattr(attribute, global_attributes[attribute])

        netcdf.close()

    def create_parallel_netcdf(self):
        """
        Create the empty netCDF skeleton (dimensions, TFLAG, per-pollutant
        variables and global attributes) that the ranks will later fill in
        parallel mode.
        """
        st_time = timeit.default_timer()
        settings.write_log("\tCreating parallel NetCDF file.", level=2)
        # netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4", parallel=True, comm=settings.comm, info=MPI.Info())
        netcdf = Dataset(self.path, mode='w', format="NETCDF4")

        # ===== Dimensions =====
        settings.write_log("\t\tCreating NetCDF dimensions.", level=2)
        netcdf.createDimension('TSTEP', len(self.hours))
        # netcdf.createDimension('TSTEP', None)
        settings.write_log("\t\t\t'TSTEP' dimension: {0}".format('UNLIMITED ({0})'.format(len(self.hours))), level=3)

        netcdf.createDimension('DATE-TIME', 2)
        settings.write_log("\t\t\t'DATE-TIME' dimension: {0}".format(2), level=3)

        netcdf.createDimension('LAY', len(self.levels))
        settings.write_log("\t\t\t'LAY' dimension: {0}".format(len(self.levels)), level=3)

        netcdf.createDimension('VAR', len(self.variables_attributes))
        settings.write_log("\t\t\t'VAR' dimension: {0}".format(len(self.variables_attributes)), level=3)

        netcdf.createDimension('ROW', self.grid.center_latitudes.shape[0])
        settings.write_log("\t\t\t'ROW' dimension: {0}".format(self.grid.center_latitudes.shape[0]), level=3)

        netcdf.createDimension('COL', self.grid.center_longitudes.shape[1])
        settings.write_log("\t\t\t'COL' dimension: {0}".format(self.grid.center_longitudes.shape[1]), level=3)

        # ===== Variables =====
        settings.write_log("\t\tCreating NetCDF variables.", level=2)
        tflag = netcdf.createVariable('TFLAG', 'i', ('TSTEP', 'VAR', 'DATE-TIME',))
        tflag.setncatts({'units': "{:<16}".format(''), 'long_name': "{:<16}".format('TFLAG'),
                         'var_desc': "{:<80}".format('Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS')})
        tflag[:] = self.create_tflag(self.date, self.hours, len(self.variables_attributes))
        settings.write_log("\t\t\t'TFLAG' variable created with size: {0}".format(tflag[:].shape), level=3)

        index = 0
        # data_list, var_list = self.change_variable_attributes(self.variables_attributes)
        for var_name in self.variables_attributes.iterkeys():
            index += 1
            # Data is written later by write_parallel_netcdf(); only declare here.
            var = netcdf.createVariable(var_name, 'f', ('TSTEP', 'LAY', 'ROW', 'COL',), zlib=self.compress)
            var.setncatts(self.variables_attributes[var_name])
            settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape) +
                               "\n\t\t\t\t'{0}' variable will be filled later.".format(var_name), level=3)

        # ===== Global attributes =====
        settings.write_log("\t\tCreating NetCDF metadata.", level=2)
        global_attributes = self.create_global_attributes(self.variables_attributes.keys())
        for attribute in self.global_attributes_order:
            netcdf.setncattr(attribute, global_attributes[attribute])

        netcdf.close()

        settings.write_time('WriterCmaq', 'create_parallel_netcdf', timeit.default_timer() - st_time, level=3)

        return True

    def write_serial_netcdf(self, emission_list):
        """
        Write the netCDF in serial mode.

        :param emission_list: List of the processed emissions for the different emission inventories
        :type emission_list: list

        :return: True when it finish well.
+ :rtype: bool + """ + st_time = timeit.default_timer() + + mpi_numpy = False + mpi_vector = True + + # Gathering the index + if mpi_numpy or mpi_vector: + rank_position = np.array([self.grid.x_lower_bound, self.grid.x_upper_bound, self.grid.y_lower_bound, + self.grid.y_upper_bound], dtype='i') + full_position = None + if settings.rank == 0: + full_position = np.empty([settings.size, 4], dtype='i') + settings.comm.Gather(rank_position, full_position, root=0) + + if settings.rank == 0: + netcdf = Dataset(self.path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + settings.write_log("\tCreating NetCDF file.", level=2) + settings.write_log("\t\tCreating NetCDF dimensions.", level=2) + netcdf.createDimension('TSTEP', len(self.hours)) + settings.write_log("\t\t\t'TSTEP' dimension: {0}".format(len(self.hours)), level=3) + netcdf.createDimension('DATE-TIME', 2) + settings.write_log("\t\t\t'DATE-TIME' dimension: {0}".format(2), level=3) + netcdf.createDimension('LAY', len(self.levels)) + settings.write_log("\t\t\t'LAY' dimension: {0}".format(len(self.levels)), level=3) + netcdf.createDimension('VAR', len(self.variables_attributes)) + settings.write_log("\t\t\t'VAR' dimension: {0}".format(len(self.variables_attributes)), level=3) + netcdf.createDimension('ROW', self.grid.center_latitudes.shape[0]) + settings.write_log("\t\t\t'ROW' dimension: {0}".format(self.grid.center_latitudes.shape[0]), level=3) + netcdf.createDimension('COL', self.grid.center_longitudes.shape[1]) + settings.write_log("\t\t\t'COL' dimension: {0}".format(self.grid.center_longitudes.shape[1]), level=3) + + # ===== Variables ===== + settings.write_log("\t\tCreating NetCDF variables.", level=2) + tflag = netcdf.createVariable('TFLAG', 'i', ('TSTEP', 'VAR', 'DATE-TIME',)) + tflag.setncatts({'units': "{:<16}".format(''), 'long_name': "{:<16}".format('TFLAG'), + 'var_desc': "{:<80}".format('Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS')}) + tflag[:] = self.create_tflag(self.date, self.hours, 
len(self.variables_attributes)) + settings.write_log("\t\t\t'TFLAG' variable created with size: {0}".format(tflag[:].shape), level=3) + + full_shape = None + index = 0 + # data_list, var_list = self.change_variable_attributes(self.variables_attributes) + for var_name in self.variables_attributes.iterkeys(): + if settings.size != 1: + settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) + rank_data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) + if mpi_numpy or mpi_vector: + if rank_data is not None: + root_shape = settings.comm.bcast(rank_data.shape, root=0) + if full_shape is None: + full_shape = settings.comm.allgather(rank_data.shape) + # print 'Rank {0} full_shape: {1}\n'.format(settings.rank, full_shape) + if mpi_numpy: + if settings.size != 1: + if settings.rank == 0: + recvbuf = np.empty((settings.size,) + rank_data.shape) + else: + recvbuf = None + if root_shape != rank_data.shape: + rank_data_aux = np.empty(root_shape) + rank_data_aux[:, :, :, :-1] = rank_data + rank_data = rank_data_aux + # print 'Rank {0} data.shape {1}'.format(settings.rank, rank_data.shape) + settings.comm.Gather(rank_data, recvbuf, root=0) + else: + recvbuf = rank_data + elif mpi_vector: + if rank_data is not None: + counts_i = self.tuple_to_index(full_shape) + rank_buff = [rank_data, counts_i[settings.rank]] + if settings.rank == 0: + displacements = self.calculate_displacements(counts_i) + recvdata = np.empty(sum(counts_i), dtype=settings.precision) + else: + displacements = None + recvdata = None + if settings.precision == np.float32: + recvbuf = [recvdata, counts_i, displacements, MPI.FLOAT] + elif settings.precision == np.float64: + recvbuf = [recvdata, counts_i, displacements, MPI.DOUBLE] + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError('ERROR: precision {0} unknown'.format(settings.precision)) + sys.exit(1) + + settings.comm.Gatherv(rank_buff, recvbuf, root=0) + 
+ else: + if settings.size != 1: + data = settings.comm.gather(rank_data, root=0) + else: + data = rank_data + + if settings.rank == 0: + if not (mpi_numpy or mpi_vector): + if settings.size != 1: + try: + data = np.concatenate(data, axis=3) + except (UnboundLocalError, TypeError, IndexError): + data = 0 + st_time = timeit.default_timer() + index += 1 + + var = netcdf.createVariable(var_name, 'f', ('TSTEP', 'LAY', 'ROW', 'COL',), zlib=self.compress) + var.setncatts(self.variables_attributes[var_name]) + # var.units = variable['units'] + # var.long_name = str(variable['long_name']) + # var.var_desc = str(variable['var_desc']) + # var[:] = variable['data'] + + if mpi_numpy: + data = np.ones(var[:].shape, dtype=settings.precision) * 100 + for i in xrange(settings.size): + try: + if i == 0: + var[:, :, :, :full_position[i][3]] = recvbuf[i] + elif i == settings.size - 1: + var[:, :, :, full_position[i][2]:] = recvbuf[i, :, :, :, :-1] + else: + var[:, :, :, full_position[i][2]:full_position[i][3]] = \ + recvbuf[i, :, :, :, : full_shape[i][-1]] + except ValueError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError("ERROR on i {0} ".format(i) + + "data shape: {0} ".format(data[:, :, :, full_position[i][2]:].shape) + + "recvbuf shape {0}".format(recvbuf[i].shape)) + sys.exit(1) + + elif mpi_vector: + if rank_data is not None: + data = np.empty(var[:].shape, dtype=settings.precision) + for i in xrange(settings.size): + # print 'Resizeing {0}'.format(i) + if not i == settings.size - 1: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]: displacements[i + 1]]).reshape(full_shape[i]) + else: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]:]).reshape(full_shape[i]) + else: + data = 0 + var[:] = data + else: + var[:] = data + 
settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape), + level=3) + settings.write_log("\t\tCreating NetCDF metadata.", level=2) + if settings.rank == 0: + # ===== Global attributes ===== + global_attributes = self.create_global_attributes(self.variables_attributes.keys()) + for attribute in self.global_attributes_order: + netcdf.setncattr(attribute, global_attributes[attribute]) + + netcdf.close() + settings.write_time('WriterCmaq', 'write_serial_netcdf', timeit.default_timer() - st_time, level=3) + return True diff --git a/hermesv3_gr/modules/writing/writer_monarch.py b/hermesv3_gr/modules/writing/writer_monarch.py new file mode 100644 index 0000000000000000000000000000000000000000..3321b06c0f0945da74668f8e8d50d6c6bb503663 --- /dev/null +++ b/hermesv3_gr/modules/writing/writer_monarch.py @@ -0,0 +1,800 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import numpy as np +from netCDF4 import Dataset +from mpi4py import MPI +from hermesv3_gr.modules.writing.writer import Writer +from hermesv3_gr.config import settings + + +class WriterMonarch(Writer): + """ + Class to Write the output file in CF-1.6 conventions. + + :param path: Path to the destination file. 
+ :type path: str + + :param grid: Grid of the destination file. + :type grid: Grid + + :param levels: List with the levels of the grid. + :type levels: list + + :param date: Date of the output file + :type date: datetime.datetime + + :param hours: List with the timestamp hours. + :type hours: list. + + :param global_attributes_path: Path to the file that contains the static global attributes. + :type global_attributes_path: str + + :param compress: Indicates if you want to compress the netCDF variable data. + :type compress: bool + + :param parallel: Indicates if you want to write in parallel mode. + :type parallel. bool + """ + + def __init__(self, path, grid, levels, date, hours, global_attributes_path, compress=True, parallel=False): + super(WriterMonarch, self).__init__(path, grid, levels, date, hours, global_attributes_path, compress, parallel) + + # self.global_attributes = { + # 'nom_attribut': 'value_attribut' + # } + + def unit_change(self, variable, data): + """ + Do the unit conversions of the data. + + :param variable: Variable to convert. + :type variable: dict + + :param data: Data to change. + :type data: numpy.array + + :return: Data with the new units. + :rtype: numpy.array + """ + from cf_units import Unit + st_time = timeit.default_timer() + + if data is not None: + units = None + for var_name in self.variables_attributes: + if var_name == variable: + units = self.variables_attributes[var_name]['units'] + break + + if Unit(units).symbol == Unit('mol.s-1.m-2').symbol: + data = data * 1000 + elif Unit(units).symbol == Unit('kg.s-1.m-2').symbol: + pass + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError("The unit '{0}' of specie {1} is not defined correctly. 
".format(units, variable) + + "Should be 'mol.s-1.m-2' or 'kg.s-1.m-2'") + sys.exit(1) + settings.write_time('WriterMonarch', 'unit_change', timeit.default_timer() - st_time, level=3) + return data + + def change_variable_attributes(self): + """ + Modify the emission list to be consistent to use the output as input for CMAQ model. + + :return: Emission list ready for CMAQ + :rtype: dict + """ + new_variable_dict = {} + for variable in self.variables_attributes: + new_variable_dict[variable['name']] = variable + del new_variable_dict[variable['name']]['name'] + + self.variables_attributes = new_variable_dict + + def create_parallel_netcdf(self): + """ + Create an empty netCDF4. + + :return: True at end. + :rtype: bool + """ + from cf_units import Unit, encode_time + + st_time = timeit.default_timer() + + RegularLatLon = False + Rotated = False + LambertConformalConic = False + if self.grid.grid_type == 'global': + RegularLatLon = True + elif self.grid.grid_type == 'rotated': + Rotated = True + elif self.grid.grid_type == 'lcc': + LambertConformalConic = True + + settings.write_log("\tCreating parallel NetCDF file.", level=2) + # netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4", parallel=True, comm=settings.comm, info=MPI.Info()) + netcdf = Dataset(self.path, mode='w', format="NETCDF4") + # print 'NETCDF PATH: {0}'.format(netcdf_path) + + settings.write_log("\t\tCreating NetCDF dimensions.", level=2) + # ===== Dimensions ===== + if RegularLatLon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(self.grid.center_latitudes.shape) == 1: + netcdf.createDimension('lat', self.grid.center_latitudes.shape[0]) + settings.write_log("\t\t\t'lat' dimension: {0}".format(self.grid.center_latitudes.shape[0]), level=3) + lat_dim = ('lat',) + elif len(self.grid.center_latitudes.shape) == 2: + netcdf.createDimension('lat', self.grid.center_latitudes.shape[0]) + settings.write_log("\t\t\t'lat' dimension: {0}".format(self.grid.center_latitudes.shape[0]), level=3) + lat_dim = 
('lon', 'lat', ) + else: + print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format( + len(self.grid.center_latitudes.shape)) + sys.exit(1) + + # Longitude + if len(self.grid.center_longitudes.shape) == 1: + netcdf.createDimension('lon', self.grid.center_longitudes.shape[0]) + settings.write_log("\t\t\t'lon' dimension: {0}".format(self.grid.center_longitudes.shape[0]), level=3) + lon_dim = ('lon',) + elif len(self.grid.center_longitudes.shape) == 2: + netcdf.createDimension('lon', self.grid.center_longitudes.shape[1]) + settings.write_log("\t\t\t'lon' dimension: {0}".format(self.grid.center_longitudes.shape[1]), level=3) + lon_dim = ('lon', 'lat', ) + else: + print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( + len(self.grid.center_longitudes.shape)) + sys.exit(1) + elif Rotated: + var_dim = ('rlat', 'rlon',) + + # Rotated Latitude + if self.grid.rlat is None: + print 'ERROR: For rotated grids is needed the rotated latitudes.' + sys.exit(1) + netcdf.createDimension('rlat', len(self.grid.rlat)) + settings.write_log("\t\t\t'rlat' dimension: {0}".format(len(self.grid.rlat)), level=3) + lat_dim = ('rlat', 'rlon',) + + # Rotated Longitude + if self.grid.rlon is None: + print 'ERROR: For rotated grids is needed the rotated longitudes.' 
+ sys.exit(1) + netcdf.createDimension('rlon', len(self.grid.rlon)) + settings.write_log("\t\t\t'rlon' dimension: {0}".format(len(self.grid.rlon)), level=3) + lon_dim = ('rlat', 'rlon',) + + elif LambertConformalConic: + var_dim = ('y', 'x',) + + netcdf.createDimension('y', len(self.grid.y)) + settings.write_log("\t\t\t'y' dimension: {0}".format(len(self.grid.y)), level=3) + lat_dim = ('y', 'x', ) + + netcdf.createDimension('x', len(self.grid.x)) + settings.write_log("\t\t\t'x' dimension: {0}".format(len(self.grid.x)), level=3) + lon_dim = ('y', 'x', ) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if self.levels is not None: + netcdf.createDimension('lev', len(self.levels)) + settings.write_log("\t\t\t'lev' dimension: {0}".format(len(self.levels)), level=3) + + # Bounds + if self.grid.boundary_latitudes is not None: + # print boundary_latitudes.shape + # print len(boundary_latitudes[0, 0]) + netcdf.createDimension('nv', len(self.grid.boundary_latitudes[0, 0])) + settings.write_log("\t\t\t'nv' dimension: {0}".format(len(self.grid.boundary_latitudes[0, 0])), level=3) + # sys.exit() + + # Time + # netcdf.createDimension('time', None) + netcdf.createDimension('time', len(self.hours)) + settings.write_log("\t\t\t'time' dimension: {0}".format(len(self.hours)), level=3) + + # ===== Variables ===== + settings.write_log("\t\tCreating NetCDF variables.", level=2) + # Time + if self.date is None: + time = netcdf.createVariable('time', 'd', ('time',)) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] 
+ else: + time = netcdf.createVariable('time', 'd', ('time',)) + time.units = str(Unit('hours').offset_by_time(encode_time(self.date.year, self.date.month, self.date.day, + self.date.hour, self.date.minute, self.date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + if settings.rank == 0: + time[:] = self.hours + settings.write_log("\t\t\t'time' variable created with size: {0}".format(time[:].shape), level=3) + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=self.compress) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + if settings.rank == 0: + lats[:] = self.grid.center_latitudes + settings.write_log("\t\t\t'lat' variable created with size: {0}".format(lats[:].shape), level=3) + + if self.grid.boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=self.compress) + # print lat_bnds[:].shape, boundary_latitudes.shape + if settings.rank == 0: + lat_bnds[:] = self.grid.boundary_latitudes + settings.write_log("\t\t\t'lat_bnds' variable created with size: {0}".format(lat_bnds[:].shape), level=3) + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=self.compress) + lons.units = "degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + if settings.rank == 0: + lons[:] = self.grid.center_longitudes + settings.write_log("\t\t\t'lon' variable created with size: {0}".format(lons[:].shape), level=3) + + if self.grid.boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=self.compress) + # print lon_bnds[:].shape, boundary_longitudes.shape + if settings.rank == 0: + lon_bnds[:] = self.grid.boundary_longitudes + settings.write_log("\t\t\t'lon_bnds' variable created with size: 
{0}".format(lon_bnds[:].shape), level=3) + + if Rotated: + # Rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), zlib=self.compress) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + if settings.rank == 0: + rlat[:] = self.grid.rlat + settings.write_log("\t\t\t'rlat' variable created with size: {0}".format(rlat[:].shape), level=3) + + # Rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=self.compress) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + if settings.rank == 0: + rlon[:] = self.grid.rlon + settings.write_log("\t\t\t'rlon' variable created with size: {0}".format(rlon[:].shape), level=3) + if LambertConformalConic: + x_var = netcdf.createVariable('x', 'd', ('x',), zlib=self.compress) + x_var.units = Unit("km").symbol + x_var.long_name = "x coordinate of projection" + x_var.standard_name = "projection_x_coordinate" + if settings.rank == 0: + x_var[:] = self.grid.x + settings.write_log("\t\t\t'x' variable created with size: {0}".format(x_var[:].shape), level=3) + + y_var = netcdf.createVariable('y', 'd', ('y',), zlib=self.compress) + y_var.units = Unit("km").symbol + y_var.long_name = "y coordinate of projection" + y_var.standard_name = "projection_y_coordinate" + if settings.rank == 0: + y_var[:] = self.grid.y + settings.write_log("\t\t\t'y' variable created with size: {0}".format(y_var[:].shape), level=3) + + cell_area_dim = var_dim + # Levels + if self.levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=self.compress) + lev.units = Unit("m").symbol + lev.positive = 'up' + if settings.rank == 0: + lev[:] = self.levels + settings.write_log("\t\t\t'lev' variable created with size: {0}".format(lev[:].shape), level=3) + # print 'DATA LIIIIST {0}'.format(data_list) + # # All variables + if 
len(self.variables_attributes) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=self.compress) + if settings.rank == 0: + var[:] = 0 + + index = 0 + for var_name, variable in self.variables_attributes.iteritems(): + index += 1 + + var = netcdf.createVariable(var_name, 'f', ('time',) + var_dim, zlib=self.compress) + + var.units = Unit(variable['units']).symbol + if 'long_name' in variable: + var.long_name = str(variable['long_name']) + if 'standard_name' in variable: + var.standard_name = str(variable['standard_name']) + if 'cell_method' in variable: + var.cell_method = str(variable['cell_method']) + var.coordinates = "lat lon" + if self.grid.cell_area is not None: + var.cell_measures = 'area: cell_area' + if RegularLatLon: + var.grid_mapping = 'crs' + elif Rotated: + var.grid_mapping = 'rotated_pole' + elif LambertConformalConic: + var.grid_mapping = 'Lambert_conformal' + settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape) + + "\n\t\t\t\t'{0}' variable will be filled later.".format(var_name), level=3) + + settings.write_log("\t\tCreating NetCDF metadata.", level=2) + # Grid mapping + if RegularLatLon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif Rotated: + # Rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + mapping.grid_mapping_name = 'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = self.grid.new_pole_latitude_degrees + mapping.grid_north_pole_longitude = 90 - self.grid.new_pole_longitude_degrees + elif LambertConformalConic: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = "{0}, {1}".format(self.grid.lat_1, self.grid.lat_2) + mapping.longitude_of_central_meridian = self.grid.lon_0 + 
mapping.latitude_of_projection_origin = self.grid.lat_0 + + # Cell area + if self.grid.cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + # print c_area[:].shape, cell_area.shape + # c_area[grid.x_lower_bound:grid.x_upper_bound, grid.y_lower_bound:grid.y_upper_bound] = cell_area + + if self.global_attributes is not None: + netcdf.setncatts(self.global_attributes) + + netcdf.close() + + settings.write_time('WriterMonarch', 'create_parallel_netcdf', timeit.default_timer() - st_time, level=3) + return True + + def write_serial_netcdf(self, emission_list,): + """ + Write the netCDF4 file in serial mode. + + :param emission_list: Data to append. + :type emission_list: list + + :return: True at end. + :rtype: bool + """ + from cf_units import Unit, encode_time + + st_time = timeit.default_timer() + + mpi_numpy = False + mpi_vector = True + + # Gathering the index + if mpi_numpy or mpi_vector: + rank_position = np.array([self.grid.x_lower_bound, self.grid.x_upper_bound, self.grid.y_lower_bound, + self.grid.y_upper_bound], dtype='i') + full_position = None + if settings.rank == 0: + full_position = np.empty([settings.size, 4], dtype='i') + settings.comm.Gather(rank_position, full_position, root=0) + + if settings.rank == 0: + + regular_latlon = False + rotated = False + lcc = False + + if self.grid.grid_type == 'global': + regular_latlon = True + elif self.grid.grid_type == 'rotated': + rotated = True + elif self.grid.grid_type == 'lcc': + lcc = True + settings.write_log("\tCreating NetCDF file.", level=2) + netcdf = Dataset(self.path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + settings.write_log("\t\tCreating NetCDF dimensions.", level=2) + if regular_latlon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(self.grid.center_latitudes.shape) == 1: + settings.write_log("\t\t\t'lat' dimension: 
{0}".format(self.grid.center_latitudes.shape[0]), + level=3) + netcdf.createDimension('lat', self.grid.center_latitudes.shape[0]) + lat_dim = ('lat',) + elif len(self.grid.center_latitudes.shape) == 2: + settings.write_log("\t\t\t'lat' dimension: {0}".format(self.grid.center_latitudes.shape[0]), + level=3) + netcdf.createDimension('lat', self.grid.center_latitudes.shape[0]) + lat_dim = ('lon', 'lat', ) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError( + 'ERROR: Latitudes must be on a 1D or 2D array instead of {0} shape.'.format( + len(self.grid.center_latitudes.shape))) + sys.exit(1) + + # Longitude + if len(self.grid.center_longitudes.shape) == 1: + settings.write_log("\t\t\t'lon' dimension: {0}".format(self.grid.center_longitudes.shape[0]), + level=3) + netcdf.createDimension('lon', self.grid.center_longitudes.shape[0]) + lon_dim = ('lon',) + elif len(self.grid.center_longitudes.shape) == 2: + settings.write_log("\t\t\t'lon' dimension: {0}".format(self.grid.center_longitudes.shape[0]), + level=3) + netcdf.createDimension('lon', self.grid.center_longitudes.shape[1]) + lon_dim = ('lon', 'lat', ) + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError( + 'ERROR: Longitudes must be on a 1D or 2D array instead of {0} shape.'.format( + len(self.grid.center_longitudes.shape))) + sys.exit(1) + elif rotated: + var_dim = ('rlat', 'rlon',) + + # rotated Latitude + if self.grid.rlat is None: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError('ERROR: For rotated grids is needed the rotated latitudes.') + sys.exit(1) + settings.write_log("\t\t'rlat' dimension: {0}".format(len(self.grid.rlat)), level=2) + netcdf.createDimension('rlat', len(self.grid.rlat)) + lat_dim = ('rlat', 'rlon',) + + # rotated Longitude + if self.grid.rlon is None: + settings.write_log('ERROR: Check the .err file 
to get more info.') + if settings.rank == 0: + raise TypeError('ERROR: For rotated grids is needed the rotated longitudes.') + sys.exit(1) + settings.write_log("\t\t\t'rlon' dimension: {0}".format(len(self.grid.rlon)), level=3) + netcdf.createDimension('rlon', len(self.grid.rlon)) + lon_dim = ('rlat', 'rlon',) + + elif lcc: + var_dim = ('y', 'x',) + settings.write_log("\t\t\t'y' dimension: {0}".format(len(self.grid.y)), level=3) + netcdf.createDimension('y', len(self.grid.y)) + lat_dim = ('y', 'x', ) + settings.write_log("\t\t\t'x' dimension: {0}".format(len(self.grid.x)), level=3) + netcdf.createDimension('x', len(self.grid.x)) + lon_dim = ('y', 'x', ) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if self.levels is not None: + settings.write_log("\t\t\t'lev' dimension: {0}".format(len(self.levels)), level=3) + netcdf.createDimension('lev', len(self.levels)) + + # Bounds + if self.grid.boundary_latitudes is not None: + settings.write_log("\t\t\t'nv' dimension: {0}".format(len(self.grid.boundary_latitudes[0, 0])), level=3) + netcdf.createDimension('nv', len(self.grid.boundary_latitudes[0, 0])) + + # Time + settings.write_log("\t\t\t'time' dimension: {0}".format(len(self.hours)), level=3) + netcdf.createDimension('time', len(self.hours)) + + # ===== Variables ===== + settings.write_log("\t\tCreating NetCDF variables.", level=2) + # Time + if self.date is None: + time = netcdf.createVariable('time', 'd', ('time',)) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] 
+ else: + time = netcdf.createVariable('time', 'd', ('time',)) + time.units = str(Unit('hours').offset_by_time(encode_time( + self.date.year, self.date.month, self.date.day, self.date.hour, self.date.minute, + self.date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = self.hours + settings.write_log("\t\t\t'time' variable created with size: {0}".format(time[:].shape), level=3) + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=self.compress) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + lats[:] = self.grid.center_latitudes + settings.write_log("\t\t\t'lat' variable created with size: {0}".format(lats[:].shape), level=3) + + if self.grid.boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=self.compress) + # print lat_bnds[:].shape, boundary_latitudes.shape + lat_bnds[:] = self.grid.boundary_latitudes + settings.write_log( + "\t\t\t'lat_bnds' variable created with size: {0}".format(lat_bnds[:].shape), level=3) + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=self.compress) + lons.units = "degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + lons[:] = self.grid.center_longitudes + settings.write_log("\t\t\t'lon' variable created with size: {0}".format(lons[:].shape), + level=3) + + if self.grid.boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=self.compress) + # print lon_bnds[:].shape, boundary_longitudes.shape + lon_bnds[:] = self.grid.boundary_longitudes + settings.write_log( + "\t\t\t'lon_bnds' variable created with size: {0}".format(lon_bnds[:].shape), level=3) + + if rotated: + # rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), 
zlib=self.compress) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + rlat[:] = self.grid.rlat + settings.write_log("\t\t\t'rlat' variable created with size: {0}".format(rlat[:].shape), level=3) + + # rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=self.compress) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + rlon[:] = self.grid.rlon + settings.write_log("\t\t\t'rlon' variable created with size: {0}".format(rlon[:].shape), level=3) + if lcc: + x_var = netcdf.createVariable('x', 'd', ('x',), zlib=self.compress) + x_var.units = Unit("km").symbol + x_var.long_name = "x coordinate of projection" + x_var.standard_name = "projection_x_coordinate" + x_var[:] = self.grid.x + settings.write_log("\t\t\t'x' variable created with size: {0}".format(x_var[:].shape), level=3) + + y_var = netcdf.createVariable('y', 'd', ('y',), zlib=self.compress) + y_var.units = Unit("km").symbol + y_var.long_name = "y coordinate of projection" + y_var.standard_name = "projection_y_coordinate" + y_var[:] = self.grid.y + settings.write_log("\t\t\t'y' variable created with size: {0}".format(y_var[:].shape), level=3) + + cell_area_dim = var_dim + # Levels + if self.levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=self.compress) + lev.units = Unit("m").symbol + lev.positive = 'up' + lev[:] = self.levels + settings.write_log("\t\t\t'lev' variable created with size: {0}".format(lev[:].shape), level=3) + + if len(self.variables_attributes) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=self.compress) + var[:] = 0 + + full_shape = None + index = 0 + for var_name in self.variables_attributes.iterkeys(): + if settings.size != 1: + settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) + rank_data = 
self.calculate_data_by_var(var_name, emission_list, self.grid.shape) + if mpi_numpy or mpi_vector: + if rank_data is not None: + root_shape = settings.comm.bcast(rank_data.shape, root=0) + if full_shape is None: + full_shape = settings.comm.allgather(rank_data.shape) + # print 'Rank {0} full_shape: {1}\n'.format(settings.rank, full_shape) + + if mpi_numpy: + if settings.size != 1: + if settings.rank == 0: + recvbuf = np.empty((settings.size,) + rank_data.shape) + else: + recvbuf = None + if root_shape != rank_data.shape: + rank_data_aux = np.empty(root_shape) + rank_data_aux[:, :, :, :-1] = rank_data + rank_data = rank_data_aux + # print 'Rank {0} data.shape {1}'.format(settings.rank, rank_data.shape) + settings.comm.Gather(rank_data, recvbuf, root=0) + else: + recvbuf = rank_data + elif mpi_vector: + if rank_data is not None: + counts_i = self.tuple_to_index(full_shape) + rank_buff = [rank_data, counts_i[settings.rank]] + if settings.rank == 0: + displacements = self.calculate_displacements(counts_i) + recvdata = np.empty(sum(counts_i), dtype=settings.precision) + else: + displacements = None + recvdata = None + if settings.precision == np.float32: + recvbuf = [recvdata, counts_i, displacements, MPI.FLOAT] + elif settings.precision == np.float64: + recvbuf = [recvdata, counts_i, displacements, MPI.DOUBLE] + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError('ERROR: precision {0} unknown'.format(settings.precision)) + sys.exit(1) + + settings.comm.Gatherv(rank_buff, recvbuf, root=0) + + else: + if settings.size != 1: + data = settings.comm.gather(rank_data, root=0) + else: + data = rank_data + + if settings.rank == 0: + if not (mpi_numpy or mpi_vector): + if settings.size != 1: + try: + data = np.concatenate(data, axis=3) + except (UnboundLocalError, TypeError, IndexError): + data = 0 + index += 1 + var = netcdf.createVariable(var_name, 'f', ('time',) + var_dim, zlib=self.compress) + + var.units = 
Unit(self.variables_attributes[var_name]['units']).symbol + + if 'long_name' in self.variables_attributes[var_name]: + var.long_name = str(self.variables_attributes[var_name]['long_name']) + + if 'standard_name' in self.variables_attributes[var_name]: + var.standard_name = str(self.variables_attributes[var_name]['standard_name']) + + if 'cell_method' in self.variables_attributes[var_name]: + var.cell_method = str(self.variables_attributes[var_name]['cell_method']) + + var.coordinates = "lat lon" + + if self.grid.cell_area is not None: + var.cell_measures = 'area: cell_area' + if regular_latlon: + var.grid_mapping = 'crs' + elif rotated: + var.grid_mapping = 'rotated_pole' + elif lcc: + var.grid_mapping = 'Lambert_conformal' + + if mpi_numpy: + data = np.ones(var[:].shape, dtype=settings.precision) * 100 + for i in xrange(settings.size): + try: + if i == 0: + var[:, :, :, :full_position[i][3]] = recvbuf[i] + elif i == settings.size - 1: + var[:, :, :, full_position[i][2]:] = recvbuf[i, :, :, :, :-1] + else: + var[:, :, :, full_position[i][2]:full_position[i][3]] = \ + recvbuf[i, :, :, :, : full_shape[i][-1]] + except ValueError: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError("ERROR on i {0} ".format(i) + + "data shape: {0} ".format(data[:, :, :, full_position[i][2]:].shape) + + "recvbuf shape {0}".format(recvbuf[i].shape)) + sys.exit(1) + + elif mpi_vector: + if rank_data is not None: + data = np.empty(var[:].shape, dtype=settings.precision) + for i in xrange(settings.size): + if not i == settings.size - 1: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]: displacements[i + 1]]).reshape(full_shape[i]) + else: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]:]).reshape(full_shape[i]) + else: + data = 0 + var[:] = data + 
else: + var[:] = data + settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape), + level=3) + settings.write_log("\t\tCreating NetCDF metadata.", level=2) + if settings.rank == 0: + # Grid mapping + if regular_latlon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif rotated: + # rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + mapping.grid_mapping_name = 'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = 90 - self.grid.new_pole_latitude_degrees + mapping.grid_north_pole_longitude = self.grid.new_pole_longitude_degrees + elif lcc: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = "{0}, {1}".format(self.grid.lat_1, self.grid.lat_2) + mapping.longitude_of_central_meridian = self.grid.lon_0 + mapping.latitude_of_projection_origin = self.grid.lat_0 + + if self.grid.cell_area is not None: + cell_area = settings.comm.gather(self.grid.cell_area, root=0) + if settings.rank == 0: + # Cell area + if self.grid.cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + + cell_area = np.concatenate(cell_area, axis=1) + + c_area[:] = cell_area + + if settings.rank == 0: + if self.global_attributes is not None: + netcdf.setncatts(self.global_attributes) + if settings.rank == 0: + netcdf.close() + settings.write_time('WriterMonarch', 'write_serial_netcdf', timeit.default_timer() - st_time, level=3) diff --git a/hermesv3_gr/modules/writing/writer_wrf_chem.py b/hermesv3_gr/modules/writing/writer_wrf_chem.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf01cf0dd1cebf6cd8743b1eb1aa4bbd0d7f47d --- /dev/null 
+++ b/hermesv3_gr/modules/writing/writer_wrf_chem.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import timeit +import numpy as np +from netCDF4 import Dataset +from mpi4py import MPI +from hermesv3_gr.config import settings +from hermesv3_gr.modules.writing.writer import Writer + + +class WriterWrfChem(Writer): + """ + Class to Write the output file for the WRF-CHEM Chemical Transport Model. + + :param path: Path to the destination file. + :type path: str + + :param grid: Grid of the destination file. + :type grid: Grid + + :param levels: List with the levels of the grid. + :type levels: list + + :param date: Date of the output file + :type date: datetime.datetime + + :param hours: List with the timestamp hours. + :type hours: list. + + :param global_attributes_path: Path to the file that contains the static global attributes. + :type global_attributes_path: str + + :param compress: Indicates if you want to compress the netCDF variable data. + :type compress: bool + + :param parallel: Indicates if you want to write in parallel mode. + :type parallel. 
bool
    """

    def __init__(self, path, grid, levels, date, hours, global_attributes_path, compress=True, parallel=False):
        super(WriterWrfChem, self).__init__(path, grid, levels, date, hours, global_attributes_path, compress, parallel)

        # Exact order in which the WRF-CHEM global attributes must appear in the NetCDF header.
        self.global_attributes_order = [
            'TITLE', 'START_DATE', 'WEST-EAST_GRID_DIMENSION', 'SOUTH-NORTH_GRID_DIMENSION',
            'BOTTOM-TOP_GRID_DIMENSION', 'DX', 'DY', 'GRIDTYPE', 'DIFF_OPT', 'KM_OPT', 'DAMP_OPT', 'DAMPCOEF', 'KHDIF',
            'KVDIF', 'MP_PHYSICS', 'RA_LW_PHYSICS', 'RA_SW_PHYSICS', 'SF_SFCLAY_PHYSICS', 'SF_SURFACE_PHYSICS',
            'BL_PBL_PHYSICS', 'CU_PHYSICS', 'SF_LAKE_PHYSICS', 'SURFACE_INPUT_SOURCE', 'SST_UPDATE', 'GRID_FDDA',
            'GFDDA_INTERVAL_M', 'GFDDA_END_H', 'GRID_SFDDA', 'SGFDDA_INTERVAL_M', 'SGFDDA_END_H',
            'WEST-EAST_PATCH_START_UNSTAG', 'WEST-EAST_PATCH_END_UNSTAG', 'WEST-EAST_PATCH_START_STAG',
            'WEST-EAST_PATCH_END_STAG', 'SOUTH-NORTH_PATCH_START_UNSTAG', 'SOUTH-NORTH_PATCH_END_UNSTAG',
            'SOUTH-NORTH_PATCH_START_STAG', 'SOUTH-NORTH_PATCH_END_STAG', 'BOTTOM-TOP_PATCH_START_UNSTAG',
            'BOTTOM-TOP_PATCH_END_UNSTAG', 'BOTTOM-TOP_PATCH_START_STAG', 'BOTTOM-TOP_PATCH_END_STAG', 'GRID_ID',
            'PARENT_ID', 'I_PARENT_START', 'J_PARENT_START', 'PARENT_GRID_RATIO', 'DT', 'CEN_LAT', 'CEN_LON',
            'TRUELAT1', 'TRUELAT2', 'MOAD_CEN_LAT', 'STAND_LON', 'POLE_LAT', 'POLE_LON', 'GMT', 'JULYR', 'JULDAY',
            'MAP_PROJ', 'MMINLU', 'NUM_LAND_CAT', 'ISWATER', 'ISLAKE', 'ISICE', 'ISURBAN', 'ISOILWATER']

    def unit_change(self, variable, data):
        """
        Scale the emission data of one pollutant to the units expected by WRF-CHEM.

        The source units are looked up in self.variables_attributes by variable name.

        :param variable: Name of the variable (pollutant) to scale.
        :type variable: str

        :param data: Emission data to scale; returned unchanged if None.

        :return: The scaled emission data.
        """
        from cf_units import Unit

        if data is not None:
            units = None
            # Find the source units declared for this variable.
            for var_name in self.variables_attributes:
                if var_name == variable:
                    units = self.variables_attributes[var_name]['units']
                    break

            if Unit(units).symbol == Unit('mol.h-1.km-2').symbol:
                # NOTE(review): the factors below are 10e6 = 1e7 and 10e3 = 1e4, but the stated
                # conversions (m2 -> km2 and kmol -> mol) are 1e6 and 1e3 — confirm intended factors.
                # m2 to km2
                # kmol to mol
                # 3600 -> from s to h
                data = data * 10e6 * 10e3 * 3600
            elif Unit(units).symbol ==
Unit('ug.s-1.m-2').symbol: + # 10e9 -> from kg to ug + data = data * 10e9 + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError("The unit '{0}' of specie {1} is not defined correctly.".format(units, variable) + + " Should be 'mol.h-1.km-2' or 'ug.s-1.m-2'") + sys.exit(1) + return data + + def change_variable_attributes(self): + # TODO Documentation + """ + + :return: + """ + from cf_units import Unit + + new_variable_dict = {} + for variable in self.variables_attributes: + if Unit(variable['units']).symbol == Unit('mol.h-1.km-2').symbol: + new_variable_dict[variable['name']] = { + 'FieldType': np.int32(104), + 'MemoryOrder': "XYZ", + 'description': "EMISSIONS", + 'units': "mol km^-2 hr^-1", + 'stagger': "", + 'coordinates': "XLONG XLAT" + } + elif Unit(variable['units']).symbol == Unit('ug.s-1.m-2').symbol: + new_variable_dict[variable['name']] = { + 'FieldType': np.int32(104), + 'MemoryOrder': "XYZ", + 'description': "EMISSIONS", + 'units': "ug/m3 m/s", + 'stagger': "", + 'coordinates': "XLONG XLAT" + } + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError("The unit '{0}' of specie {1} is not ".format(variable['units'], variable['name']) + + "defined correctly. 
Should be 'mol.h-1.km-2' or 'ug.s-1.m-2'") + sys.exit(1) + + self.variables_attributes = new_variable_dict + + def read_global_attributes(self): + # TODO Documentation + """ + + :return: + """ + import pandas as pd + from warnings import warn as warning + + float_atts = ['DAMPCOEF', 'KHDIF', 'KVDIF', 'CEN_LAT', 'CEN_LON', 'DT'] + int_atts = ['BOTTOM-TOP_GRID_DIMENSION', 'DIFF_OPT', 'KM_OPT', 'DAMP_OPT', + 'MP_PHYSICS', 'RA_LW_PHYSICS', 'RA_SW_PHYSICS', 'SF_SFCLAY_PHYSICS', 'SF_SURFACE_PHYSICS', + 'BL_PBL_PHYSICS', 'CU_PHYSICS', 'SF_LAKE_PHYSICS', 'SURFACE_INPUT_SOURCE', 'SST_UPDATE', + 'GRID_FDDA', 'GFDDA_INTERVAL_M', 'GFDDA_END_H', 'GRID_SFDDA', 'SGFDDA_INTERVAL_M', 'SGFDDA_END_H', + 'BOTTOM-TOP_PATCH_START_UNSTAG', 'BOTTOM-TOP_PATCH_END_UNSTAG', 'BOTTOM-TOP_PATCH_START_STAG', + 'BOTTOM-TOP_PATCH_END_STAG', 'GRID_ID', 'PARENT_ID', 'I_PARENT_START', 'J_PARENT_START', + 'PARENT_GRID_RATIO', 'NUM_LAND_CAT', 'ISWATER', 'ISLAKE', 'ISICE', 'ISURBAN', 'ISOILWATER', + 'HISTORY'] + str_atts = ['GRIDTYPE', 'MMINLU'] + if self.grid.grid_type == 'lcc': + lat_ts = np.float32(self.grid.lat_0) + elif self.grid.grid_type == 'mercator': + lat_ts = np.float32(self.grid.lat_ts) + + atts_dict = { + 'BOTTOM-TOP_GRID_DIMENSION': np.int32(45), + 'GRIDTYPE': 'C', + 'DIFF_OPT': np.int32(1), + 'KM_OPT': np.int32(4), + 'DAMP_OPT': np.int32(3), + 'DAMPCOEF': np.float32(0.2), + 'KHDIF': np.float32(0.), + 'KVDIF': np.float32(0.), + 'MP_PHYSICS': np.int32(6), + 'RA_LW_PHYSICS': np.int32(4), + 'RA_SW_PHYSICS': np.int32(4), + 'SF_SFCLAY_PHYSICS': np.int32(2), + 'SF_SURFACE_PHYSICS': np.int32(2), + 'BL_PBL_PHYSICS': np.int32(8), + 'CU_PHYSICS': np.int32(0), + 'SF_LAKE_PHYSICS': np.int32(0), + 'SURFACE_INPUT_SOURCE': np.int32(1), + 'SST_UPDATE': np.int32(0), + 'GRID_FDDA': np.int32(0), + 'GFDDA_INTERVAL_M': np.int32(0), + 'GFDDA_END_H': np.int32(0), + 'GRID_SFDDA': np.int32(0), + 'SGFDDA_INTERVAL_M': np.int32(0), + 'SGFDDA_END_H': np.int32(0), + 'BOTTOM-TOP_PATCH_START_UNSTAG': np.int32(1), + 
'BOTTOM-TOP_PATCH_END_UNSTAG': np.int32(44), + 'BOTTOM-TOP_PATCH_START_STAG': np.int32(1), + 'BOTTOM-TOP_PATCH_END_STAG': np.int32(45), + 'GRID_ID': np.int32(1), + 'PARENT_ID': np.int32(0), + 'I_PARENT_START': np.int32(1), + 'J_PARENT_START': np.int32(1), + 'PARENT_GRID_RATIO': np.int32(1), + 'DT': np.float32(18.), + 'MMINLU': 'MODIFIED_IGBP_MODIS_NOAH', + 'NUM_LAND_CAT': np.int32(41), + 'ISWATER': np.int32(17), + 'ISLAKE': np.int32(-1), + 'ISICE': np.int32(15), + 'ISURBAN': np.int32(13), + 'ISOILWATER': np.int32(14), + 'CEN_LAT': lat_ts, + 'CEN_LON': np.float32(self.grid.lon_0) + } + + if self.global_attributes_path is not None: + df = pd.read_csv(self.global_attributes_path) + + for att in atts_dict.iterkeys(): + try: + if att in int_atts: + atts_dict[att] = np.int32(df.loc[df['attribute'] == att, 'value'].item()) + elif att in float_atts: + atts_dict[att] = np.float32(df.loc[df['attribute'] == att, 'value'].item()) + elif att in str_atts: + atts_dict[att] = str(df.loc[df['attribute'] == att, 'value'].item()) + except ValueError: + print 'A warning has occurred. Check the .err file to get more information.' + if settings.rank == 0: + warning('The global attribute {0} is not defined; Using default value {1}'.format( + att, atts_dict[att])) + + else: + settings.write_log('WARNING: Check the .err file to get more information.') + message = 'WARNING: No output attributes defined, check the output_attributes' + message += ' parameter of the configuration file.\nUsing default values:' + for key, value in atts_dict.iteritems(): + message += '\n\t{0} = {1}'.format(key, value) + if settings.rank == 0: + warning(message) + + return atts_dict + + def create_global_attributes(self): + # TODO Documentation + """ + Create the global attributes that have to be filled. + """ + + global_attributes = self.read_global_attributes() + + global_attributes['TITLE'] = 'Emissions generated by HERMESv3_GR.' 
+ global_attributes['START_DATE'] = self.date.strftime("%Y-%m-%d_%H:%M:%S") + global_attributes['JULYR'] = np.int32(self.date.year) + global_attributes['JULDAY'] = np.int32(self.date.strftime("%j")) + global_attributes['GMT'] = np.float32(self.date.hour) + global_attributes['HISTORY'] = \ + 'Code developed by Barcelona Supercomputing Center (BSC, https://www.bsc.es/). ' + \ + 'Developer: Carles Tena Medina (carles.tena@bsc.es). ' + \ + 'Reference: Guevara et al., 2018, GMD., in preparation.' + + if self.grid.grid_type == 'lcc' or self.grid.grid_type == 'mercator': + global_attributes['WEST-EAST_GRID_DIMENSION'] = np.int32(self.grid.nx + 1) + global_attributes['SOUTH-NORTH_GRID_DIMENSION'] = np.int32(self.grid.ny + 1) + global_attributes['DX'] = np.float32(self.grid.inc_x) + global_attributes['DY'] = np.float32(self.grid.inc_y) + global_attributes['SURFACE_INPUT_SOURCE'] = np.int32(1) + global_attributes['WEST-EAST_PATCH_START_UNSTAG'] = np.int32(1) + global_attributes['WEST-EAST_PATCH_END_UNSTAG'] = np.int32(self.grid.nx) + global_attributes['WEST-EAST_PATCH_START_STAG'] = np.int32(1) + global_attributes['WEST-EAST_PATCH_END_STAG'] = np.int32(self.grid.nx + 1) + global_attributes['SOUTH-NORTH_PATCH_START_UNSTAG'] = np.int32(1) + global_attributes['SOUTH-NORTH_PATCH_END_UNSTAG'] = np.int32(self.grid.ny) + global_attributes['SOUTH-NORTH_PATCH_START_STAG'] = np.int32(1) + global_attributes['SOUTH-NORTH_PATCH_END_STAG'] = np.int32(self.grid.ny + 1) + + global_attributes['POLE_LAT'] = np.float32(90) + global_attributes['POLE_LON'] = np.float32(0) + + if self.grid.grid_type == 'lcc': + global_attributes['MAP_PROJ'] = np.int32(1) + global_attributes['TRUELAT1'] = np.float32(self.grid.lat_1) + global_attributes['TRUELAT2'] = np.float32(self.grid.lat_2) + global_attributes['MOAD_CEN_LAT'] = np.float32(self.grid.lat_0) + global_attributes['STAND_LON'] = np.float32(self.grid.lon_0) + elif self.grid.grid_type == 'mercator': + global_attributes['MAP_PROJ'] = np.int32(3) + 
global_attributes['TRUELAT1'] = np.float32(self.grid.lat_ts) + global_attributes['TRUELAT2'] = np.float32(0) + global_attributes['MOAD_CEN_LAT'] = np.float32(self.grid.lat_ts) + global_attributes['STAND_LON'] = np.float32(self.grid.lon_0) + + return global_attributes + + def create_times_var(self): + # TODO Documentation + """ + + :return: + """ + from datetime import timedelta + import netCDF4 + + aux_times_list = [] + + for hour in self.hours: + aux_date = self.date + timedelta(hours=hour) + aux_times_list.append(aux_date.strftime("%Y-%m-%d_%H:%M:%S")) + + str_out = netCDF4.stringtochar(np.array(aux_times_list)) + return str_out + + def create_parallel_netcdf(self): + # TODO Documentation + """ + + :return: + """ + st_time = timeit.default_timer() + settings.write_log("\tCreating parallel NetCDF file.", level=2) + netcdf = Dataset(self.path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + settings.write_log("\t\tCreating NetCDF dimensions.", level=2) + netcdf.createDimension('Time', len(self.hours)) + # netcdf.createDimension('Time', None) + settings.write_log("\t\t\t'Time' dimension: {0}".format('UNLIMITED ({0})'.format(len(self.hours))), + level=3) + netcdf.createDimension('DateStrLen', 19) + settings.write_log("\t\t\t'DateStrLen' dimension: 19", level=3) + netcdf.createDimension('west_east', self.grid.center_longitudes.shape[1]) + settings.write_log("\t\t\t'west_east' dimension: {0}".format(len(self.hours)), level=3) + netcdf.createDimension('south_north', self.grid.center_latitudes.shape[0]) + settings.write_log("\t\t\t'south_north' dimension: {0}".format(self.grid.center_latitudes.shape[0]), + level=3) + netcdf.createDimension('emissions_zdim', len(self.levels)) + settings.write_log("\t\t\t'emissions_zdim' dimension: {0}".format(len(self.levels)), level=3) + + # ===== Variables ===== + settings.write_log("\t\tCreating NetCDF variables.", level=2) + times = netcdf.createVariable('Times', 'S1', ('Time', 'DateStrLen', )) + times[:] = 
self.create_times_var() + settings.write_log("\t\t\t'Times' variable created with size: {0}".format(times[:].shape), level=3) + + index = 0 + for var_name in self.variables_attributes.iterkeys(): + index += 1 + var = netcdf.createVariable(var_name, 'f', ('Time', 'emissions_zdim', 'south_north', 'west_east',), + zlib=self.compress) + var.setncatts(self.variables_attributes[var_name]) + settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape) + + "\n\t\t\t\t'{0}' variable will be filled later.".format(var_name), level=3) + + # ===== Global attributes ===== + settings.write_log("\t\tCreating NetCDF metadata.", level=2) + global_attributes = self.create_global_attributes() + for attribute in self.global_attributes_order: + netcdf.setncattr(attribute, global_attributes[attribute]) + + netcdf.close() + + settings.write_time('WriterCmaq', 'create_parallel_netcdf', timeit.default_timer() - st_time, level=3) + + return True + + def write_serial_netcdf(self, emission_list): + # TODO Documentation + """ + + :param emission_list: + :return: + """ + st_time = timeit.default_timer() + + # Gathering the index + rank_position = np.array( + [self.grid.x_lower_bound, self.grid.x_upper_bound, self.grid.y_lower_bound, self.grid.y_upper_bound], + dtype='i') + full_position = None + if settings.rank == 0: + full_position = np.empty([settings.size, 4], dtype='i') + settings.comm.Gather(rank_position, full_position, root=0) + + if settings.rank == 0: + settings.write_log("\tCreating NetCDF file.", level=2) + netcdf = Dataset(self.path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + settings.write_log("\t\tCreating NetCDF dimensions.", level=2) + netcdf.createDimension('Time', None) + settings.write_log("\t\t\t'Time' dimension: UNLIMITED", level=3) + netcdf.createDimension('DateStrLen', 19) + settings.write_log("\t\t\t'DateStrLen' dimension: 19", level=3) + netcdf.createDimension('west_east', self.grid.center_longitudes.shape[1]) + 
settings.write_log("\t\t\t'west_east' dimension: {0}".format(len(self.hours)), level=3) + netcdf.createDimension('south_north', self.grid.center_latitudes.shape[0]) + settings.write_log("\t\t\t'south_north' dimension: {0}".format(self.grid.center_latitudes.shape[0]), + level=3) + netcdf.createDimension('emissions_zdim', len(self.levels)) + settings.write_log("\t\t\t'emissions_zdim' dimension: {0}".format(len(self.levels)), level=3) + + # ===== Variables ===== + settings.write_log("\t\tCreating NetCDF variables.", level=2) + times = netcdf.createVariable('Times', 'S1', ('Time', 'DateStrLen', )) + times[:] = self.create_times_var() + settings.write_log("\t\t\t'Times' variable created with size: {0}".format(times[:].shape), level=3) + + full_shape = None + index = 0 + + # self.change_variable_attributes() + + for var_name in self.variables_attributes.iterkeys(): + if settings.size != 1: + settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) + rank_data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) + if rank_data is not None: + # root_shape = settings.comm.bcast(rank_data.shape, root=0) + if full_shape is None: + full_shape = settings.comm.allgather(rank_data.shape) + + counts_i = self.tuple_to_index(full_shape) + rank_buff = [rank_data, counts_i[settings.rank]] + if settings.rank == 0: + displacements = self.calculate_displacements(counts_i) + recvdata = np.empty(sum(counts_i), dtype=settings.precision) + else: + displacements = None + recvdata = None + if settings.precision == np.float32: + recvbuf = [recvdata, counts_i, displacements, MPI.FLOAT] + elif settings.precision == np.float64: + recvbuf = [recvdata, counts_i, displacements, MPI.DOUBLE] + else: + settings.write_log('ERROR: Check the .err file to get more info.') + if settings.rank == 0: + raise TypeError('ERROR: precision {0} unknown'.format(settings.precision)) + sys.exit(1) + + settings.comm.Gatherv(rank_buff, recvbuf, root=0) + + if settings.rank == 0: + if 
settings.size != 1: + try: + data = np.concatenate(data, axis=3) + except (UnboundLocalError, TypeError, IndexError): + data = 0 + st_time = timeit.default_timer() + index += 1 + + var = netcdf.createVariable(var_name, 'f', ('Time', 'emissions_zdim', 'south_north', 'west_east',), + zlib=self.compress) + var.setncatts(self.variables_attributes[var_name]) + + var_time = timeit.default_timer() + + # data_list = []#np.empty(shape, dtype=np.float64) + + if rank_data is not None: + data = np.empty(var[:].shape, dtype=settings.precision) + for i in xrange(settings.size): + # print 'Resizeing {0}'.format(i) + if not i == settings.size - 1: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]: displacements[i + 1]]).reshape(full_shape[i]) + else: + data[:, :, full_position[i][0]:full_position[i][1], + full_position[i][2]:full_position[i][3]] = \ + np.array(recvbuf[0][displacements[i]:]).reshape(full_shape[i]) + else: + data = 0 + var[:] = data + settings.write_log("\t\t\t'{0}' variable created with size: {1}".format(var_name, var[:].shape), + level=3) + settings.write_log("\t\tCreating NetCDF metadata.", level=2) + if settings.rank == 0: + # ===== Global attributes ===== + global_attributes = self.create_global_attributes() + for attribute in self.global_attributes_order: + netcdf.setncattr(attribute, global_attributes[attribute]) + + netcdf.close() + settings.write_time('WriterWrfChem', 'write_serial_netcdf', timeit.default_timer() - st_time, level=3) + return True diff --git a/hermesv3_gr/tools/__init__.py b/hermesv3_gr/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hermesv3_gr/tools/coordinates_tools.py b/hermesv3_gr/tools/coordinates_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..8dbc7cd496f0ea0ff1c9b92df8349599b0eea9ec --- /dev/null +++ 
b/hermesv3_gr/tools/coordinates_tools.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +def get_grid_area(filename): + """ + Calculate the area for each cell of the grid using CDO + + :param filename: Path to the file to calculate the cell area + :type filename: str + + :return: Area of each cell of the grid. + :rtype: numpy.array + """ + from cdo import Cdo + from netCDF4 import Dataset + + cdo = Cdo() + s = cdo.gridarea(input=filename) + nc_aux = Dataset(s, mode='r') + grid_area = nc_aux.variables['cell_area'][:] + nc_aux.close() + + return grid_area + + +def latlon2rotated(lon_pole_deg, lat_pole_deg, lon_deg, lat_deg, lon_min=-180): + # TODO Documentation + """ + Transform lat lon degrees into the rotated coordinates. + + :param lon_pole_deg: + :param lat_pole_deg: + :param lon_deg: + :param lat_deg: + :param lon_min: + :return: + """ + import math + + degrees_to_radians = math.pi / 180. + radians_to_degrees = 180. 
/ math.pi + + # lon_max = lon_min + 360 + + # stlm=sin(tlm) + sin_lat_pole_rad = math.sin(lat_pole_deg * degrees_to_radians) + # ctlm=cos(tlm) + cos_lat_pole_rad = math.cos(lat_pole_deg * degrees_to_radians) + # stph=sin(tph) + # sin_lon_pole_rad = math.sin(lon_pole_deg * degrees_to_radians) + # ctph=cos(tph) + # cos_lon_pole_rad = math.cos(lon_pole_deg * degrees_to_radians) + + # relm=(xlon-tlm0d)*dtr !distance from the centre lon (in rad) + distance_from_center_lon = (lon_deg - lon_pole_deg) * degrees_to_radians + # crlm=cos(relm) !cos of this distance + cos_distance_from_center_lon = math.cos(distance_from_center_lon) + # srlm=sin(relm) !sin of this distance + sin_distance_from_center_lon = math.sin(distance_from_center_lon) + # aph=xlat*dtr !lat in rad + lat_rad = lat_deg * degrees_to_radians + # cph=cos(aph) !cos of lat + cos_lat_rad = math.cos(lat_rad) + # sph=sin(aph) !sin of lat + sin_lat_rad = math.sin(lat_rad) + + # cc=cph*crlm !cos of lat times cos of lon distance + cycdx = cos_lat_rad * cos_distance_from_center_lon + # anum=cph*srlm !cos of lat times sin of lon distance + # denom=ctph0*cc+stph0*sph !cos of the centre lat times cc plus sin of the centre lat times sin of lat + # tlm=atan2(anum,denom) + rotated_lon = math.atan2(cos_lat_rad * sin_distance_from_center_lon, + cos_lat_pole_rad * cycdx + sin_lat_pole_rad * sin_lat_rad) + # tph=asin(ctph0*sph-stph0*cc) + sin_rotated_lat = cos_lat_pole_rad * sin_lat_rad - sin_lat_pole_rad * cycdx + if sin_rotated_lat > 1.: + sin_rotated_lat = 1. + if sin_rotated_lat < -1.: + sin_rotated_lat = -1. + + rotated_lat = math.asin(sin_rotated_lat) + + return rotated_lon * radians_to_degrees, rotated_lat * radians_to_degrees + + +def rotated2latlon(lon_pole_deg, lat_pole_deg, lon_deg, lat_deg, lon_min=-180): + # TODO Documentation + """ + Transform rotated coordinates into lat lon degrees. 
    :param lon_pole_deg: Longitude of the rotated pole (degrees).
    :param lat_pole_deg: Latitude of the rotated pole (degrees).
    :param lon_deg: Rotated longitudes to transform (array, degrees).
    :param lat_deg: Rotated latitudes to transform (array, degrees).
    :param lon_min: Lower bound of the output longitude interval [lon_min, lon_min + 360).
    :return: Tuple (longitudes, latitudes) in geographic degrees.
    """
    import numpy as np
    import math

    degrees_to_radians = math.pi / 180.
    # radians_to_degrees = 180. / math.pi

    # Positive east to negative east
    lon_pole_deg -= 180

    tph0 = lat_pole_deg * degrees_to_radians
    tlm = lon_deg * degrees_to_radians
    tph = lat_deg * degrees_to_radians
    tlm0d = lon_pole_deg
    ctph0 = np.cos(tph0)
    stph0 = np.sin(tph0)

    stlm = np.sin(tlm)
    ctlm = np.cos(tlm)
    stph = np.sin(tph)
    ctph = np.cos(tph)

    # Latitude
    sph = (ctph0 * stph) + (stph0 * ctph * ctlm)
    # Clamp to the valid arcsin domain to guard against floating point rounding.
    sph[sph > 1.] = 1.
    sph[sph < -1.] = -1.

    aph = np.arcsin(sph)
    aphd = aph / degrees_to_radians

    # Longitude
    anum = ctph * stlm
    denom = (ctlm * ctph - stph0 * sph) / ctph0
    relm = np.arctan2(anum, denom) - math.pi
    almd = relm / degrees_to_radians + tlm0d

    # Wrap longitudes into [lon_min, lon_min + 360).
    almd[almd > (lon_min + 360)] -= 360
    almd[almd < lon_min] += 360

    return almd, aphd


def rotated2latlon_single(lon_pole_deg, lat_pole_deg, lon_deg, lat_deg, lon_min=-180):
    """
    Transform one rotated coordinate into lat lon degrees (scalar version of rotated2latlon).

    :param lon_pole_deg: Longitude of the rotated pole (degrees).
    :param lat_pole_deg: Latitude of the rotated pole (degrees).
    :param lon_deg: Rotated longitude to transform (degrees).
    :param lat_deg: Rotated latitude to transform (degrees).
    :param lon_min: Lower bound of the output longitude interval [lon_min, lon_min + 360).
    :return: Tuple (longitude, latitude) in geographic degrees.
    """
    import math

    degrees_to_radians = math.pi / 180.
    # radians_to_degrees = 180. / math.pi

    # lon_max = lon_min + 360

    # Positive east to negative east
    lon_pole_deg -= 180

    tph0 = lat_pole_deg * degrees_to_radians
    tlm = lon_deg * degrees_to_radians
    tph = lat_deg * degrees_to_radians
    tlm0d = lon_pole_deg
    ctph0 = math.cos(tph0)
    stph0 = math.sin(tph0)

    stlm = math.sin(tlm)
    ctlm = math.cos(tlm)
    stph = math.sin(tph)
    ctph = math.cos(tph)

    # Latitude
    sph = (ctph0 * stph) + (stph0 * ctph * ctlm)
    # NOTE(review): unlike the array version above, sph is NOT clamped to [-1, 1] here,
    # so math.asin can raise ValueError when rounding pushes |sph| slightly above 1 — confirm.
    # if sph > 1.:
    #     sph = 1.
    # if sph < -1.:
    #     sph = -1.
+ + aph = math.asin(sph) + aphd = aph / degrees_to_radians + + # Longitude + anum = ctph * stlm + denom = (ctlm * ctph - stph0 * sph) / ctph0 + relm = math.atan2(anum, denom) - math.pi + almd = relm / degrees_to_radians + tlm0d + + if almd > (lon_min + 360): + almd -= 360 + elif almd < lon_min: + almd += 360 + + return almd, aphd + + +def create_bounds(coords, number_vertices=2): + """ + Calculate the vertices coordinates. + + :param coords: Coordinates in degrees (latitude or longitude) + :type coords: numpy.array + + :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the boundaries. + (by default 2) + :type number_vertices: int + + :return: Array with as many elements as vertices for each value of coords. + :rtype: numpy.array + """ + import numpy as np + + interval = coords[1] - coords[0] + + coords_left = coords - interval / 2 + coords_right = coords + interval / 2 + if number_vertices == 2: + bound_coords = np.dstack((coords_left, coords_right)) + elif number_vertices == 4: + bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left)) + else: + raise ValueError('The number of vertices of the boudaries must be 2 or 4') + + return bound_coords + + +def create_bounds_esmpy(coords, spheric=False): + # TODO Documentation + """ + + :param coords: + :param spheric: + :return: + """ + import numpy as np + + interval = coords[1] - coords[0] + + bound_coords = coords - interval / 2 + if not spheric: + bound_coords = np.append(bound_coords, [bound_coords[-1] + interval]) + + return bound_coords + + +def create_regular_rotated(lat_origin, lon_origin, lat_inc, lon_inc, n_lat, n_lon): + # TODO Documentation + """ + + :param lat_origin: + :param lon_origin: + :param lat_inc: + :param lon_inc: + :param n_lat: + :param n_lon: + :return: + """ + import numpy as np + + center_latitudes = np.arange(lat_origin, lat_origin + (n_lat*lat_inc), lat_inc, dtype=np.float) + center_longitudes = np.arange(lon_origin, 
lon_origin + (n_lon*lon_inc), lon_inc, dtype=np.float) + + corner_latitudes = create_bounds_esmpy(center_latitudes) + corner_longitudes = create_bounds_esmpy(center_longitudes) + + return center_latitudes, center_longitudes, corner_latitudes, corner_longitudes + + +def create_regular_old(lat_origin, lon_origin, lat_inc, lon_inc, n_lat, n_lon): + # TODO Documentation + import numpy as np + + center_latitudes = np.arange(lat_origin, lat_origin + (n_lat*lat_inc), lat_inc, dtype=np.float) + center_longitudes = np.arange(lon_origin, lon_origin + (n_lon*lon_inc), lon_inc, dtype=np.float) + + corner_latitudes = create_bounds(center_latitudes) + corner_longitudes = create_bounds(center_longitudes) + + return center_latitudes, center_longitudes, corner_latitudes, corner_longitudes + + +def create_regular_grid(center_lat, center_lon, west_boundary, south_boundary, inc_lat, inc_lon): + """ + Create a custom grid with the given parameters. The grid is divided in 4 arrays: + - Center Latitudes + - Center Longitudes + - Boundary Latitudes (# latitudes +1) + - Boundary Longitudes (# longitudes +1) + + :param center_lat: Latitude of the center of the grid (degrees). + :type center_lat: float + + :param center_lon: Longitude of the center of the grid (degrees). + :type center_lon: float + + :param west_boundary: Distance from de center to the western boundary (degrees) + (not to the center of the first cell) + :type west_boundary: float + + :param south_boundary: Distance from de center to the southern boundary (degrees) + (not to the center of the first cell) + :type south_boundary: float + + :param inc_lat: Vertical resolution of each cell (degrees). + :type inc_lat: float + + :param inc_lon: Horizontal resolution of each cell (degrees) + :type inc_lon: float + + :return: Arrays with the Center Latitudes, Center Longitudes, Boundary Latitudes, Boundary Longitudes. 
+ :rtype: tuple (numpy.array, numpy.array, numpy.array, numpy.array) + """ + import numpy as np + + lat_origin = center_lat - abs(south_boundary) # + (inc_lat/2) + lon_origin = center_lon - abs(west_boundary) # + (inc_lon/2) + n_lat = (abs(south_boundary)/inc_lat)*2 + n_lon = (abs(west_boundary)/inc_lon)*2 + + center_latitudes = np.arange(lat_origin + inc_lat, lat_origin + (n_lat*inc_lat) - inc_lat + inc_lat / 2, inc_lat, + dtype=np.float) + center_longitudes = np.arange(lon_origin + inc_lon, lon_origin + (n_lon*inc_lon) - inc_lon + inc_lon / 2, inc_lon, + dtype=np.float) + + corner_latitudes = create_bounds(center_latitudes) + corner_longitudes = create_bounds(center_longitudes) + + center_latitudes = np.concatenate([ + [lat_origin + inc_lat / 2 - inc_lat / 4], + center_latitudes, + [lat_origin + (n_lat * inc_lat) - inc_lat / 2 + inc_lat / 4]]) + + center_longitudes = np.concatenate([ + [lon_origin + inc_lon / 2 - inc_lon / 4], + center_longitudes, + [lon_origin + (n_lon * inc_lon) - inc_lon / 2 + inc_lon / 4]]) + + corner_latitudes = np.concatenate([ + [[[lat_origin, lat_origin + inc_lat / 2]]], + corner_latitudes, + [[[lat_origin + (n_lat * inc_lat) - inc_lat / 2, lat_origin + (n_lat * inc_lat)]]]], axis=1) + + corner_longitudes = np.concatenate([ + [[[lon_origin, lon_origin + inc_lon / 2]]], + corner_longitudes, + [[[lon_origin + (n_lon * inc_lon) - inc_lon / 2, lon_origin + (n_lon * inc_lon)]]]], axis=1) + + return center_latitudes, center_longitudes, corner_latitudes, corner_longitudes + + +if __name__ == '__main__': + import numpy as np + new_pole_lon_d = 20.0 # lonpole tlm0d + new_pole_lat_d = 35.0 # latpole tph0d + print latlon2rotated(new_pole_lon_d, new_pole_lat_d, 20.0, 35.0) + print latlon2rotated(new_pole_lon_d, new_pole_lat_d, -20.2485, -9.9036) + print rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, 0, 0) + print rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, -51., -35.) 
+ print rotated2latlon(new_pole_lon_d, new_pole_lat_d, np.array([-51., -51., -51., -51.]), + np.array([-35., -34.9, -34.8, -34.7])) diff --git a/hermesv3_gr/tools/custom_calendar.py b/hermesv3_gr/tools/custom_calendar.py new file mode 100644 index 0000000000000000000000000000000000000000..c221328de61f4fc73ea6fa0c4c252aa9629f29bd --- /dev/null +++ b/hermesv3_gr/tools/custom_calendar.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os +import sys +import datetime +import holidays + + +def custom_holidays(zone, year): + """ + Calculate the festivity days that appear in the library holidays adding the Maundy Thursday and the God Friday + + :param zone: Name of the country. It has to appear and has to have the same format (capital letters) of the library + holidays: https://pypi.python.org/pypi/holidays + :type zone: str + + :param year: Year to get the festivities. + :type year: int + + :return: Dictionary with the festivity days. 
+ :rtype: dict + """ + c_holidays = get_holidays(zone, year) + + # Adding more festivities than appear in the library + pascua_sunday = pascua(year) + c_holidays.update({pascua_sunday - datetime.timedelta(days=3): 'Maundy Thursday'}) # Jueves Santo + c_holidays.update({pascua_sunday - datetime.timedelta(days=2): 'God Friday'}) # Viernes Santo + + return c_holidays + + +def get_holidays(zone, year): + """ + Find the holidays for the selected zone and year. + + :param zone: Name of the country. It has to appear and has to have the same format (capital letters) of the library + holidays: https://pypi.python.org/pypi/holidays + :type zone: str + + :param year: Year to found the Pascua. + :type year: int + + :return: Dictionary with the festivity days. + :rtype: dict + """ + method_to_call = getattr(holidays, zone) + result = method_to_call(years=year) + return result + + +def pascua(year): + """ + Calculate the "Pascua" date + + :param year: Year to found the Pascua. + :type year: int + + :return: Sunday of Pascua. + :rtype: datetime.date + """ + # Magic constants + m = 24 + n = 5 + + # Remainders + a = year % 19 + b = year % 4 + c = year % 7 + d = (19 * a + m) % 30 + e = (2 * b + 4 * c + 6 * d + n) % 7 + + if d + e < 10: + day = d + e + 22 + month = 3 + else: + day = d + e - 9 + month = 4 + + # Special exceptions + if day == 26 and month == 4: + day = 19 + if day == 25 and month == 4 and d == 28 and e == 6 and a > 10: + day = 18 + + return datetime.date(year, month, day) diff --git a/hermesv3_gr/tools/download_benchmark.py b/hermesv3_gr/tools/download_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..5d58bc8331a1a907235a16519c8e8e83d4dc1960 --- /dev/null +++ b/hermesv3_gr/tools/download_benchmark.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import sys +import os + + +def query_yes_no(question, default="yes"): + valid = {"yes": True, "y": True, "1": True, 1: True, + "no": False, "n": False, "0": False, 0: False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = raw_input().lower() + if default is not None and choice == '': + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") + + +def check_args(args, exe_str): + if len(args) == 0: + print("Missing destination path after '{0}'. e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES".format(exe_str)) + sys.exit(1) + elif len(args) > 1: + print("Too much arguments through '{0}'. Only destination path is needed e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES".format(exe_str)) + sys.exit(1) + else: + dir_path = args[0] + + if not os.path.exists(dir_path): + if query_yes_no("'{0}' does not exist. Do you want to create it? 
".format(dir_path)): + os.makedirs(dir_path) + else: + sys.exit(0) + + return dir_path + + +def download_files(parent_path): + from ftplib import FTP + + ftp = FTP('bscesftp.bsc.es') + ftp.login() + dst_file = os.path.join(parent_path, 'HERMESv3_GR_Benchmark.zip') + + ftp.retrbinary('RETR HERMESv3_GR_Benchmark.zip', open(dst_file, 'wb').write) + + ftp.quit() + + return dst_file + + +def unzip_files(zippath, parent_path): + import zipfile + + zip_file = zipfile.ZipFile(zippath, 'r') + zip_file.extractall(parent_path) + zip_file.close() + os.remove(zippath) + + +def download_benchmark(): + argv = sys.argv[1:] + + parent_dir = check_args(argv, 'hermesv3_gr_download_benchmark') + + zippath = download_files(parent_dir) + unzip_files(zippath, parent_dir) + + +if __name__ == '__main__': + download_benchmark() diff --git a/hermesv3_gr/tools/netcdf_tools.py b/hermesv3_gr/tools/netcdf_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d72dede20951049303d4de871368074ac39090 --- /dev/null +++ b/hermesv3_gr/tools/netcdf_tools.py @@ -0,0 +1,683 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +import sys +from netCDF4 import Dataset +from mpi4py import MPI + +ICOMM = MPI.COMM_WORLD +COMM = ICOMM.Split(color=0, key=0) +RANK = COMM.Get_rank() +SIZE = COMM.Get_size() + + +def open_netcdf(netcdf_path): + """ + Open a netCDF file. + + :param netcdf_path: Path to the netCDF file. + :type netcdf_path: str + + :return: netCDF + :rtype: Dataset + """ + netcdf = Dataset(netcdf_path, mode='a') + return netcdf + + +def close_netcdf(netcdf): + """ + Close the netCDF. + + :param netcdf: netCDF + :type netcdf: Dataset + :return: + """ + netcdf.close() + + +def get_grid_area(filename): + """ + Calculate the area of each cell. + + :param filename: Full path to the NetCDF to calculate the cell areas. + :type filename: str + + :return: Returns the area of each cell. + :rtype: numpy.array + """ + from cdo import Cdo + + cdo = Cdo() + src = cdo.gridarea(input=filename) + netcdf = Dataset(src, mode='r') + grid_area = netcdf.variables['cell_area'][:] + netcdf.close() + + return grid_area + + +def extract_vars(netcdf_path, variables_list, attributes_list=()): + """ + Get the data from the list of variabbles. + + :param netcdf_path: Path to the netCDF file + :type netcdf_path: str + + :param variables_list: List of the names of the variables to get. + :type variables_list: list + + :param attributes_list: List of the names of the variable attributes to get. + :type attributes_list: list + + :return: List of the variables from the netCDF as a dictionary with data as values and with the other keys their + attributes. + :rtype: list. 
+ """ + data_list = [] + netcdf = Dataset(netcdf_path, mode='r') + for var in variables_list: + if var == 'emi_nox_no2': + var1 = var + var2 = 'emi_nox' + else: + var1 = var2 = var + dict_aux = \ + { + 'name': var1, + 'data': netcdf.variables[var2][:], + } + for attribute in attributes_list: + dict_aux.update({attribute: netcdf.variables[var2].getncattr(attribute)}) + data_list.append(dict_aux) + netcdf.close() + + return data_list + + +def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, + levels=None, date=None, hours=None, + boundary_latitudes=None, boundary_longitudes=None, cell_area=None, global_attributes=None, + regular_latlon=False, + rotated=False, rotated_lats=None, rotated_lons=None, north_pole_lat=None, north_pole_lon=None, + lcc=False, lcc_x=None, lcc_y=None, lat_1_2=None, lon_0=None, lat_0=None, + mercator=False, lat_ts=None): + # TODO Documentation + """ + + :param netcdf_path: + :param center_latitudes: + :param center_longitudes: + :param data_list: + :param levels: + :param date: + :param hours: + :param boundary_latitudes: + :param boundary_longitudes: + :param cell_area: + :param global_attributes: + :param regular_latlon: + :param rotated: + :param rotated_lats: + :param rotated_lons: + :param north_pole_lat: + :param north_pole_lon: + :param lcc: + :param lcc_x: + :param lcc_y: + :param lat_1_2: + :param lon_0: + :param lat_0: + :param mercator: + :param lat_ts: + :return: + """ + + from cf_units import Unit, encode_time + + if not (regular_latlon or lcc or rotated or mercator): + regular_latlon = True + netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + if regular_latlon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(center_latitudes.shape) == 1: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lat',) + elif len(center_latitudes.shape) == 2: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lon', 'lat', ) + else: + print 'ERROR: 
Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + sys.exit(1) + + # Longitude + if len(center_longitudes.shape) == 1: + netcdf.createDimension('lon', center_longitudes.shape[0]) + lon_dim = ('lon',) + elif len(center_longitudes.shape) == 2: + netcdf.createDimension('lon', center_longitudes.shape[1]) + lon_dim = ('lon', 'lat', ) + else: + print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + sys.exit(1) + elif rotated: + var_dim = ('rlat', 'rlon',) + + # Rotated Latitude + if rotated_lats is None: + print 'ERROR: For rotated grids is needed the rotated latitudes.' + sys.exit(1) + netcdf.createDimension('rlat', len(rotated_lats)) + lat_dim = ('rlat', 'rlon',) + + # Rotated Longitude + if rotated_lons is None: + print 'ERROR: For rotated grids is needed the rotated longitudes.' + sys.exit(1) + netcdf.createDimension('rlon', len(rotated_lons)) + lon_dim = ('rlat', 'rlon',) + elif lcc or mercator: + var_dim = ('y', 'x',) + + netcdf.createDimension('y', len(lcc_y)) + lat_dim = ('y', 'x', ) + + netcdf.createDimension('x', len(lcc_x)) + lon_dim = ('y', 'x', ) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if levels is not None: + netcdf.createDimension('lev', len(levels)) + + # Bounds + if boundary_latitudes is not None: + try: + netcdf.createDimension('nv', len(boundary_latitudes[0, 0])) + except TypeError: + netcdf.createDimension('nv', boundary_latitudes.shape[1]) + + # Time + netcdf.createDimension('time', None) + + # ===== Variables ===== + # Time + if date is None: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] 
+ else: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + u = Unit('hours') + # print u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second)) + # Unit('hour since 1970-01-01 00:00:00.0000000 UTC') + time.units = str(u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, + date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = hours + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=True) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + lats[:] = center_latitudes + + if boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=True) + # print lat_bnds[:].shape, boundary_latitudes.shape + lat_bnds[:] = boundary_latitudes + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=True) + + lons.units = "degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + # print 'lons:', lons[:].shape, center_longitudes.shape + lons[:] = center_longitudes + if boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=True) + # print lon_bnds[:].shape, boundary_longitudes.shape + lon_bnds[:] = boundary_longitudes + + if rotated: + # Rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), zlib=True) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + rlat[:] = rotated_lats + + # Rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=True) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + rlon[:] = rotated_lons + if lcc or 
mercator: + x = netcdf.createVariable('x', 'd', ('x',), zlib=True) + x.units = Unit("km").symbol + x.long_name = "x coordinate of projection" + x.standard_name = "projection_x_coordinate" + x[:] = lcc_x + + y = netcdf.createVariable('y', 'd', ('y',), zlib=True) + y.units = Unit("km").symbol + y.long_name = "y coordinate of projection" + y.standard_name = "projection_y_coordinate" + y[:] = lcc_y + + cell_area_dim = var_dim + # Levels + if levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=True) + lev.units = Unit("m").symbol + lev.positive = 'up' + lev[:] = levels + + # All variables + if len(data_list) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=True) + var[:] = 0 + for variable in data_list: + # print ('time',) + var_dim + var = netcdf.createVariable(variable['name'], 'f', ('time',) + var_dim, zlib=True) + var.units = Unit(variable['units']).symbol + if 'long_name' in variable: + var.long_name = str(variable['long_name']) + if 'standard_name' in variable: + var.standard_name = str(variable['standard_name']) + if 'cell_method' in variable: + var.cell_method = str(variable['cell_method']) + var.coordinates = "lat lon" + if cell_area is not None: + var.cell_measures = 'area: cell_area' + if regular_latlon: + var.grid_mapping = 'crs' + elif rotated: + var.grid_mapping = 'rotated_pole' + elif lcc: + var.grid_mapping = 'Lambert_conformal' + elif mercator: + var.grid_mapping = 'mercator' + try: + var[:] = variable['data'] + except ValueError: + print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + + # Grid mapping + if regular_latlon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif rotated: + # Rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + mapping.grid_mapping_name = 
'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = north_pole_lat + mapping.grid_north_pole_longitude = north_pole_lon + elif lcc: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = lat_1_2 + mapping.longitude_of_central_meridian = lon_0 + mapping.latitude_of_projection_origin = lat_0 + elif mercator: + # Mercator + mapping = netcdf.createVariable('mercator', 'i') + mapping.grid_mapping_name = "mercator" + mapping.longitude_of_projection_origin = lon_0 + mapping.standard_parallel = lat_ts + + # Cell area + if cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + # print c_area[:].shape, cell_area.shape + c_area[:] = cell_area + + if global_attributes is not None: + netcdf.setncatts(global_attributes) + + netcdf.close() + + +def create_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, + levels=None, date=None, hours=None, + boundary_latitudes=None, boundary_longitudes=None, cell_area=None, global_attributes=None, + regular_latlon=False, + rotated=False, rotated_lats=None, rotated_lons=None, north_pole_lat=None, north_pole_lon=None, + lcc=False, lcc_x=None, lcc_y=None, lat_1_2=None, lon_0=None, lat_0=None): + # TODO Documentation + """ + + :param netcdf_path: + :param center_latitudes: + :param center_longitudes: + :param data_list: + :param levels: + :param date: + :param hours: + :param boundary_latitudes: + :param boundary_longitudes: + :param cell_area: + :param global_attributes: + :param regular_latlon: + :param rotated: + :param rotated_lats: + :param rotated_lons: + :param north_pole_lat: + :param north_pole_lon: + :param lcc: + :param lcc_x: + :param lcc_y: + :param lat_1_2: + :param lon_0: + :param lat_0: + :return: + """ + from cf_units import Unit, encode_time + import 
numpy as np + + if not (regular_latlon or lcc or rotated): + regular_latlon = True + + netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + if regular_latlon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(center_latitudes.shape) == 1: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lat',) + elif len(center_latitudes.shape) == 2: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lon', 'lat', ) + else: + print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + sys.exit(1) + + # Longitude + if len(center_longitudes.shape) == 1: + netcdf.createDimension('lon', center_longitudes.shape[0]) + lon_dim = ('lon',) + elif len(center_longitudes.shape) == 2: + netcdf.createDimension('lon', center_longitudes.shape[1]) + lon_dim = ('lon', 'lat', ) + else: + print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + sys.exit(1) + elif rotated: + var_dim = ('rlat', 'rlon',) + + # Rotated Latitude + if rotated_lats is None: + print 'ERROR: For rotated grids is needed the rotated latitudes.' + sys.exit(1) + netcdf.createDimension('rlat', len(rotated_lats)) + lat_dim = ('rlat', 'rlon',) + + # Rotated Longitude + if rotated_lons is None: + print 'ERROR: For rotated grids is needed the rotated longitudes.' 
+ sys.exit(1) + netcdf.createDimension('rlon', len(rotated_lons)) + lon_dim = ('rlat', 'rlon',) + + elif lcc: + var_dim = ('y', 'x',) + + netcdf.createDimension('y', len(lcc_y)) + lat_dim = ('y', 'x', ) + + netcdf.createDimension('x', len(lcc_x)) + lon_dim = ('y', 'x', ) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if levels is not None: + netcdf.createDimension('lev', len(levels)) + + # Bounds + if boundary_latitudes is not None: + # print boundary_latitudes.shape + # print len(boundary_latitudes[0, 0]) + netcdf.createDimension('nv', len(boundary_latitudes[0, 0])) + # sys.exit() + + # Time + netcdf.createDimension('time', None) + + # ===== Variables ===== + # Time + if date is None: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] + else: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + u = Unit('hours') + # print u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second)) + # Unit('hour since 1970-01-01 00:00:00.0000000 UTC') + time.units = str(u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, + date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = hours + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=True) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + lats[:] = center_latitudes + + if boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=True) + # print lat_bnds[:].shape, boundary_latitudes.shape + lat_bnds[:] = boundary_latitudes + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=True) + + lons.units = 
"degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + lons[:] = center_longitudes + if boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=True) + # print lon_bnds[:].shape, boundary_longitudes.shape + lon_bnds[:] = boundary_longitudes + + if rotated: + # Rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), zlib=True) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + rlat[:] = rotated_lats + + # Rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=True) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + rlon[:] = rotated_lons + if lcc: + x = netcdf.createVariable('x', 'd', ('x',), zlib=True) + x.units = Unit("km").symbol + x.long_name = "x coordinate of projection" + x.standard_name = "projection_x_coordinate" + x[:] = lcc_x + + y = netcdf.createVariable('y', 'd', ('y',), zlib=True) + y.units = Unit("km").symbol + y.long_name = "y coordinate of projection" + y.standard_name = "projection_y_coordinate" + y[:] = lcc_y + + cell_area_dim = var_dim + # Levels + if levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=True) + lev.units = Unit("m").symbol + lev.positive = 'up' + lev[:] = levels + + # All variables + if len(data_list) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=True) + var[:] = 0 + for variable in data_list: + # print ('time',) + var_dim + # print variable + var = netcdf.createVariable(variable['name'], 'f', ('time',) + var_dim, zlib=True) + var.units = Unit(variable['units']).symbol + if 'long_name' in variable: + var.long_name = str(variable['long_name']) + if 'standard_name' in variable: + var.standard_name = 
str(variable['standard_name']) + if 'cell_method' in variable: + var.cell_method = str(variable['cell_method']) + var.coordinates = "lat lon" + if cell_area is not None: + var.cell_measures = 'area: cell_area' + if regular_latlon: + var.grid_mapping = 'crs' + elif rotated: + var.grid_mapping = 'rotated_pole' + elif lcc: + var.grid_mapping = 'Lambert_conformal' + # print 'HOURSSSSSSSSSSSSSSSSSSSSS:', hours + # if variable['data'] is not 0: + # print var[:].shape, variable['data'].shape, variable['data'].max() + shape = tuple() + exec ("shape = (len(hours), {0}.size, {1}.size, {2}.size)".format(var_dim[0], var_dim[1], var_dim[2])) + # exit() + print shape + var[:] = np.zeros(shape) + + # Grid mapping + if regular_latlon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif rotated: + # Rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + mapping.grid_mapping_name = 'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = north_pole_lat + mapping.grid_north_pole_longitude = north_pole_lon + elif lcc: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = lat_1_2 + mapping.longitude_of_central_meridian = lon_0 + mapping.latitude_of_projection_origin = lat_0 + + # Cell area + if cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + # print c_area[:].shape, cell_area.shape + c_area[:] = cell_area + + if global_attributes is not None: + netcdf.setncatts(global_attributes) + return netcdf + + +def tuple_to_index(tuple_list, bidimensional=False): + # TODO Documentation + """ + + :param tuple_list: + :param bidimensional: + :return: + """ + from operator import mul + 
new_list = [] + for my_tuple in tuple_list: + if bidimensional: + new_list.append(my_tuple[-1] * my_tuple[-2]) + else: + new_list.append(reduce(mul, my_tuple)) + return new_list + + +def calculate_displacements(counts): + # TODO Documentation + """ + + :param counts: + :return: + """ + new_list = [0] + accum = 0 + for counter in counts[:-1]: + accum += counter + new_list.append(accum) + return new_list + + +if __name__ == '__main__': + pass diff --git a/hermesv3_gr/tools/sample_files.py b/hermesv3_gr/tools/sample_files.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb2dd8e41cbfb4a35697354485c0536a23643d5 --- /dev/null +++ b/hermesv3_gr/tools/sample_files.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +import sys +import os + + +def make_conf_file_list(): + main_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir) + + file_list = [ + {'conf': [ + os.path.join(main_dir, 'conf', 'hermes.conf'), + os.path.join(main_dir, 'conf', 'EI_configuration.csv'), + ]}, + ] + + return file_list + + +def make_profiles_file_list(): + main_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir) + + file_list = [ + {'data': [ + os.path.join(main_dir, 'data', 'global_attributes.csv'), + {'profiles': [{ + 'speciation': [ + os.path.join(main_dir, 'data', 'profiles', 'speciation', 'MolecularWeights.csv'), + os.path.join(main_dir, 'data', 'profiles', 'speciation', 'Speciation_profile_cb05_aero5_CMAQ.csv'), + os.path.join(main_dir, 'data', 'profiles', 'speciation', + 'Speciation_profile_cb05_aero5_MONARCH.csv'), + os.path.join(main_dir, 'data', 'profiles', 'speciation', + 'Speciation_profile_cb05_aero6_CMAQ.csv'), + os.path.join(main_dir, 'data', 'profiles', 'speciation', + 'Speciation_profile_radm2_madesorgam_WRF_CHEM.csv'), + os.path.join(main_dir, 'data', 'profiles', 'speciation', + 'Speciation_profile_cb05e51_aero6_CMAQ.csv'), + ]}, + {'temporal': [ + os.path.join(main_dir, 'data', 'profiles', 'temporal', 'TemporalProfile_Daily.csv'), + os.path.join(main_dir, 'data', 'profiles', 'temporal', 'TemporalProfile_Hourly.csv'), + os.path.join(main_dir, 'data', 'profiles', 'temporal', 'TemporalProfile_Monthly.csv'), + os.path.join(main_dir, 'data', 'profiles', 'temporal', 'tz_world_country_iso3166.csv'), + ]}, + {'vertical': [ + os.path.join(main_dir, 'data', 'profiles', 'vertical', + 'Benchmark_15layers_vertical_description.csv'), + os.path.join(main_dir, 'data', 'profiles', 'vertical', 'Vertical_profile.csv'), + ]}, + ]}, + ]}, + ] + + return file_list + + +def make_preproc_file_list(): + main_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir) + + file_list = [ + os.path.join(main_dir, 
'preproc', 'ceds_preproc.py'), + os.path.join(main_dir, 'preproc', 'eclipsev5a_preproc.py'), + os.path.join(main_dir, 'preproc', 'edgarv432_ap_preproc.py'), + os.path.join(main_dir, 'preproc', 'edgarv432_voc_preproc.py'), + os.path.join(main_dir, 'preproc', 'emep_preproc.py'), + os.path.join(main_dir, 'preproc', 'gfas12_preproc.py'), + os.path.join(main_dir, 'preproc', 'htapv2_preproc.py'), + os.path.join(main_dir, 'preproc', 'tno_mac_iii_preproc.py'), + os.path.join(main_dir, 'preproc', 'tno_mac_iii_preproc_voc_ratios.py'), + os.path.join(main_dir, 'preproc', 'wiedinmyer_preproc.py'), + ] + + return file_list + + +def query_yes_no(question, default="yes"): + valid = {"yes": True, "y": True, "1": True, 1: True, + "no": False, "n": False, "0": False, 0: False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = raw_input().lower() + if default is not None and choice == '': + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") + + +def check_args(args, exe_str): + if len(args) == 0: + print("Missing destination path after '{0}'. e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str)) + sys.exit(1) + elif len(args) > 1: + print("Too much arguments through '{0}'. Only destination path is needed e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str)) + sys.exit(1) + else: + dir_path = args[0] + + if not os.path.exists(dir_path): + if query_yes_no("'{0}' does not exist. Do you want to create it? 
".format(dir_path)): + os.makedirs(dir_path) + else: + sys.exit(0) + + return dir_path + + +def copy_files(file_list, directory): + from shutil import copy2 + + if not os.path.exists(directory): + os.makedirs(directory) + + for element in file_list: + if dict == type(element): + copy_files(element.values()[0], os.path.join(directory, element.keys()[0])) + else: + copy2(element, directory) + return True + + +def copy_config_files(): + argv = sys.argv[1:] + + parent_dir = check_args(argv, 'hermesv3_gr_copy_config_files') + + copy_files(make_conf_file_list(), parent_dir) + copy_files(make_profiles_file_list(), parent_dir) + + +def copy_preproc_files(): + argv = sys.argv[1:] + + parent_dir = check_args(argv, 'hermesv3_gr_copy_preproc_files') + + copy_files(make_preproc_file_list(), parent_dir) + + +if __name__ == '__main__': + copy_config_files() diff --git a/preproc/ceds_preproc.py b/preproc/ceds_preproc.py new file mode 100755 index 0000000000000000000000000000000000000000..3e22812d99fb4a47a9c218c635f5521f7d7d4277 --- /dev/null +++ b/preproc/ceds_preproc.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+
+
+import os
+import sys
+
+
+# ============== CONFIGURATION PARAMETERS ======================
+INPUT_PATH = '/esarchive/recon/jgcri/ceds/original_files'
+OUTPUT_PATH = '/esarchive/recon/jgcri/ceds'
+LIST_POLLUTANTS = ['BC', 'CO', 'NH3', 'NMVOC', 'NOx', 'OC', 'SO2']
+VOC_POLLUTANTS = ['VOC01', 'VOC02', 'VOC03', 'VOC04', 'VOC05', 'VOC06', 'VOC07', 'VOC08', 'VOC09', 'VOC12', 'VOC13',
+                  'VOC14', 'VOC15', 'VOC16', 'VOC17', 'VOC18', 'VOC19', 'VOC20', 'VOC21', 'VOC22', 'VOC23', 'VOC24',
+                  'VOC25']
+
+LIST_SECTORS = ['agriculture', 'energy', 'industry', 'transport', 'residential', 'solvents', 'waste', 'ships']
+# LIST_YEARS = from 1950 to 2014
+LIST_YEARS = [2014]
+INPUT_NAME = '<pollutant>-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-sectorDim_gr_<year_ini>01-<year_end>12.nc'
+VOC_INPUT_NAME = '<pollutant>-em-speciated-VOC_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-sector' + \
+                 'Dim-supplemental-data_gr_<year_ini>01-<year_end>12.nc'
+DO_AIR = True
+AIR_INPUT_NAME = '<pollutant>-em-AIR-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26_gr_<year_ini>01-<year_end>12.nc'
+# ==============================================================
+
+
+def voc_to_vocname(voc):
+    """
+    Gets the voc complete name from the VOCXX format
+
+    :param voc: Voc number in the format VOCXX
+    :type voc: str
+
+    :return: Voc name
+    :rtype:str
+    """
+    voc_dict = {
+        'VOC01': 'alcohols',
+        'VOC02': 'ethane',
+        'VOC03': 'propane',
+        'VOC04': 'butanes',
+        'VOC05': 'pentanes',
+        'VOC06': 'hexanes-pl',
+        'VOC07': 'ethene',
+        'VOC08': 'propene',
+        'VOC09': 'ethyne',
+        'VOC12': 'other-alke',
+        'VOC13': 'benzene',
+        'VOC14': 'toluene',
+        'VOC15': 'xylene',
+        'VOC16': 'trimethylb',
+        'VOC17': 'other-arom',
+        'VOC18': 'esters',
+        'VOC19': 'ethers',
+        'VOC20': 'chlorinate',
+        'VOC21': 'methanal',
+        'VOC22': 'other-alka',
+        'VOC23': 'ketones',
+        'VOC24': 'acids',
+        'VOC25': 'other-voc'
+    }
+
+    return voc_dict[voc]
+
+
+def sector_to_index(sector):
+    """
+    Gets the index where are allocated the emissions for the selected sector.
+
+    :param sector: Name to the sector to get the index position.
+    :type sector: str
+
+    :return: Index of the position of the current sector
+    :rtype: int
+    """
+    sector_dict = {
+        'agriculture': 0,
+        'energy': 1,
+        'industry': 2,
+        'transport': 3,
+        'residential': 4,
+        'solvents': 5,
+        'waste': 6,
+        'ships': 7
+    }
+
+    return sector_dict[sector]
+
+
+def get_input_name(pollutant, year, air=False):
+    """
+    Gets the path for the input file name
+
+    :param pollutant: Name of the pollutant
+    :type pollutant: str
+
+    :param year: Year to extract
+    :type year: int
+
+    :param air: Indicates if the input file is related with air emissions
+    :type air: bool
+
+    :return: Path to the input file
+    :rtype: str
+    """
+    if air:
+        file_name = AIR_INPUT_NAME.replace('<pollutant>', pollutant)
+    elif pollutant in LIST_POLLUTANTS:
+        file_name = INPUT_NAME.replace('<pollutant>', pollutant)
+    elif pollutant in VOC_POLLUTANTS:
+        file_name = VOC_INPUT_NAME.replace('<pollutant>', '{0}-{1}'.format(pollutant, voc_to_vocname(pollutant)))
+    else:
+        raise ValueError('Pollutant {0} not in pollutant list or voc list'.format(pollutant))
+
+    if year < 1851 or year > 2014:
+        raise ValueError('Select a year between 1851 and 2014')
+    elif year <= 1899:
+        file_name = file_name.replace('<year_ini>', str(1851)).replace('<year_end>', str(1899))
+    elif year <= 1949:
+        file_name = file_name.replace('<year_ini>', str(1900)).replace('<year_end>', str(1949))
+    elif year <= 1999:
+        file_name = file_name.replace('<year_ini>', str(1950)).replace('<year_end>', str(1999))
+    else:
+        file_name = file_name.replace('<year_ini>', str(2000)).replace('<year_end>', str(2014))
+
+    return os.path.join(INPUT_PATH, file_name)
+
+
+def get_full_year_data(file_name, pollutant, sector, year, air=False):
+    """
+    Gets the needed data in the input format.
+
+    :param file_name: path to the input file.
+    :type file_name: str
+
+    :param pollutant: Name of the pollutant.
+    :type pollutant: str
+
+    :param sector: Name of the sector.
+    :type sector: str
+
+    :param year: Year to calculate.
+ :type year: int + + :param air: Indicates if the input file is related with air emissions + :type air: bool + + :return: Data of the selected emission. + :rtype: numpy.array + """ + from netCDF4 import Dataset + from datetime import datetime + import cf_units + import numpy as np + + nc = Dataset(file_name, mode='r') + + time = nc.variables['time'] + + time_array = cf_units.num2date(time[:], time.units, time.calendar) + time_array = np.array([datetime(year=x.year, month=x.month, day=1) for x in time_array]) + + i_time = np.where(time_array == datetime(year=year, month=1, day=1))[0][0] + if air: + data = nc.variables['AIR'][i_time:i_time + 12, :, :, :] + elif pollutant in LIST_POLLUTANTS: + data = nc.variables['{0}_em_anthro'.format(pollutant)][i_time:i_time+12, sector_to_index(sector), :, :] + elif pollutant in VOC_POLLUTANTS: + data = nc.variables['{0}-{1}_em_speciated_VOC'.format( + pollutant, voc_to_vocname(pollutant).replace('-', '_'))][i_time:i_time+12, sector_to_index(sector), :, :] + else: + data = None + nc.close() + + return data + + +def get_global_attributes(file_name): + """ + Gets the global attributes of the input file. 
+ + :param file_name: Path to the NetCDF file + :type file_name: str + + :return: Global attributes + :rtype: dict + """ + from netCDF4 import Dataset + + nc = Dataset(file_name, mode='r') + + atts_dict = {} + for name in nc.ncattrs(): + atts_dict[name] = nc.getncattr(name) + + nc.close() + return atts_dict + + +def do_transformation(year): + """ + Does the transformation for the selected year + + :param year: Year to calculate + :type year: int + """ + from datetime import datetime + from hermesv3_gr.tools.netcdf_tools import extract_vars, get_grid_area, write_netcdf + for pollutant in LIST_POLLUTANTS + VOC_POLLUTANTS: + file_name = get_input_name(pollutant, year) + if os.path.exists(file_name): + c_lats, c_lons, b_lats, b_lons = extract_vars(file_name, ['lat', 'lon', 'lat_bnds', 'lon_bnds']) + cell_area = get_grid_area(file_name) + + global_attributes = get_global_attributes(file_name) + for sector in LIST_SECTORS: + data = get_full_year_data(file_name, pollutant, sector, year) + + if pollutant == 'NOx': + pollutant_name = 'nox_no2' + else: + pollutant_name = pollutant.lower() + + file_path = os.path.join(OUTPUT_PATH, 'monthly_mean', '{0}_{1}'.format(pollutant_name, sector)) + if not os.path.exists(file_path): + os.makedirs(file_path) + + for month in xrange(1, 12 + 1, 1): + emission = { + 'name': pollutant_name, + 'units': 'kg.m-2.s-1', + 'data': data[month - 1, :, :].reshape((1,) + cell_area.shape) + } + write_netcdf( + os.path.join(file_path, '{0}_{1}{2}.nc'.format(pollutant_name, year, str(month).zfill(2))), + c_lats['data'], c_lons['data'], [emission], date=datetime(year=year, month=month, day=1), + boundary_latitudes=b_lats['data'], boundary_longitudes=b_lons['data'], cell_area=cell_area, + global_attributes=global_attributes) + else: + raise IOError('File not found {0}'.format(file_name)) + return True + + +def do_air_transformation(year): + """ + Does the transformations of the ari emissions for the selected year. 
+ + :param year: Year to calculate + :type year: int + """ + from datetime import datetime + from hermesv3_gr.tools.netcdf_tools import extract_vars, get_grid_area, write_netcdf + + for pollutant in LIST_POLLUTANTS: + file_name = get_input_name(pollutant, year, air=True) + if os.path.exists(file_name): + c_lats, c_lons, b_lats, b_lons = extract_vars(file_name, ['lat', 'lon', 'lat_bnds', 'lon_bnds']) + cell_area = get_grid_area(file_name) + + global_attributes = get_global_attributes(file_name) + + data = get_full_year_data(file_name, pollutant, '', year, air=True) + + if pollutant == 'NOx': + pollutant_name = 'nox_no2' + else: + pollutant_name = pollutant.lower() + + for sector in ['air_lto', 'air_cds', 'air_crs']: + file_path = os.path.join(OUTPUT_PATH, 'monthly_mean', '{0}_{1}'.format(pollutant_name, sector)) + if not os.path.exists(file_path): + os.makedirs(file_path) + + if sector == 'air_lto': + data_aux = data[:, 0:1 + 1, :, :].sum(axis=1) + elif sector == 'air_cds': + data_aux = data[:, 2:14 + 1, :, :].sum(axis=1) + elif sector == 'air_crs': + data_aux = data[:, 15:24 + 1, :, :].sum(axis=1) + else: + print 'ERROR' + sys.exit(1) + + for month in xrange(1, 12 + 1, 1): + emission = { + 'name': pollutant_name, + 'units': 'kg.m-2.s-1', + 'data': data_aux[month - 1, :, :].reshape((1,) + cell_area.shape) + } + write_netcdf( + os.path.join(file_path, '{0}_{1}{2}.nc'.format(pollutant_name, year, str(month).zfill(2))), + c_lats['data'], c_lons['data'], [emission], date=datetime(year=year, month=month, day=1), + boundary_latitudes=b_lats['data'], boundary_longitudes=b_lons['data'], cell_area=cell_area, + global_attributes=global_attributes) + else: + raise IOError('File not found {0}'.format(file_name)) + return True + + +if __name__ == '__main__': + for y in LIST_YEARS: + # do_transformation(y) + if DO_AIR: + do_air_transformation(y) diff --git a/preproc/eclipsev5a_preproc.py b/preproc/eclipsev5a_preproc.py new file mode 100755 index 
#!/usr/bin/env python

# Copyright 2018 Earth Sciences Department, BSC-CNS
#
# This file is part of HERMESv3_GR.
#
# HERMESv3_GR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HERMESv3_GR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HERMESv3_GR. If not, see <http://www.gnu.org/licenses/>.

"""
Transform the ECLIPSE V5a global emission inventory into NetCDF files that follow the CF-1.6 conventions.
"""

import os
from datetime import datetime
import numpy as np


# ============== CONFIGURATION PARAMETERS ======================
INPUT_PATH = '/esarchive/recon/iiasa/eclipsev5a/original_files'
OUTPUT_PATH = '/esarchive/recon/iiasa/eclipsev5a'
# NOTE(review): the '<pollutant>' / '<year>' tags were reconstructed; the extracted source had all
# angle-bracketed tokens stripped, which left corrupting ".replace('', ...)" calls.
INPUT_NAME = 'ECLIPSE_base_CLE_V5a_<pollutant>.nc'
INPUT_NAME_FLARING = 'ECLIPSE_V5a_baseline_CLE_flaring.nc'
INPUT_NAME_SHIPS = "ship_CLE_emis_<year>.nc"
MONTHLY_PATTERN_FILE = 'ECLIPSEv5_monthly_patterns.nc'
LIST_YEARS = [1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030, 2040, 2050]
LIST_POLLUTANTS = ['BC', 'CH4', 'CO', 'NH3', 'NOx', 'OC', 'OM', 'PM10', 'PM25', 'SO2', 'VOC']
# ==============================================================


MONTH_FACTOR = 1000000. / (30. * 24. * 3600.)  # To pass from kt/month to Kg/s
YEAR_FACTOR = 1000000. / (365. * 24. * 3600.)  # To pass from kt/year to Kg/s
VAR_UNITS = 'kg.m-2.s-1'


def get_grid_area(filename):
    """
    Calculate the area for each cell of the grid using CDO.

    :param filename: Path to the file to calculate the cell area
    :type filename: str

    :return: Area of each cell of the grid.
    :rtype: numpy.array
    """
    from cdo import Cdo
    from netCDF4 import Dataset

    cdo = Cdo()
    src = cdo.gridarea(input=filename)
    nc_aux = Dataset(src, mode='r')
    grid_area = nc_aux.variables['cell_area'][:]
    nc_aux.close()

    return grid_area


def create_bounds(coordinates, number_vertices=2):
    """
    Calculate the vertices coordinates.

    :param coordinates: Coordinates in degrees (latitude or longitude)
    :type coordinates: numpy.array

    :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the boundaries.
        (by default 2)
    :type number_vertices: int

    :return: Array with as many elements as vertices for each value of coords.
    :rtype: numpy.array
    """
    # Assumes an equally-spaced 1D axis; half the grid step on each side of the center.
    interval = coordinates[1] - coordinates[0]

    coords_left = coordinates - interval / 2
    coords_right = coordinates + interval / 2
    if number_vertices == 2:
        bound_coords = np.dstack((coords_left, coords_right))
    elif number_vertices == 4:
        bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left))
    else:
        raise ValueError('The number of vertices of the boudaries must be 2 or 4')

    return bound_coords


def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cell_area, date):
    """
    Write a CF-1.6 compliant NetCDF on a regular lat-lon grid with a single time step.

    :param output_name_path: Path of the output file.
    :type output_name_path: str

    :param data_list: List of dictionaries with the keys 'name', 'long_name', 'units' (a cf_units.Unit)
        and 'data' (numpy.array of shape (time, lat, lon)).
    :type data_list: list

    :param center_lats: Latitudes of the cell centers (degrees north).
    :type center_lats: numpy.array

    :param center_lons: Longitudes of the cell centers (degrees east).
    :type center_lons: numpy.array

    :param grid_cell_area: Area of each grid cell (m2).
    :type grid_cell_area: numpy.array

    :param date: Date of the (single) time step.
    :type date: datetime.datetime
    """
    from netCDF4 import Dataset

    print(output_name_path)
    # Creating NetCDF & Dimensions
    nc_output = Dataset(output_name_path, mode='w', format="NETCDF4")
    nc_output.createDimension('nv', 2)
    nc_output.createDimension('lon', center_lons.shape[0])
    nc_output.createDimension('lat', center_lats.shape[0])
    nc_output.createDimension('time', None)

    # TIME
    time = nc_output.createVariable('time', 'd', ('time',), zlib=True)
    time.units = "hours since {0}".format(date.strftime('%Y-%m-%d %H:%M:%S'))
    time.standard_name = "time"
    time.calendar = "gregorian"
    time.long_name = "time"
    time[:] = [0]

    # LATITUDE
    lat = nc_output.createVariable('lat', 'f', ('lat',), zlib=True)
    lat.bounds = "lat_bnds"
    lat.units = "degrees_north"
    lat.axis = "Y"
    lat.long_name = "latitude"
    lat.standard_name = "latitude"
    lat[:] = center_lats

    lat_bnds = nc_output.createVariable('lat_bnds', 'f', ('lat', 'nv',), zlib=True)
    lat_bnds[:] = create_bounds(center_lats)

    # LONGITUDE
    lon = nc_output.createVariable('lon', 'f', ('lon',), zlib=True)
    lon.bounds = "lon_bnds"
    lon.units = "degrees_east"
    lon.axis = "X"
    lon.long_name = "longitude"
    lon.standard_name = "longitude"
    lon[:] = center_lons

    lon_bnds = nc_output.createVariable('lon_bnds', 'f', ('lon', 'nv',), zlib=True)
    lon_bnds[:] = create_bounds(center_lons)

    for var in data_list:
        # VARIABLE
        nc_var = nc_output.createVariable(var['name'], 'f', ('time', 'lat', 'lon',), zlib=True)
        nc_var.units = var['units'].symbol
        nc_var.long_name = var['long_name']
        nc_var.coordinates = 'lat lon'
        nc_var.grid_mapping = 'crs'
        nc_var.cell_measures = 'area: cell_area'
        nc_var[:] = var['data']

    # CELL AREA
    cell_area = nc_output.createVariable('cell_area', 'f', ('lat', 'lon',))
    cell_area.long_name = "area of the grid cell"
    cell_area.standard_name = "area"
    cell_area.units = "m2"
    cell_area[:] = grid_cell_area

    # CRS
    crs = nc_output.createVariable('crs', 'i')
    crs.grid_mapping_name = "latitude_longitude"
    crs.semi_major_axis = 6371000.0
    crs.inverse_flattening = 0

    nc_output.setncattr('title', 'ECLIPSEv5a inventory')
    nc_output.setncattr('Conventions', 'CF-1.6', )
    nc_output.setncattr('institution', 'IIASA', )
    nc_output.setncattr('source', 'IIASA', )
    nc_output.setncattr('history', 'Re-writing of the ECLIPSEv5a input to follow the CF-1.6 conventions;\n' +
                        '2017-11-28: Creating;\n')
    nc_output.setncattr('web', 'http://www.iiasa.ac.at/web/home/research/researchPrograms/air/ECLIPSEv5a.html')
    nc_output.setncattr('comment', 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                        '(Barcelona Supercomputing Center)', )

    nc_output.close()


def extract_sector_by_name(name):
    """
    Translate an ECLIPSE variable name into the corresponding sector name.

    :param name: Variable name of the input file (e.g. 'emis_agr').
    :type name: str

    :return: Sector name, or None when the variable is not a sector variable.
    :rtype: str or None
    """
    sector_dict = {
        'emis_agr': 'agriculture',
        'emis_awb': 'agriculture_waste',
        'emis_dom': 'residential',
        'emis_ene': 'energy',
        'emis_ind': 'industry',
        'emis_tra': 'transport',
        'emis_wst': 'waste',
        'emis_slv': 'solvent',
    }

    return sector_dict.get(name)


def extract_month_profile_by_sector(sector, month, pollutant=None):
    """
    Extract the (lat, lon) monthly weight pattern for the given sector.

    :param sector: Sector name (as returned by extract_sector_by_name).
    :type sector: str

    :param month: Month index (0-based).
    :type month: int

    :param pollutant: Pollutant name; agricultural NH3 has a dedicated profile.
    :type pollutant: str

    :return: Monthly weight pattern with NaNs replaced by zeros.
    :rtype: numpy.array
    """
    from netCDF4 import Dataset

    sector_dict = {
        'residential': 'dom',
        'energy': 'ene',
        'agriculture_waste': 'agr_awb',
        'agriculture': 'agr',
        'industry': 'ind',
        'transport': 'tra',
        'waste': 'other',
        'solvent': 'other',
    }
    # BUG FIX: the condition used to be "sector == ''", which can never match a real sector, so the
    # dedicated agricultural NH3 profile was unreachable. NOTE(review): confirm against upstream.
    if sector == 'agriculture' and pollutant == 'NH3':
        profile_name = 'agr_NH3'
    else:
        profile_name = sector_dict[sector]

    nc_profiles = Dataset(os.path.join(INPUT_PATH, MONTHLY_PATTERN_FILE), mode='r')

    profile = nc_profiles.variables[profile_name][month, :, :]

    nc_profiles.close()
    # The pattern file contains NaNs (e.g. where there is no activity); treat them as zero weight.
    profile = np.nan_to_num(profile)
    return profile


def get_output_name(pollutant, sector, year, month):
    """
    Build (creating the directory if needed) the monthly-mean output file path.

    :param pollutant: Output pollutant name (lowercase).
    :type pollutant: str

    :param sector: Output sector name (lowercase).
    :type sector: str

    :param year: Year of the data.
    :type year: int

    :param month: Month of the data (1-12).
    :type month: int

    :return: Path of the output file.
    :rtype: str
    """
    output_path_aux = os.path.join(OUTPUT_PATH, 'monthly_mean', '{0}_{1}'.format(pollutant, sector), )

    if not os.path.exists(output_path_aux):
        os.makedirs(output_path_aux)
    return os.path.join(output_path_aux, '{0}_{1}.nc'.format(
        pollutant, datetime(year=year, month=month, day=1).strftime('%Y%m')))


def do_single_transformation(pollutant, sector, data, c_lats, c_lons, cell_area):
    """
    Write one monthly-mean NetCDF per year and month for the given pollutant and sector.

    :param pollutant: ECLIPSE pollutant name.
    :type pollutant: str

    :param sector: Sector name.
    :type sector: str

    :param data: Yearly emissions with shape (year, lat, lon), in kt/year.
    :type data: numpy.array

    :param c_lats: Latitudes of the cell centers.
    :type c_lats: numpy.array

    :param c_lons: Longitudes of the cell centers.
    :type c_lons: numpy.array

    :param cell_area: Area of each grid cell (m2).
    :type cell_area: numpy.array
    """
    from cf_units import Unit

    for i, year in enumerate(LIST_YEARS):
        for month in xrange(12):
            # HERMES naming: NOx expressed as NO2, VOC renamed to nmvoc.
            if pollutant == 'NOx':
                pollutant_name = 'nox_no2'
            elif pollutant == 'VOC':
                pollutant_name = 'nmvoc'
            else:
                pollutant_name = pollutant.lower()
            output_name = get_output_name(pollutant_name.lower(), sector.lower(), year, month + 1)
            profile = extract_month_profile_by_sector(sector, month, pollutant)
            data_aux = data[i, :, :] * profile
            # kt/month -> kg/s, then divide by the cell area to obtain kg.m-2.s-1.
            data_aux = (data_aux * MONTH_FACTOR) / cell_area
            data_aux = data_aux.reshape((1,) + data_aux.shape)
            data_list = [{
                'name': pollutant_name,
                'long_name': pollutant_name,
                'data': data_aux,
                'units': Unit(VAR_UNITS),
            }]
            write_netcdf(output_name, data_list, c_lats, c_lons, cell_area,
                         datetime(year=year, month=month + 1, day=1))


def do_transformation():
    """Transform the yearly ECLIPSE anthropogenic files into monthly-mean NetCDFs for every pollutant."""
    from netCDF4 import Dataset

    for pollutant in LIST_POLLUTANTS:
        file_name = os.path.join(INPUT_PATH, INPUT_NAME.replace('<pollutant>', pollutant))
        print(file_name)
        nc = Dataset(file_name, mode='r')
        c_lats = nc.variables['lat'][:]
        c_lons = nc.variables['lon'][:]
        cell_area = get_grid_area(file_name)
        for var in nc.variables:
            sector = extract_sector_by_name(var)

            if sector is not None:
                do_single_transformation(pollutant, sector, nc.variables[var][:], c_lats, c_lons, cell_area)
        nc.close()


def get_flaring_output_name(pollutant, sector, year):
    """
    Build (creating the directory if needed) the yearly-mean flaring output file path.

    :param pollutant: Output pollutant name.
    :type pollutant: str

    :param sector: Output sector name.
    :type sector: str

    :param year: Year of the data.
    :type year: int

    :return: Path of the output file.
    :rtype: str
    """
    output_path_aux = os.path.join(OUTPUT_PATH, 'yearly_mean', '{0}_{1}'.format(pollutant, sector), )

    if not os.path.exists(output_path_aux):
        os.makedirs(output_path_aux)
    return os.path.join(output_path_aux, '{0}_{1}.nc'.format(pollutant,
                                                             datetime(year=year, month=1, day=1).strftime('%Y')))


def get_flaring_var_name(nc_var):
    """
    Translate a flaring variable name into the HERMES pollutant name.

    :param nc_var: Variable name of the flaring input file.
    :type nc_var: str

    :return: Pollutant name, or None when the variable is not an emission one.
    :rtype: str or None
    """
    nc_var_2_var = {
        'emis_SO2_flr': 'so2',
        'emis_NOx_flr': 'nox_no2',
        'emis_NH3_flr': 'nh3',
        'emis_VOC_flr': 'nmvoc',
        'emis_PM25_flr': 'pm25',
        'emis_BC_flr': 'bc',
        'emis_OC_flr': 'oc',
        'emis_PM10_flr': 'pm10',
        'emis_CO_flr': 'co',
        'emis_CH4_flr': 'ch4',
    }
    return nc_var_2_var.get(nc_var)


def do_flaring_transformation():
    """Transform the yearly ECLIPSE flaring file into one yearly-mean NetCDF per pollutant and year."""
    from netCDF4 import Dataset
    from cf_units import Unit

    nc_in = Dataset(os.path.join(INPUT_PATH, INPUT_NAME_FLARING), mode='r')
    c_lats = nc_in.variables['lat'][:]
    c_lons = nc_in.variables['lon'][:]
    cell_area = get_grid_area(os.path.join(INPUT_PATH, INPUT_NAME_FLARING))
    for var in nc_in.variables:
        var_name = get_flaring_var_name(var)
        if var_name is not None:
            data = nc_in.variables[var][:]
            data = np.nan_to_num(data)
            for i, year in enumerate(LIST_YEARS):
                output_name = get_flaring_output_name(var_name, 'flaring', year)
                data_aux = data[i, :, :]
                # kt/year -> kg/s, then divide by the cell area to obtain kg.m-2.s-1.
                data_aux = (data_aux * YEAR_FACTOR) / cell_area
                data_aux = data_aux.reshape((1,) + data_aux.shape)
                data_list = [{
                    'name': var_name,
                    'long_name': var_name,
                    'data': data_aux,
                    'units': Unit(VAR_UNITS),
                }]
                write_netcdf(output_name, data_list, c_lats, c_lons, cell_area,
                             datetime(year=year, month=1, day=1))
    nc_in.close()


def get_ship_output_name(pollutant, sector, year):
    """
    Build (creating the directory if needed) the yearly-mean ship output file path.

    :param pollutant: Output pollutant name.
    :type pollutant: str

    :param sector: Output sector name.
    :type sector: str

    :param year: Year of the data.
    :type year: int

    :return: Path of the output file.
    :rtype: str
    """
    output_path_aux = os.path.join(OUTPUT_PATH, 'yearly_mean', '{0}_{1}'.format(pollutant, sector), )

    if not os.path.exists(output_path_aux):
        os.makedirs(output_path_aux)
    return os.path.join(output_path_aux, '{0}_{1}.nc'.format(pollutant,
                                                             datetime(year=year, month=1, day=1).strftime('%Y')))


def get_ship_var_name(nc_var):
    """
    Translate a ship-emission variable name into the HERMES pollutant name.

    :param nc_var: Variable name of the ship input file.
    :type nc_var: str

    :return: Pollutant name, or None when the variable is not an emission one.
    :rtype: str or None
    """
    nc_var_2_var = {
        'SO2': 'so2',
        'NOx': 'nox_no2',
        'VOC': 'nmvoc',
        'PM25': 'pm25',
        'BC': 'bc',
        'OC': 'oc',
        'PM10': 'pm10',
        'CO': 'co',
        'CH4': 'ch4',
    }
    return nc_var_2_var.get(nc_var)


def do_ship_transformation():
    """Transform the yearly ECLIPSE ship files into one yearly-mean NetCDF per pollutant and year."""
    from netCDF4 import Dataset
    from cf_units import Unit

    for year in LIST_YEARS:
        in_path = os.path.join(INPUT_PATH, INPUT_NAME_SHIPS.replace('<year>', str(year)))
        nc_in = Dataset(in_path, mode='r')
        c_lats = nc_in.variables['lat'][:]
        c_lons = nc_in.variables['lon'][:]

        cell_area = get_grid_area(in_path)

        for var in nc_in.variables:
            var_name = get_ship_var_name(var)
            if var_name is not None:
                data = nc_in.variables[var][0, :, :]
                data = np.nan_to_num(data)

                # kt/year -> kg/s, then divide by the cell area to obtain kg.m-2.s-1.
                data = (data * YEAR_FACTOR) / cell_area
                data = data.reshape((1,) + data.shape)
                data_list = [{
                    'name': var_name,
                    'long_name': var_name,
                    'data': data,
                    'units': Unit(VAR_UNITS),
                }]

                write_netcdf(get_ship_output_name(var_name, 'ship', year), data_list, c_lats, c_lons, cell_area,
                             datetime(year=year, month=1, day=1))
        nc_in.close()


if __name__ == '__main__':
    do_transformation()
    do_flaring_transformation()
    do_ship_transformation()


# ---------------------------------------------------------------------------
# (header of the next file in the original diff: preproc/edgarv432_ap_preproc.py)
# ---------------------------------------------------------------------------

# #!/usr/bin/env python
#
# Copyright 2018 Earth Sciences Department, BSC-CNS
#
# This file is part of HERMESv3_GR.
#
# HERMESv3_GR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HERMESv3_GR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HERMESv3_GR. If not, see <http://www.gnu.org/licenses/>.
+ + +import os +from netCDF4 import Dataset +import numpy as np +from warnings import warn as warning + + +# ============== CONFIGURATION PARAMETERS ====================== +INPUT_PATH = '/esarchive/recon/jrc/edgarv432_ap/original_files/' +OUTPUT_PATH = '/esarchive/recon/jrc/edgarv432_ap' +# LIST_POLLUTANTS = ['BC', 'CO', 'NH3', 'NOx', 'OC', 'PM10', 'PM2.5_bio', 'PM2.5_fossil', 'SO2', 'NMVOC'] +LIST_POLLUTANTS = ['PM2.5_bio', 'PM2.5_fossil'] +# LIST_YEARS = [1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, +# 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, +# 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012] +LIST_YEARS = [2010] + +# To do yearly emissions +PROCESS_YEARLY = False +YEARLY_INPUT_NAME = 'yearly/v432___.0.1x0.1.nc' + +# To process monthly emissions, 2010 directly from monthly_input_name and other years calculated using bla bla bla +PROCESS_MONTHLY = True +MONTHLY_INPUT_NAME = 'monthly/v432__2010__.0.1x0.1.nc' +MONTHLY_PATTERN_FILE = 'temporal_profiles/v432_FM_.0.1x0.1.nc' +# ============================================================== + +""" +Main script to transform EDGARv4.3.2 AP global emission inventory to a NetCDF that follows the CF-1.6 conventions. + +This script also calculates the boundaries of teh cells and teh cell area. + +Carles Tena Medina (carles.tena@bsc.es) from Barcelona Supercomputing Center (BSC-CNS). 
+""" + + +def ipcc_to_sector_dict(): + # TODO Documentation + ipcc_sector_dict = { + "IPCC_1A1a": "ENE", + "IPCC_1A1b_1A1c_1A5b1_1B1b_1B2a5_1B2a6_1B2b5_2C1b": "REF_TRF", + "IPCC_1A2": "IND", + "IPCC_1A3a_CDS": "TNR_Aviation_CDS", + "IPCC_1A3a_CRS": "TNR_Aviation_CRS", + "IPCC_1A3a_LTO": "TNR_Aviation_LTO", + "IPCC_1A3b": "TRO", + "IPCC_1A3c_1A3e": "TNR_Other", + "IPCC_1A3d_1C2": "TNR_Ship", + "IPCC_1A4": "RCO", + "IPCC_1B1a_1B2a1_1B2a2_1B2a3_1B2a4_1B2c": "PRO", + "IPCC_2A": "NMM", + "IPCC_2B": "CHE", + "IPCC_2C1a_2C1c_2C1d_2C1e_2C1f_2C2": "IRO", + "IPCC_2C3_2C4_2C5": "NFE", + "IPCC_2D": "FOO_PAP", + "IPCC_2G": "NEU", + "IPCC_3": "PRU_SOL", + "IPCC_4B": "MNM", + "IPCC_4C_4D1_4D2_4D4": "AGS", + "IPCC_4F": "AWB", + "IPCC_6A_6D": "SWD_LDF", + "IPCC_6B": "WWT", + "IPCC_6C": "SWD_INC", + "IPCC_7A": "FFF" + } + + return ipcc_sector_dict + + +def create_bounds(coordinates, number_vertices=2): + """ + Calculate the vertices coordinates. + + :param coordinates: Coordinates in degrees (latitude or longitude) + :type coordinates: numpy.array + + :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the boundaries. + (by default 2) + :type number_vertices: int + + :return: Array with as many elements as vertices for each value of coords. + :rtype: numpy.array + """ + interval = coordinates[1] - coordinates[0] + + coords_left = coordinates - interval / 2 + coords_right = coordinates + interval / 2 + if number_vertices == 2: + bound_coords = np.dstack((coords_left, coords_right)) + elif number_vertices == 4: + bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left)) + else: + raise ValueError('The number of vertices of the boudaries must be 2 or 4') + + return bound_coords + + +def get_grid_area(filename): + """ + Calculate the area for each cell of the grid using CDO + + :param filename: Path to the file to calculate the cell area + :type filename: str + + :return: Area of each cell of the grid. 
+ :rtype: numpy.array + """ + from cdo import Cdo + from netCDF4 import Dataset + + cdo = Cdo() + s = cdo.gridarea(input=filename) + nc_aux = Dataset(s, mode='r') + grid_area = nc_aux.variables['cell_area'][:] + nc_aux.close() + + return grid_area + + +def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, grid_cell_area, year, sector, + month=None): + # TODO Documentation + # Creating NetCDF & Dimensions + print output_name_path + nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") + nc_output.createDimension('nv', 2) + nc_output.createDimension('lon', center_lons.shape[0]) + nc_output.createDimension('lat', center_lats.shape[0]) + nc_output.createDimension('time', None) + + # TIME + time = nc_output.createVariable('time', 'd', ('time',), zlib=True) + # time.units = "{0} since {1}".format(tstep_units, global_atts['Start_DateTime'].strftime('%Y-%m-%d %H:%M:%S')) + if month is None: + time.units = "years since {0}-01-01 00:00:00".format(year) + else: + time.units = "months since {0}-{1}-01 00:00:00".format(year, str(month).zfill(2)) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0] + + # LATITUDE + lat = nc_output.createVariable('lat', 'f', ('lat',), zlib=True) + lat.bounds = "lat_bnds" + lat.units = "degrees_north" + lat.axis = "Y" + lat.long_name = "latitude" + lat.standard_name = "latitude" + lat[:] = center_lats + + lat_bnds = nc_output.createVariable('lat_bnds', 'f', ('lat', 'nv',), zlib=True) + lat_bnds[:] = create_bounds(center_lats) + + # LONGITUDE + lon = nc_output.createVariable('lon', 'f', ('lon',), zlib=True) + lon.bounds = "lon_bnds" + lon.units = "degrees_east" + lon.axis = "X" + lon.long_name = "longitude" + lon.standard_name = "longitude" + lon[:] = center_lons + + lon_bnds = nc_output.createVariable('lon_bnds', 'f', ('lon', 'nv',), zlib=True) + lon_bnds[:] = create_bounds(center_lons) + + # VARIABLE + nc_var = nc_output.createVariable(data_atts['long_name'], 
'f', ('time', 'lat', 'lon',), zlib=True) + nc_var.units = data_atts['units'] + nc_var.long_name = data_atts['long_name'] + nc_var.coordinates = data_atts['coordinates'] + nc_var.grid_mapping = data_atts['grid_mapping'] + nc_var.cell_measures = 'area: cell_area' + nc_var[:] = data.reshape((1,) + data.shape) + + # CELL AREA + cell_area = nc_output.createVariable('cell_area', 'f', ('lat', 'lon',)) + cell_area.long_name = "area of the grid cell" + cell_area.standard_name = "area" + cell_area.units = "m2" + cell_area[:] = grid_cell_area + + # CRS + crs = nc_output.createVariable('crs', 'i') + crs.grid_mapping_name = "latitude_longitude" + crs.semi_major_axis = 6371000.0 + crs.inverse_flattening = 0 + + nc_output.setncattr('title', 'EDGARv4.3.2_AP inventory for the sector {0} and pollutant {1}'.format( + sector, data_atts['long_name']), ) + nc_output.setncattr('Conventions', 'CF-1.6', ) + nc_output.setncattr('institution', 'JRC', ) + nc_output.setncattr('source', 'EDGARv4.3.2_AP', ) + nc_output.setncattr('history', 'Re-writing of the EDGAR input to follow the CF 1.6 conventions;\n' + + '2017-03-22: Added time dimension (UNLIMITED);\n' + + '2017-03-22: Added boundaries;\n' + + '2017-03-24: Added global attributes;\n' + + '2017-03-24: Re-naming pollutant;\n' + + '2017-04-03: Added cell_area variable;\n') + nc_output.setncattr('references', 'web: http://edgar.jrc.ec.europa.eu/overview.php?v=432\n' + + ' doi:https://data.europa.eu/doi/10.2904/JRC_DATASET_EDGAR', ) + nc_output.setncattr('comment', 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)', ) + + nc_output.close() + + +def do_yearly_transformation(year): + # TODO Documentation + for pollutant in LIST_POLLUTANTS: + for ipcc in ipcc_to_sector_dict().keys(): + file_path = os.path.join( + INPUT_PATH, + YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', + ipcc)) + + if os.path.exists(file_path): + grid_area = get_grid_area(file_path) + 
print file_path + nc_in = Dataset(file_path, mode='r') + + if pollutant in ['PM2.5_bio', 'PM2.5_fossil']: + in_pollutant = pollutant + pollutant = 'PM2.5' + else: + in_pollutant = None + + data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:] + + data = np.array(data) + + # Reading lat, lon + lats = nc_in.variables['lat'][:] + lons = nc_in.variables['lon'][:] + nc_in.close() + + sector = ipcc_to_sector_dict()[ipcc] + if pollutant == 'PM2.5': + pollutant = in_pollutant.replace('.', '') + elif pollutant == 'NOx': + pollutant = 'nox_no2' + + data_attributes = {'long_name': pollutant.lower(), + 'units': 'kg.m-2.s-1', + 'coordinates': 'lat lon', + 'grid_mapping': 'crs'} + + out_path_aux = os.path.join(OUTPUT_PATH, 'yearly_mean', pollutant.lower() + '_' + sector.lower()) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + write_netcdf(os.path.join(out_path_aux, '{0}_{1}.nc'.format(pollutant.lower(), year)), + data, data_attributes, lats, lons, grid_area, year, sector.lower()) + + else: + warning("The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format( + pollutant, ipcc, file_path)) + return True + + +def do_monthly_transformation(year): + # TODO Documentation + for pollutant in LIST_POLLUTANTS: + for ipcc in ipcc_to_sector_dict().keys(): + file_path = os.path.join( + INPUT_PATH, + YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', + ipcc)) + + if os.path.exists(file_path): + grid_area = get_grid_area(file_path) + print file_path + nc_in = Dataset(file_path, mode='r') + + if pollutant in ['PM2.5_bio', 'PM2.5_fossil']: + in_pollutant = pollutant + pollutant = 'PM2.5' + else: + in_pollutant = None + + data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:] + + data = np.array(data) + + # Reading lat, lon + lats = nc_in.variables['lat'][:] + lons = nc_in.variables['lon'][:] + nc_in.close() + + sector = ipcc_to_sector_dict()[ipcc] + if pollutant == 'PM2.5': + pollutant = 
in_pollutant.replace('.', '') + elif pollutant == 'NOx': + pollutant = 'nox_no2' + + data_attributes = {'long_name': pollutant.lower(), + 'units': 'kg.m-2.s-1', + 'coordinates': 'lat lon', + 'grid_mapping': 'crs'} + + out_path_aux = os.path.join(OUTPUT_PATH, 'monthly_mean', pollutant.lower() + '_' + sector.lower()) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + + nc_month_factors = Dataset(os.path.join(INPUT_PATH, MONTHLY_PATTERN_FILE.replace('', sector))) + month_factors = nc_month_factors.variables[sector][:] + for month in xrange(1, 12 + 1, 1): + data_aux = data * month_factors[month - 1, :, :] + write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(pollutant.lower(), year, + str(month).zfill(2))), + data_aux, data_attributes, lats, lons, grid_area, year, sector.lower()) + + else: + warning( + "The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format( + pollutant, ipcc, file_path)) + return True + + +def do_2010_monthly_transformation(): + # TODO Documentation + for pollutant in LIST_POLLUTANTS: + for ipcc in ipcc_to_sector_dict().keys(): + for month in xrange(1, 12 + 1, 1): + file_path = os.path.join( + INPUT_PATH, + MONTHLY_INPUT_NAME.replace('', pollutant).replace('', + str(month)).replace('', ipcc)) + + if os.path.exists(file_path): + grid_area = get_grid_area(file_path) + print file_path + nc_in = Dataset(file_path, mode='r') + # print pollutant + # print pollutant in ['PM2.5_bio', 'PM2.5_fossil'] + if pollutant in ['PM2.5_bio', 'PM2.5_fossil']: + aux_pollutant = pollutant.replace('.', '') + in_pollutant = 'PM2.5' + else: + in_pollutant = pollutant + aux_pollutant = pollutant + + data = nc_in.variables['emi_{0}'.format(in_pollutant.lower())][:] + + data = np.array(data) + + # Reading lat, lon + lats = nc_in.variables['lat'][:] + lons = nc_in.variables['lon'][:] + nc_in.close() + + sector = ipcc_to_sector_dict()[ipcc] + if aux_pollutant in ['PM2.5_bio', 'PM2.5_fossil']: + aux_pollutant = 
aux_pollutant.replace('.', '') + elif aux_pollutant == 'NOx': + aux_pollutant = 'nox_no2' + + data_attributes = {'long_name': aux_pollutant.lower(), + 'units': 'kg.m-2.s-1', + 'coordinates': 'lat lon', + 'grid_mapping': 'crs'} + + out_path_aux = os.path.join(OUTPUT_PATH, 'monthly_mean', aux_pollutant.lower() + '_' + sector.lower()) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(aux_pollutant.lower(), 2010, + str(month).zfill(2))), + data, data_attributes, lats, lons, grid_area, 2010, sector.lower()) + + else: + warning("The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format( + pollutant, ipcc, file_path)) + return True + + +if __name__ == '__main__': + + if PROCESS_YEARLY: + for y in LIST_YEARS: + do_yearly_transformation(y) + + if PROCESS_MONTHLY: + for y in LIST_YEARS: + if y == 2010: + do_2010_monthly_transformation() + else: + do_monthly_transformation(y) diff --git a/preproc/edgarv432_voc_preproc.py b/preproc/edgarv432_voc_preproc.py new file mode 100755 index 0000000000000000000000000000000000000000..8f8e6ccbc57115dc007fb99c821045a818e231be --- /dev/null +++ b/preproc/edgarv432_voc_preproc.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
import os
from netCDF4 import Dataset
import numpy as np
from warnings import warn as warning


# ============== CONFIGURATION PARAMETERS ======================
INPUT_PATH = '/esarchive/recon/jrc/edgarv432_voc/original_files/'
OUTPUT_PATH = '/esarchive/recon/jrc/edgarv432_voc'
LIST_POLLUTANTS = ['voc1', 'voc2', 'voc3', 'voc4', 'voc5', 'voc6', 'voc7', 'voc8', 'voc9', 'voc10', 'voc11', 'voc12',
                   'voc13', 'voc14', 'voc15', 'voc16', 'voc17', 'voc18', 'voc19', 'voc20', 'voc21', 'voc22', 'voc23',
                   'voc24', 'voc25']
# list_years = [1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986,
#               1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
#               2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012]
LIST_YEARS = [2010]

# To do yearly emissions
PROCESS_YEARLY = True
# NOTE(review): the '<pollutant>'/'<year>'/'<sector>'/'<month>' placeholders had been stripped from these
# templates, so the .replace() calls below were replacing the empty string (which corrupts the file names).
# Restored here -- confirm them against the original EDGAR file naming.
YEARLY_INPUT_NAME = 'yearly/v432_VOC_spec_<pollutant>_<year>_<sector>.0.1x0.1.nc'

# To process monthly emissions: 2010 is read directly from MONTHLY_INPUT_NAME; other years are derived from the
# yearly totals and the 2010 monthly factors of MONTHLY_PATTERN_FILE.
PROCESS_MONTHLY = False
MONTHLY_INPUT_NAME = 'monthly/v432_VOC_spec_<pollutant>_2010_<month>_<sector>.0.1x0.1.nc'
MONTHLY_PATTERN_FILE = 'temporal_profiles/v432_FM_<sector>.0.1x0.1.nc'
# ==============================================================

"""
Main script to transform the EDGARv4.3.2 VOC_spec global emission inventory to NetCDF files that follow the
CF-1.6 conventions.

This script also calculates the boundaries of the cells and the cell area.

Carles Tena Medina (carles.tena@bsc.es) from Barcelona Supercomputing Center (BSC-CNS).
"""


def create_bounds(coordinates, number_vertices=2):
    """
    Calculate the vertices coordinates.

    :param coordinates: Coordinates in degrees (latitude or longitude).
    :type coordinates: numpy.array

    :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the
        boundaries (by default 2).
    :type number_vertices: int

    :return: Array with as many elements as vertices for each value of coords.
    :rtype: numpy.array
    """
    # Assumes a regular grid: the interval between the two first centers holds for the whole axis.
    interval = coordinates[1] - coordinates[0]

    coords_left = coordinates - interval / 2
    coords_right = coordinates + interval / 2
    if number_vertices == 2:
        bound_coords = np.dstack((coords_left, coords_right))
    elif number_vertices == 4:
        bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left))
    else:
        raise ValueError('The number of vertices of the boundaries must be 2 or 4')

    return bound_coords


def get_grid_area(filename):
    """
    Calculate the area for each cell of the grid using CDO.

    :param filename: Path to the file to calculate the cell area.
    :type filename: str

    :return: Area of each cell of the grid (m2).
    :rtype: numpy.array
    """
    from cdo import Cdo  # optional external dependency, imported lazily

    cdo = Cdo()
    area_file = cdo.gridarea(input=filename)
    nc_aux = Dataset(area_file, mode='r')
    grid_area = nc_aux.variables['cell_area'][:]
    nc_aux.close()

    return grid_area


def ipcc_to_sector_dict():
    """
    Map each IPCC source category code of the input files onto the sector short name used in the output paths.

    :return: Dictionary with the IPCC code as key and the sector name as value.
    :rtype: dict
    """
    ipcc_sector_dict = {
        "IPCC_4F": "AWB",
        "IPCC_1A1": "ENE",
        "IPCC_7A": "FFF",
        "IPCC_1A2": "IND",
        "IPCC_2_3": "PPA",
        "IPCC_1B1a_1B2a1_1B2a2_1B2a3_1B2a4_1B2c": "PRO",
        "IPCC_1A4": "RCO",
        "IPCC_1A1b_1B2a5": "REF",
        "IPCC_6": "SWD",
        "IPCC_1A3a_CDS": "TNR_Aviation_CDS",
        "IPCC_1A3a_CRS": "TNR_Aviation_CRS",
        "IPCC_1A3a_LTO": "TNR_Aviation_LTO",
        "IPCC_1A3c_1A3e": "TNR_Other",
        "IPCC_1A3d_1C2": "TNR_Ship",
        "IPCC_1A1c_1A5b1_1B1b_1B2a6_1B2b5_2C1b": "TRF",
        "IPCC_1A3b": "TRO"
    }

    return ipcc_sector_dict


def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, grid_cell_area, year, sector,
                 month=None):
    """
    Write a CF-1.6 compliant NetCDF file with the given 2D emission field.

    :param output_name_path: Complete path of the output NetCDF.
    :type output_name_path: str

    :param data: 2D (lat, lon) array with the emission values.
    :type data: numpy.array

    :param data_atts: Attributes of the emission variable; must contain the keys 'long_name', 'units',
        'coordinates' and 'grid_mapping'.
    :type data_atts: dict

    :param center_lats: Latitudes of the center of each cell.
    :type center_lats: numpy.array

    :param center_lons: Longitudes of the center of each cell.
    :type center_lons: numpy.array

    :param grid_cell_area: Area of each cell of the grid (m2).
    :type grid_cell_area: numpy.array

    :param year: Year of the emission data (used as the time reference).
    :type year: int

    :param sector: Sector name (only used for the global attributes).
    :type sector: str

    :param month: Month of the emission data; None for yearly files (default None).
    :type month: int

    :return: True once the file has been written.
    :rtype: bool
    """
    # Creating NetCDF & Dimensions
    print(output_name_path)
    nc_output = Dataset(output_name_path, mode='w', format="NETCDF4")
    nc_output.createDimension('nv', 2)
    nc_output.createDimension('lon', center_lons.shape[0])
    nc_output.createDimension('lat', center_lats.shape[0])
    nc_output.createDimension('time', None)

    # TIME
    time = nc_output.createVariable('time', 'd', ('time',), zlib=True)
    if month is None:
        time.units = "years since {0}-01-01 00:00:00".format(year)
    else:
        time.units = "months since {0}-{1}-01 00:00:00".format(year, str(month).zfill(2))
    time.standard_name = "time"
    time.calendar = "gregorian"
    time.long_name = "time"
    time[:] = [0]

    # LATITUDE
    lat = nc_output.createVariable('lat', 'f', ('lat',), zlib=True)
    lat.bounds = "lat_bnds"
    lat.units = "degrees_north"
    lat.axis = "Y"
    lat.long_name = "latitude"
    lat.standard_name = "latitude"
    lat[:] = center_lats

    lat_bnds = nc_output.createVariable('lat_bnds', 'f', ('lat', 'nv',), zlib=True)
    lat_bnds[:] = create_bounds(center_lats)

    # LONGITUDE
    lon = nc_output.createVariable('lon', 'f', ('lon',), zlib=True)
    lon.bounds = "lon_bnds"
    lon.units = "degrees_east"
    lon.axis = "X"
    lon.long_name = "longitude"
    lon.standard_name = "longitude"
    lon[:] = center_lons

    lon_bnds = nc_output.createVariable('lon_bnds', 'f', ('lon', 'nv',), zlib=True)
    lon_bnds[:] = create_bounds(center_lons)

    # VARIABLE
    nc_var = nc_output.createVariable(data_atts['long_name'], 'f', ('time', 'lat', 'lon',), zlib=True)
    nc_var.units = data_atts['units']
    nc_var.long_name = data_atts['long_name']
    # BUG FIX: the key was misspelled 'coordiantes', which raised KeyError on every call
    # (all callers build the dict with the key 'coordinates').
    nc_var.coordinates = data_atts['coordinates']
    nc_var.grid_mapping = data_atts['grid_mapping']
    nc_var.cell_measures = 'area: cell_area'
    nc_var[:] = data.reshape((1,) + data.shape)

    # CELL AREA
    cell_area = nc_output.createVariable('cell_area', 'f', ('lat', 'lon',))
    cell_area.long_name = "area of the grid cell"
    cell_area.standard_name = "area"
    cell_area.units = "m2"
    cell_area[:] = grid_cell_area

    # CRS
    crs = nc_output.createVariable('crs', 'i')
    crs.grid_mapping_name = "latitude_longitude"
    crs.semi_major_axis = 6371000.0
    crs.inverse_flattening = 0

    # BUG FIX: the title/source said 'EDGARv4.3.2_AP' (copy-paste from the AP script); this script processes
    # the VOC_spec inventory, as the 'references' attribute below states.
    nc_output.setncattr('title', 'EDGARv4.3.2_VOC_spec inventory for the sector {0} and pollutant {1}'.format(
        sector, data_atts['long_name']), )
    nc_output.setncattr('Conventions', 'CF-1.6', )
    nc_output.setncattr('institution', 'JRC', )
    nc_output.setncattr('source', 'EDGARv4.3.2_VOC_spec', )
    nc_output.setncattr('history', 'Re-writing of the EDGAR input to follow the CF 1.6 conventions;\n' +
                                   '2017-03-22: Added time dimension (UNLIMITED);\n' +
                                   '2017-03-22: Added boundaries;\n' +
                                   '2017-03-24: Added global attributes;\n' +
                                   '2017-03-24: Re-naming pollutant;\n' +
                                   '2017-04-03: Added cell_area variable;\n')

    nc_output.setncattr('references', 'web: http://edgar.jrc.ec.europa.eu/overview.php?v=432_VOC_spec\n' +
                                      'publication: Huang et al. (ACP, 2017)', )
    nc_output.setncattr('comment', 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                                   '(Barcelona Supercomputing Center)', )

    nc_output.close()

    return True


def do_yearly_transformation(year):
    """
    Transform the original yearly files of the given year into one CF-1.6 NetCDF per pollutant and sector.

    :param year: Year to process.
    :type year: int

    :return: True once every existing input file has been processed.
    :rtype: bool
    """
    print(year)
    for pollutant in LIST_POLLUTANTS:
        for ipcc in ipcc_to_sector_dict().keys():
            file_path = os.path.join(
                INPUT_PATH,
                YEARLY_INPUT_NAME.replace('<pollutant>', pollutant).replace('<year>', str(year)).replace(
                    '<sector>', ipcc))

            if os.path.exists(file_path):
                grid_area = get_grid_area(file_path)
                print(file_path)
                nc_in = Dataset(file_path, mode='r')

                data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:]
                data = np.array(data)

                # Reading lat, lon
                lats = nc_in.variables['lat'][:]
                lons = nc_in.variables['lon'][:]
                nc_in.close()

                sector = ipcc_to_sector_dict()[ipcc]
                # voc1 .. voc9 are zero-padded to voc01 .. voc09 in the output names
                if pollutant in ['voc{0}'.format(x) for x in range(1, 9 + 1)]:
                    pollutant_aux = pollutant.replace('voc', 'voc0')
                else:
                    pollutant_aux = pollutant

                data_attributes = {'long_name': pollutant_aux.lower(),
                                   'units': 'kg.m-2.s-1',
                                   'coordinates': 'lat lon',
                                   'grid_mapping': 'crs'}
                out_path_aux = os.path.join(OUTPUT_PATH, 'yearly_mean', pollutant_aux.lower() + '_' + sector.lower())
                if not os.path.exists(out_path_aux):
                    os.makedirs(out_path_aux)
                write_netcdf(os.path.join(out_path_aux, '{0}_{1}.nc'.format(pollutant_aux.lower(), year)),
                             data, data_attributes, lats, lons, grid_area, year, sector.lower())

            else:
                warning("The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format(
                    pollutant, ipcc, file_path))
    return True


def do_monthly_transformation(year):
    """
    Create monthly files for the given year applying the 2010 monthly factors to the yearly files.

    :param year: Year to process.
    :type year: int

    :return: True once every existing input file has been processed.
    :rtype: bool
    """
    print(year)
    for pollutant in LIST_POLLUTANTS:
        for ipcc in ipcc_to_sector_dict().keys():
            file_path = os.path.join(
                INPUT_PATH,
                YEARLY_INPUT_NAME.replace('<pollutant>', pollutant).replace('<year>', str(year)).replace(
                    '<sector>', ipcc))

            if os.path.exists(file_path):
                grid_area = get_grid_area(file_path)
                print(file_path)
                nc_in = Dataset(file_path, mode='r')

                data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:]
                data = np.array(data)

                # Reading lat, lon
                lats = nc_in.variables['lat'][:]
                lons = nc_in.variables['lon'][:]
                nc_in.close()

                sector = ipcc_to_sector_dict()[ipcc]

                # voc1 .. voc9 are zero-padded to voc01 .. voc09 in the output names
                if pollutant in ['voc{0}'.format(x) for x in range(1, 9 + 1)]:
                    pollutant_aux = pollutant.replace('voc', 'voc0')
                else:
                    pollutant_aux = pollutant

                data_attributes = {'long_name': pollutant_aux.lower(),
                                   'units': 'kg.m-2.s-1',
                                   'coordinates': 'lat lon',
                                   'grid_mapping': 'crs'}

                out_path_aux = os.path.join(OUTPUT_PATH, 'monthly_mean', pollutant_aux.lower() + '_' + sector.lower())
                if not os.path.exists(out_path_aux):
                    os.makedirs(out_path_aux)

                nc_month_factors = Dataset(os.path.join(INPUT_PATH, MONTHLY_PATTERN_FILE.replace('<sector>', sector)))
                month_factors = nc_month_factors.variables[sector][:]
                nc_month_factors.close()  # was leaked before
                for month in range(1, 12 + 1):
                    data_aux = data * month_factors[month - 1, :, :]
                    write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(
                        pollutant_aux.lower(), year, str(month).zfill(2))),
                        data_aux, data_attributes, lats, lons, grid_area, year, sector.lower())

            else:
                warning(
                    "The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format(
                        pollutant, ipcc, file_path))
    return True


def do_2010_monthly_transformation():
    """
    Transform the original 2010 monthly files into one CF-1.6 NetCDF per pollutant, sector and month.

    :return: True once every existing input file has been processed.
    :rtype: bool
    """
    for pollutant in LIST_POLLUTANTS:
        for ipcc in ipcc_to_sector_dict().keys():
            for month in range(1, 12 + 1):
                file_path = os.path.join(
                    INPUT_PATH,
                    MONTHLY_INPUT_NAME.replace('<pollutant>', pollutant).replace(
                        '<month>', str(month)).replace('<sector>', ipcc))

                if os.path.exists(file_path):
                    grid_area = get_grid_area(file_path)
                    print(file_path)
                    nc_in = Dataset(file_path, mode='r')

                    data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:]
                    data = np.array(data)

                    # Reading lat, lon
                    lats = nc_in.variables['lat'][:]
                    lons = nc_in.variables['lon'][:]
                    nc_in.close()

                    sector = ipcc_to_sector_dict()[ipcc]

                    # voc1 .. voc9 are zero-padded to voc01 .. voc09 in the output names
                    if pollutant in ['voc{0}'.format(x) for x in range(1, 9 + 1)]:
                        pollutant_aux = pollutant.replace('voc', 'voc0')
                    else:
                        pollutant_aux = pollutant

                    data_attributes = {'long_name': pollutant_aux.lower(),
                                       'units': 'kg.m-2.s-1',
                                       'coordinates': 'lat lon',
                                       'grid_mapping': 'crs'}

                    out_path_aux = os.path.join(
                        OUTPUT_PATH, 'monthly_mean', pollutant_aux.lower() + '_' + sector.lower())
                    if not os.path.exists(out_path_aux):
                        os.makedirs(out_path_aux)
                    write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(
                        pollutant_aux.lower(), 2010, str(month).zfill(2))),
                        data, data_attributes, lats, lons, grid_area, 2010, sector.lower())

                else:
                    warning("The pollutant {0} for the IPCC sector {1} does not exist.\n File not found: {2}".format(
                        pollutant, ipcc, file_path))
    return True


if __name__ == '__main__':

    if PROCESS_YEARLY:
        for y in LIST_YEARS:
            do_yearly_transformation(y)

    if PROCESS_MONTHLY:
        for y in LIST_YEARS:
            if y == 2010:
                do_2010_monthly_transformation()
            else:
                do_monthly_transformation(y)
import os
from warnings import warn as warning
from datetime import datetime


# ============== CONFIGURATION PARAMETERS ======================
INPUT_PATH = '/esarchive/recon/ceip/emepv18/original_files'
OUTPUT_PATH = '/esarchive/recon/ceip/emepv18/yearly_mean'
# NOTE(review): the '<...>' placeholders had been stripped from this template, so the .replace() calls below
# were replacing the empty string (which corrupts the file name). Restored here -- confirm the slot order
# against the CEIP file naming.
INPUT_NAME = '<pollutant>_<sector>_2018_GRID_<year>.txt'
# list_years = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016]
LIST_YEARS = [2015]
LIST_POLLUTANTS = ['NOx', 'NMVOC', 'SOx', 'NH3', 'PM2_5', 'PM10', 'CO']
# ==============================================================


def correct_input_error(df):
    """
    Snap two known wrong coordinate values of the raw EMEP tables onto the regular grid.

    :param df: Emission table with 'LATITUDE' and 'LONGITUDE' columns.
    :type df: pandas.DataFrame

    :return: The same DataFrame, corrected in place.
    :rtype: pandas.DataFrame
    """
    # 36.14 / 29.58 presumably do not fall on the regular grid of the rest of the dataset -- TODO confirm upstream.
    df.loc[df['LATITUDE'] == 36.14, 'LATITUDE'] = 36.15
    df.loc[df['LONGITUDE'] == 29.58, 'LONGITUDE'] = 29.55

    return df


def get_sectors():
    """
    Return the list of the GNFR sector names used by the EMEP dataset.

    :return: GNFR sector names.
    :rtype: list
    """
    return ['A_PublicPower', 'B_Industry', 'C_OtherStationaryComb', 'D_Fugitive', 'E_Solvents', 'F_RoadTransport',
            'G_Shipping', 'H_Aviation', 'I_Offroad', 'J_Waste', 'K_AgriLivestock', 'L_AgriOther']


def calculate_grid_definition(in_path):
    """
    Deduce the global regular grid definition from the coordinates present in one input table.

    :param in_path: Path to the semicolon-separated EMEP table.
    :type in_path: str

    :return: Center latitudes, center longitudes, latitude increment and longitude increment.
    :rtype: tuple
    """
    import pandas as pd
    import numpy as np

    df = pd.read_table(in_path, sep=';', skiprows=[0, 1, 2, 3], names=[
        'ISO2', 'YEAR', 'SECTOR', 'POLLUTANT', 'LONGITUDE', 'LATITUDE', 'UNIT', 'EMISSION'])

    df = correct_input_error(df)
    # Longitudes
    lons = np.sort(np.unique(df.LONGITUDE))
    lons_interval = lons[1:] - lons[:-1]
    print('Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format(
        df.LONGITUDE.min(), df.LONGITUDE.max(), lons_interval.min(), len(lons)))

    # Latitudes
    lats = np.sort(np.unique(df.LATITUDE))
    lats_interval = lats[1:] - lats[:-1]
    print('Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format(
        df.LATITUDE.min(), df.LATITUDE.max(), lats_interval.min(), len(lats)))

    # Extend the smallest detected increment to a global grid
    lats = np.arange(-90 + lats_interval.min() / 2, 90, lats_interval.min(), dtype=np.float64)
    lons = np.arange(-180 + lons_interval.min() / 2, 180, lons_interval.min(), dtype=np.float64)

    return lats, lons, lats_interval.min(), lons_interval.min()


def do_transformation(year):
    """
    Transform the original EMEP tables of the given year into one CF-1.6 NetCDF per pollutant and sector.

    :param year: Year to process.
    :type year: int

    :return: True once every existing input file has been processed.
    :rtype: bool
    """
    from hermesv3_gr.tools.netcdf_tools import write_netcdf, get_grid_area
    from hermesv3_gr.tools.coordinates_tools import create_bounds
    import pandas as pd
    import numpy as np

    unit_factor = 1000. / (365. * 24. * 3600.)  # From Mg/year to Kg/s

    for pollutant in LIST_POLLUTANTS:
        for sector in get_sectors():
            in_file = os.path.join(
                INPUT_PATH,
                INPUT_NAME.replace('<year>', str(year)).replace('<sector>', sector).replace(
                    '<pollutant>', pollutant))

            if os.path.exists(in_file):
                print(in_file)
                c_lats, c_lons, lat_interval, lon_interval = calculate_grid_definition(in_file)
                b_lats = create_bounds(c_lats, number_vertices=2)
                b_lons = create_bounds(c_lons, number_vertices=2)
                # Rename pollutants to the HERMES naming
                name = pollutant.lower()
                if name == 'nox':
                    name = 'nox_no2'
                elif name == 'pm2_5':
                    name = 'pm25'
                elif name == 'voc':
                    # NOTE(review): unreachable with the current LIST_POLLUTANTS ('NMVOC' lowers to 'nmvoc');
                    # kept in case other pollutant lists are used.
                    name = 'nmvoc'

                element = {
                    'name': name,
                    'units': 'kg.m-2.s-1',
                    'data': np.zeros((len(c_lats), len(c_lons)))
                }

                df = pd.read_table(
                    in_file, sep=';', skiprows=[0, 1, 2, 3],
                    names=['ISO2', 'YEAR', 'SECTOR', 'POLLUTANT', 'LONGITUDE', 'LATITUDE', 'UNIT', 'EMISSION'])

                df = correct_input_error(df)

                # Index of the grid cell that contains each point
                df['row_lat'] = np.array((df.LATITUDE - (-90 + lat_interval / 2)) / lat_interval, dtype=np.int32)
                df['col_lon'] = np.array((df.LONGITUDE - (-180 + lon_interval / 2)) / lon_interval, dtype=np.int32)

                # Aggregate the emissions of every country that shares a cell
                df = df.groupby(['row_lat', 'col_lon']).sum().reset_index()

                element['data'][df.row_lat, df.col_lon] += df['EMISSION']

                element['data'] = element['data'].reshape((1,) + element['data'].shape)

                complete_output_dir = os.path.join(OUTPUT_PATH, '{0}_{1}'.format(element['name'], sector.lower()))
                if not os.path.exists(complete_output_dir):
                    os.makedirs(complete_output_dir)
                complete_output_dir = os.path.join(complete_output_dir, '{0}_{1}.nc'.format(element['name'], year))

                # First write only lets CDO compute the cell areas of the new grid ...
                write_netcdf(complete_output_dir, c_lats, c_lons, [element], date=datetime(year, month=1, day=1),
                             boundary_latitudes=b_lats, boundary_longitudes=b_lons)
                cell_area = get_grid_area(complete_output_dir)
                # ... second write stores the emissions converted to flux units (kg.m-2.s-1)
                element['data'] = element['data'] * unit_factor / cell_area
                write_netcdf(
                    complete_output_dir, c_lats, c_lons, [element], date=datetime(year, month=1, day=1),
                    boundary_latitudes=b_lats, boundary_longitudes=b_lons, cell_area=cell_area,
                    global_attributes={
                        'references': "web: http://www.ceip.at/ms/ceip_home1/ceip_home/webdab_emepdatabase/" +
                                      "emissions_emepmodels/",
                        'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                                   '(Barcelona Supercomputing Center)'
                    })
            else:
                warning("The pollutant {0} for the GNFR14 sector {1} does not exist.\n File not found: {2}".format(
                    pollutant, sector, in_file))
    return True


if __name__ == '__main__':
    for y in LIST_YEARS:
        do_transformation(y)
import os
from netCDF4 import Dataset
import cf_units
import pandas as pd
# BUG FIX: a bare 'import datetime' was immediately shadowed by the from-import below and never usable; removed.
from datetime import datetime, timedelta

# ============== CONFIGURATION PARAMETERS ======================
INPUT_PATH = '/esarchive/recon/ecmwf/gfas/original_files/ga_mc_sfc_gfas_ecmf/'
# NOTE(review): the '<date>' placeholder had been stripped from this template (the .replace() call in __main__
# was replacing the empty string). Restored here -- confirm against the original file naming.
INPUT_NAME = 'ga_<date>.grb'
OUTPUT_PATH = '/esarchive/recon/ecmwf/gfas'

STARTING_DATE = datetime(year=2018, month=8, day=29)
ENDIND_DATE = datetime(year=2018, month=8, day=29)  # (sic) misspelled name kept for backward compatibility

PARAMETERS_FILE = '/esarchive/recon/ecmwf/gfas/original_files/ga_mc_sfc_gfas_ecmf/GFAS_Parameters.csv'
# ==============================================================


def create_bounds(coords, number_vertices=2):
    """
    Calculate the vertices coordinates.

    :param coords: Coordinates in degrees (latitude or longitude).
    :type coords: numpy.array

    :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the
        boundaries (by default 2).
    :type number_vertices: int

    :return: Array with as many elements as vertices for each value of coords.
    :rtype: numpy.array
    """
    import numpy as np

    # Assumes a regular grid: the interval between the two first centers holds for the whole axis.
    interval = coords[1] - coords[0]

    coords_left = coords - interval / 2
    coords_right = coords + interval / 2
    if number_vertices == 2:
        bound_coords = np.dstack((coords_left, coords_right))
    elif number_vertices == 4:
        bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left))
    else:
        raise ValueError('The number of vertices of the boundaries must be 2 or 4')

    return bound_coords


def get_grid_area(filename):
    """
    Calculate the area for each cell of the grid using CDO.

    :param filename: Path to the file to calculate the cell area.
    :type filename: str

    :return: Area of each cell of the grid (m2).
    :rtype: numpy.array
    """
    from cdo import Cdo  # optional external dependency, imported lazily

    cdo = Cdo()
    area_file = cdo.gridarea(input=filename)
    nc_aux = Dataset(area_file, mode='r')
    grid_area = nc_aux.variables['cell_area'][:]
    nc_aux.close()

    return grid_area


def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cell_area, date):
    """
    Write a NetCDF with the given information.

    :param output_name_path: Complete path to the output NetCDF to be stored.
    :type output_name_path: str

    :param data_list: List of dictionaries, one per output variable, with the keys 'name', 'long_name',
        'units' (a cf_units.Unit) and 'data'.
    :type data_list: list

    :param center_lats: Latitudes of the center of each cell.
    :type center_lats: numpy.array

    :param center_lons: Longitudes of the center of each cell.
    :type center_lons: numpy.array

    :param grid_cell_area: Area of each cell of the grid.
    :type grid_cell_area: numpy.array

    :param date: Date of the current netCDF.
    :type date: datetime.datetime

    :return: True once the file has been written.
    :rtype: bool
    """
    print(output_name_path)
    # Creating NetCDF & Dimensions
    nc_output = Dataset(output_name_path, mode='w', format="NETCDF4")
    nc_output.createDimension('nv', 2)
    nc_output.createDimension('lon', center_lons.shape[0])
    nc_output.createDimension('lat', center_lats.shape[0])
    nc_output.createDimension('time', None)

    # TIME
    time = nc_output.createVariable('time', 'd', ('time',), zlib=True)
    time.units = "hours since {0}".format(date.strftime('%Y-%m-%d %H:%M:%S'))
    time.standard_name = "time"
    time.calendar = "gregorian"
    time.long_name = "time"
    time[:] = [0]

    # LATITUDE
    lat = nc_output.createVariable('lat', 'f', ('lat',), zlib=True)
    lat.bounds = "lat_bnds"
    lat.units = "degrees_north"
    lat.axis = "Y"
    lat.long_name = "latitude"
    lat.standard_name = "latitude"
    lat[:] = center_lats

    lat_bnds = nc_output.createVariable('lat_bnds', 'f', ('lat', 'nv',), zlib=True)
    lat_bnds[:] = create_bounds(center_lats)

    # LONGITUDE
    lon = nc_output.createVariable('lon', 'f', ('lon',), zlib=True)
    lon.bounds = "lon_bnds"
    lon.units = "degrees_east"
    lon.axis = "X"
    lon.long_name = "longitude"
    lon.standard_name = "longitude"
    lon[:] = center_lons

    lon_bnds = nc_output.createVariable('lon_bnds', 'f', ('lon', 'nv',), zlib=True)
    lon_bnds[:] = create_bounds(center_lons)

    for var in data_list:
        # VARIABLE
        nc_var = nc_output.createVariable(var['name'], 'f', ('time', 'lat', 'lon',), zlib=True)
        nc_var.units = var['units'].symbol
        nc_var.long_name = var['long_name']
        nc_var.coordinates = 'lat lon'
        nc_var.grid_mapping = 'crs'
        nc_var.cell_measures = 'area: cell_area'
        nc_var[:] = var['data']

    # CELL AREA
    cell_area = nc_output.createVariable('cell_area', 'f', ('lat', 'lon',))
    cell_area.long_name = "area of the grid cell"
    cell_area.standard_name = "area"
    cell_area.units = "m2"
    cell_area[:] = grid_cell_area

    # CRS
    crs = nc_output.createVariable('crs', 'i')
    crs.grid_mapping_name = "latitude_longitude"
    crs.semi_major_axis = 6371000.0
    crs.inverse_flattening = 0

    nc_output.setncattr('title', 'GFASv1.2 inventory')
    nc_output.setncattr('Conventions', 'CF-1.6', )
    nc_output.setncattr('institution', 'ECMWF', )
    nc_output.setncattr('source', 'GFAS', )
    # BUG FIX: 'varaibles' typo corrected in the emitted history attribute.
    nc_output.setncattr('history', 'Re-writing of the GFAS input to follow the CF-1.6 conventions;\n' +
                                   '2017-04-05: Added boundaries;\n' +
                                   '2017-04-05: Added global attributes;\n' +
                                   '2017-04-05: Re-naming pollutant;\n' +
                                   '2017-04-05: Added cell_area variable;\n' +
                                   '2017-04-19: Added new variables;\n')
    nc_output.setncattr('references', '', )
    nc_output.setncattr('comment', 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                                   '(Barcelona Supercomputing Center)', )

    nc_output.close()
    return True


def do_transformation(input_file, date, output_dir, variables_list):
    """
    Transform the original file into a NetCDF file that follows the conventions.

    :param input_file: Path to the original GRIB file.
    :type input_file: str

    :param date: Date of the file to do the transformation.
    :type date: datetime.datetime

    :param output_dir: Path where have to be stored the output file.
    :type output_dir: str

    :param variables_list: List of dictionaries with the information of each variable of the output files.
    :type variables_list: list

    :return: True once the output file has been written.
    :rtype: bool
    """
    from cdo import Cdo
    cdo = Cdo()

    # Convert the GRIB input into a temporary compressed NetCDF4 file
    nc_temp = cdo.copy(input=input_file, options='-R -r -f nc4c -z zip_4')

    nc_in = Dataset(nc_temp, mode='r')

    cell_area = get_grid_area(nc_temp)

    lats = nc_in.variables['lat'][:]
    lons = nc_in.variables['lon'][:]

    for variable in variables_list:
        variable['data'] = nc_in.variables[variable['original_name']][:]

    nc_in.close()

    out_path_aux = os.path.join(output_dir, 'daily_mean', 'multivar')
    if not os.path.exists(out_path_aux):
        os.makedirs(out_path_aux)
    out_path_aux = os.path.join(out_path_aux, 'ga_{0}.nc'.format(date.strftime('%Y%m%d')))
    write_netcdf(out_path_aux, variables_list, lats, lons, cell_area, date)

    return True


def do_var_list(variables_file):
    """
    Create the list of dictionaries with the information of each variable.

    :param variables_file: CSV file with the information of each variable.
    :type variables_file: str

    :return: Dictionaries list with the information of each variable.
    :rtype: list
    """
    df = pd.read_csv(variables_file, sep=';')
    list_aux = []
    for _, element in df.iterrows():
        dict_aux = {
            'original_name': 'var' + str(element.id),
            'name': element['Short_Name'],
            'long_name': element['Name'],
            'units': cf_units.Unit(element['Units']),
        }
        list_aux.append(dict_aux)
    return list_aux


if __name__ == '__main__':

    var_list = do_var_list(PARAMETERS_FILE)

    date_aux = STARTING_DATE
    while date_aux <= ENDIND_DATE:
        f = os.path.join(INPUT_PATH, INPUT_NAME.replace('<date>', date_aux.strftime('%Y%m%d')))
        if os.path.isfile(f):
            do_transformation(f, date_aux, OUTPUT_PATH, var_list)
        else:
            print('ERROR: file {0} not found'.format(f))

        date_aux = date_aux + timedelta(days=1)
import os


# ============== CONFIGURATION PARAMETERS ======================
INPUT_PATH = '/esarchive/recon/jrc/htapv2/original_files'
OUTPUT_PATH = '/esarchive/recon/jrc/htapv2'

# NOTE(review): these templates appear to have lost their '<...>' placeholders (note the runs of '__');
# the exact slots cannot be recovered from this file alone -- restore them before using INPUT_NAME*.
INPUT_NAME = 'edgar_HTAP__emi___.0.1x0.1.nc'
INPUT_NAME_AIR = 'edgar_HTAP_emi___.0.1x0.1.nc'
INPUT_NAME_SHIPS = 'edgar_HTAP__emi_SHIPS_.0.1x0.1.nc'
# HTAP auxiliary NMVOC emission data for the industry sub-sectors
# (http://iek8wikis.iek.fz-juelich.de/HTAPWiki/WP1.1?highlight=%28%28WP1.1%29%29)
INPUT_NAME_NMVOC_INDUSTRY = 'HTAPv2_NMVOC___.0.1x0.1.nc'

# list_years = [2008, 2010]
LIST_YEARS = [2010]

# RETRO ratios applied to HTAPv2 NMVOC emissions
# (http://iek8wikis.iek.fz-juelich.de/HTAPWiki/WP1.1?highlight=%28%28WP1.1%29%29)
VOC_RATIO_PATH = '/esarchive/recon/jrc/htapv2/original_files/retro_nmvoc_ratio_2000_01x01'
# NOTE(review): '<voc>' placeholder restored (do_ratio_list substitutes the zero-padded VOC number here;
# the .replace() calls were previously replacing the empty string, which corrupts the file name).
VOC_RATIO_NAME = 'retro_nmvoc_ratio_<voc>_2000_0.1deg.nc'
VOC_RATIO_AIR_NAME = 'VOC_split_AIR.csv'
VOC_RATIO_SHIPS_NAME = 'VOC_split_SHIP.csv'
# ==============================================================


def do_transformation_annual(filename, out_path, pollutant, sector, year):
    """
    Re-write the HTAPv2 inputs following the ES and CF-1.6 conventions for annual inventories.

    :param filename: Path to the input file.
    :type filename: str

    :param out_path: Path to store the output.
    :type out_path: str

    :param pollutant: Pollutant name.
    :type pollutant: str

    :param sector: Name of the sector.
    :type sector: str

    :param year: Year.
    :type year: int

    :return: True once the output file has been written.
    :rtype: bool
    """
    from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf, get_grid_area
    from hermesv3_gr.tools.coordinates_tools import create_bounds
    print(filename)
    [c_lats, c_lons] = extract_vars(filename, ['lat', 'lon'])

    # The original PM2.5 variable keeps the dot in its NetCDF name
    if pollutant == 'pm25':
        [data] = extract_vars(filename, ['emi_pm2.5'],
                              attributes_list=['standard_name', 'units', 'cell_method', 'long_name'])
    else:
        [data] = extract_vars(filename, ['emi_{0}'.format(pollutant)],
                              attributes_list=['standard_name', 'units', 'cell_method', 'long_name'])
    data['data'] = data['data'].reshape((1,) + data['data'].shape)
    data['name'] = pollutant

    global_attributes = {
        'title': 'HTAPv2 inventory for the sector {0} and pollutant {1}'.format(sector, data['long_name']),
        'Conventions': 'CF-1.6',
        'institution': 'European Commission, Joint Research Centre (JRC)',
        'source': 'HTAPv2',
        'history': 'Re-writing of the HTAPv2 input to follow the CF 1.6 conventions;\n' +
                   '2017-04-04: Added time dimension (UNLIMITED);\n' +
                   '2017-04-04: Added boundaries;\n' +
                   '2017-04-04: Added global attributes;\n' +
                   '2017-04-04: Re-naming pollutant;\n' +
                   '2017-04-04: Added cell_area variable;\n',
        'references': 'EC, JRC / US EPA, HTAP_V2. ' +
                      'http://edgar.jrc.ec.europa.eu/htap/EDGAR-HTAP_v1_final_jan2012.pdf\n ' +
                      'http://edgar.jrc.ec.europa.eu/htap_v2/',
        'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                   '(Barcelona Supercomputing Center)',
    }

    out_path = os.path.join(out_path, pollutant + '_' + sector.lower())
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    out_path = os.path.join(out_path, '{0}_{1}.nc'.format(pollutant, year))
    print(out_path)
    write_netcdf(out_path, c_lats['data'], c_lons['data'], [data],
                 boundary_latitudes=create_bounds(c_lats['data']),
                 boundary_longitudes=create_bounds(c_lons['data']),
                 cell_area=get_grid_area(filename), global_attributes=global_attributes,)
    return True


def do_transformation(filename_list, out_path, pollutant, sector, year):
    """
    Re-write the HTAPv2 inputs following the ES and CF-1.6 conventions for monthly inventories.

    :param filename_list: List of the twelve monthly input file names (January first).
    :type filename_list: list

    :param out_path: Path to store the output.
    :type out_path: str

    :param pollutant: Pollutant name.
    :type pollutant: str

    :param sector: Name of the sector.
    :type sector: str

    :param year: Year.
    :type year: int

    :return: True once the twelve output files have been written.
    :rtype: bool
    """
    from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf, get_grid_area
    from hermesv3_gr.tools.coordinates_tools import create_bounds

    for month in range(1, 13):
        print(filename_list[month - 1])
        [c_lats, c_lons] = extract_vars(filename_list[month - 1], ['lat', 'lon'])

        # The original PM2.5 variable keeps the dot in its NetCDF name
        if pollutant == 'pm25':
            [data] = extract_vars(filename_list[month - 1], ['emi_pm2.5'],
                                  attributes_list=['standard_name', 'units', 'cell_method', 'long_name'])
        else:
            [data] = extract_vars(filename_list[month - 1], ['emi_{0}'.format(pollutant)],
                                  attributes_list=['standard_name', 'units', 'cell_method', 'long_name'])
        data['data'] = data['data'].reshape((1,) + data['data'].shape)
        data['name'] = pollutant

        global_attributes = {
            'title': 'HTAPv2 inventory for the sector {0} and pollutant {1}'.format(sector, data['long_name']),
            'Conventions': 'CF-1.6',
            'institution': 'European Commission, Joint Research Centre (JRC)',
            'source': 'HTAPv2',
            'history': 'Re-writing of the HTAPv2 input to follow the CF 1.6 conventions;\n' +
                       '2017-04-04: Added time dimension (UNLIMITED);\n' +
                       '2017-04-04: Added boundaries;\n' +
                       '2017-04-04: Added global attributes;\n' +
                       '2017-04-04: Re-naming pollutant;\n' +
                       '2017-04-04: Added cell_area variable;\n',
            'references': 'publication: Janssens-Maenhout, G., et al.: HTAP_v2.2: a mosaic of regional and global ' +
                          'emission grid maps for 2008 and 2010 to study hemispheric transport of air pollution, ' +
                          'Atmos. Chem. Phys., 15, 11411-11432, https://doi.org/10.5194/acp-15-11411-2015, 2015.\n ' +
                          'web: http://edgar.jrc.ec.europa.eu/htap_v2/index.php',
            'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' +
                       '(Barcelona Supercomputing Center)',
        }

        out_path_aux = os.path.join(out_path, pollutant + '_' + sector.lower())
        if not os.path.exists(out_path_aux):
            os.makedirs(out_path_aux)

        out_path_aux = os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(pollutant, year, str(month).zfill(2)))
        write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data],
                     boundary_latitudes=create_bounds(c_lats['data']),
                     boundary_longitudes=create_bounds(c_lons['data']),
                     cell_area=get_grid_area(filename_list[month - 1]), global_attributes=global_attributes,)
    return True


def do_ratio_list(sector=None):
    """
    Build the dictionary of RETRO NMVOC ratio files to apply for the given sector.

    :param sector: Sector name; 'SHIPS' and the 'AIR_*' sectors use a single CSV split profile, any other
        value (or None) returns the per-VOC gridded ratio files.
    :type sector: str

    :return: Dictionary with the VOC name (or 'all') as key and the path to the ratio file as value.
    :rtype: dict
    """
    if sector == 'SHIPS':
        return {'all': os.path.join(VOC_RATIO_PATH, VOC_RATIO_SHIPS_NAME)}
    if sector in ('AIR_CDS', 'AIR_CRS', 'AIR_LTO'):
        return {'all': os.path.join(VOC_RATIO_PATH, VOC_RATIO_AIR_NAME)}

    # Gridded RETRO ratios exist for voc01-voc09 and voc12-voc25 (voc10 and voc11 are not available).
    ratio_dict = {}
    for voc_number in list(range(1, 9 + 1)) + list(range(12, 25 + 1)):
        voc_id = str(voc_number).zfill(2)
        ratio_dict['voc{0}'.format(voc_id)] = os.path.join(
            VOC_RATIO_PATH, VOC_RATIO_NAME.replace('<voc>', voc_id))
    return ratio_dict
os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '12')), + 'voc13': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '13')), + 'voc14': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '14')), + 'voc15': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '15')), + 'voc16': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '16')), + 'voc17': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '17')), + 'voc18': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '18')), + 'voc19': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '19')), + 'voc20': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '20')), + 'voc21': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '21')), + 'voc22': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '22')), + 'voc23': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '23')), + 'voc24': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '24')), + 'voc25': os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', '25')), + } + + +def do_nmvoc_month_transformation(filename_list, out_path, sector, year): + # TODO Documentation + """ + + :param filename_list: + :param out_path: + :param sector: + :param year: + :return: + """ + from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf + from hermesv3_gr.tools.coordinates_tools import create_bounds + + nmvoc_ratio_list = do_ratio_list() + + print sector + if sector == 'ENERGY': + ratio_var = 'pow' + + nmvoc_ratio_list.pop('voc18', None) + nmvoc_ratio_list.pop('voc19', None) + nmvoc_ratio_list.pop('voc20', None) + + elif sector == 'RESIDENTIAL': + ratio_var = 'res' + + nmvoc_ratio_list.pop('voc18', None) + nmvoc_ratio_list.pop('voc20', None) + + elif sector == 'TRANSPORT': + ratio_var = 'tra' + + nmvoc_ratio_list.pop('voc01', None) + nmvoc_ratio_list.pop('voc18', None) + nmvoc_ratio_list.pop('voc19', None) + nmvoc_ratio_list.pop('voc20', None) + nmvoc_ratio_list.pop('voc24', None) + 
nmvoc_ratio_list.pop('voc25', None) + + print type(nmvoc_ratio_list), nmvoc_ratio_list + + for month in xrange(1, 13): + print filename_list[month - 1] + c_lats, c_lons = extract_vars(filename_list[month - 1], ['lat', 'lon']) + + [data] = extract_vars(filename_list[month - 1], ['emi_nmvoc']) + + for voc, ratio_file in nmvoc_ratio_list.iteritems(): + print voc, ratio_file + + pollutant = voc + [ratio] = extract_vars(ratio_file, [ratio_var]) + + data_aux = data.copy() + data_aux['data'] = data['data'] * ratio['data'] + data_aux['data'] = data_aux['data'].reshape((1,) + data_aux['data'].shape) + data_aux['name'] = voc + data_aux['units'] = 'kg m-2 s-1' + global_attributes = { + 'title': 'HTAPv2 inventory for the sector {0} and pollutant {1}'.format(sector, pollutant), + 'Conventions': 'CF-1.6', + 'institution': 'European Commission, Joint Research Centre (JRC)', + 'source': 'HTAPv2', + 'history': 'Re-writing of the HTAPv2 input to follow the CF 1.6 conventions;\n' + + '2017-04-28: ...', + 'references': 'publication: Janssens-Maenhout, G., et al.: HTAP_v2.2: a mosaic of regional and ' + + 'global emission grid maps for 2008 and 2010 to study hemispheric transport of air ' + + 'pollution, Atmos. Chem. 
Phys., 15, 11411-11432, ' + + 'https://doi.org/10.5194/acp-15-11411-2015, 2015.\n ' + + 'web: http://edgar.jrc.ec.europa.eu/htap_v2/index.php', + 'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)', + } + + out_path_aux = os.path.join(out_path, pollutant + '_' + sector.lower()) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + + out_path_aux = os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(pollutant, year, str(month).zfill(2))) + print out_path_aux + write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data_aux], + boundary_latitudes=create_bounds(c_lats['data']), + boundary_longitudes=create_bounds(c_lons['data']), global_attributes=global_attributes,) + return True + + +def do_nmvoc_industry_month_transformation(filename_list, out_path, sector, year): + # TODO Documentation + """ + + :param filename_list: + :param out_path: + :param sector: + :param year: + :return: + """ + from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf + from hermesv3_gr.tools.coordinates_tools import create_bounds + + nmvoc_ratio_list = do_ratio_list() + + print sector + + print type(nmvoc_ratio_list), nmvoc_ratio_list + + for month in xrange(1, 13): + print filename_list[month - 1] + c_lats, c_lons = extract_vars(filename_list[month - 1], ['lat', 'lon']) + + [ind, exf, sol] = extract_vars(filename_list[month - 1], ['emiss_ind', 'emiss_exf', 'emiss_sol']) + + for voc, ratio_file in nmvoc_ratio_list.iteritems(): + print voc, ratio_file + data = { + 'name': voc, + 'units': 'kg m-2 s-1', + } + if voc in ['voc02', 'voc03', 'voc04', 'voc05', 'voc07', 'voc08', 'voc12', 'voc13']: + [r_inc, r_exf] = extract_vars(ratio_file, ['inc', 'exf']) + data.update({'data': ind['data'] * r_inc['data'] + exf['data'] * r_exf['data']}) + elif voc in ['voc01', 'voc23', 'voc25']: + [r_inc, r_sol] = extract_vars(ratio_file, ['inc', 'sol']) + data.update({'data': ind['data'] * r_inc['data'] + 
sol['data'] * r_sol['data']}) + elif voc in ['voc09', 'voc16', 'voc21', 'voc22', 'voc24']: + [r_inc] = extract_vars(ratio_file, ['inc']) + data.update({'data': ind['data'] * r_inc['data']}) + # elif voc in []: + # [r_exf, r_sol] = extract_vars(ratio_file, ['exf', 'sol']) + # data.update({'data': exf['data']*r_exf['data'] + sol['data']*r_sol['data']}) + elif voc in ['voc18', 'voc19', 'voc20']: + [r_sol] = extract_vars(ratio_file, ['sol']) + data.update({'data': sol['data'] * r_sol['data']}) + else: + [r_inc, r_exf, r_sol] = extract_vars(ratio_file, ['inc', 'exf', 'sol']) + data.update({'data': ind['data'] * r_inc['data'] + exf['data'] * r_exf['data'] + + sol['data'] * r_sol['data']}) + + global_attributes = { + 'title': 'HTAPv2 inventory for the sector {0} and pollutant {1}'.format(sector, voc), + 'Conventions': 'CF-1.6', + 'institution': 'European Commission, Joint Research Centre (JRC)', + 'source': 'HTAPv2', + 'history': 'Re-writing of the HTAPv2 input to follow the CF 1.6 conventions;\n' + + '2017-04-28: ...', + 'references': 'publication: Janssens-Maenhout, G., et al.: HTAP_v2.2: a mosaic of regional and ' + + 'global emission grid maps for 2008 and 2010 to study hemispheric transport of air ' + + 'pollution, Atmos. Chem. 
Phys., 15, 11411-11432, ' + + 'https://doi.org/10.5194/acp-15-11411-2015, 2015.\n ' + + 'web: http://edgar.jrc.ec.europa.eu/htap_v2/index.php', + 'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)', + } + + out_path_aux = os.path.join(out_path, voc + '_industry') + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + + out_path_aux = os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(voc, year, str(month).zfill(2))) + print out_path_aux + write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data], + boundary_latitudes=create_bounds(c_lats['data']), + boundary_longitudes=create_bounds(c_lons['data']), global_attributes=global_attributes,) + + +def do_nmvoc_year_transformation(filename, out_path, sector, year): + # TODO Documentation + """ + + :param filename: + :param out_path: + :param sector: + :param year: + :return: + """ + import pandas as pd + from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf + from hermesv3_gr.tools.coordinates_tools import create_bounds + + nmvoc_ratio_file = do_ratio_list(sector)['all'] + nmvoc_ratio_list = pd.read_csv(nmvoc_ratio_file, sep=';') + + c_lats, c_lons = extract_vars(filename, ['lat', 'lon']) + + [data] = extract_vars(filename, ['emi_nmvoc']) + + for i, voc_ratio in nmvoc_ratio_list.iterrows(): + pollutant = voc_ratio['voc_group'] + ratio = voc_ratio['factor'] + + data_aux = data.copy() + data_aux['data'] = data['data'] * ratio + data_aux['data'] = data_aux['data'].reshape((1,) + data_aux['data'].shape) + data_aux['name'] = pollutant + data_aux['units'] = 'kg m-2 s-1' + global_attributes = { + 'title': 'HTAPv2 inventory for the sector {0} and pollutant {1}'.format(sector, pollutant), + 'Conventions': 'CF-1.6', + 'institution': 'European Commission, Joint Research Centre (JRC)', + 'source': 'HTAPv2', + 'history': 'Re-writing of the HTAPv2 input to follow the CF 1.6 conventions;\n' + + '2017-04-28: ...', + 'references': 
'publication: Janssens-Maenhout, G., et al.: HTAP_v2.2: a mosaic of regional and global ' + + 'emission grid maps for 2008 and 2010 to study hemispheric transport of air pollution, ' + + 'Atmos. Chem. Phys., 15, 11411-11432, https://doi.org/10.5194/acp-15-11411-2015, 2015.\n ' + + 'web: http://edgar.jrc.ec.europa.eu/htap_v2/index.php', + 'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)\n ' + + 'HTAP contact: greet.maenhout@jrc.ec.europa.eu', + } + + out_path_aux = os.path.join(out_path, pollutant + '_' + sector.lower()) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + + out_path_aux = os.path.join(out_path_aux, '{0}_{1}.nc'.format(pollutant, year)) + print out_path_aux + write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data_aux], + boundary_latitudes=create_bounds(c_lats['data']), + boundary_longitudes=create_bounds(c_lons['data']), + global_attributes=global_attributes,) + + +def get_pollutant_dict(): + # TODO Documentation + """ + + :return: + """ + p_dict = { + 'bc': 'BC', + 'co': 'CO', + 'nh3': 'NH3', + 'nox_no2': 'NOx', + 'oc': 'OC', + 'pm10': 'PM10', + 'pm25': 'PM2.5', + 'so2': 'SO2', + 'nmvoc': 'NMVOC' + } + return p_dict + + +def get_sector_dict(): + # TODO Documentation + """ + + :return: + """ + common_dict = { + 'month': ['ENERGY', 'INDUSTRY', 'RESIDENTIAL', 'TRANSPORT'], + 'year': ['SHIPS', 'AIR_CDS', 'AIR_CRS', 'AIR_LTO'] + } + sector_dict = { + 'bc': common_dict, + 'co': common_dict, + 'nh3': {'month': ['AGRICULTURE', 'ENERGY', 'INDUSTRY', 'RESIDENTIAL', 'TRANSPORT'], + 'year': []}, + 'nox_no2': common_dict, + 'oc': common_dict, + 'pm10': common_dict, + 'pm25': common_dict, + 'so2': common_dict, + 'nmvoc': common_dict, + } + return sector_dict + + +def get_nmvoc_sector_dict(): + # TODO Documentation + """ + + :return: + """ + nmvoc_sectors = {'month': ['ENERGY', 'INDUSTRY_3subsectors', 'RESIDENTIAL', 'TRANSPORT'], + 'year': ['SHIPS', 'AIR_CDS', 
'AIR_CRS', 'AIR_LTO']} + return nmvoc_sectors + + +def check_vocs(year): + # TODO Documentation + """ + + :param year: + :return: + """ + from hermesv3_gr.tools.netcdf_tools import extract_vars + for month in xrange(1, 12 + 1, 1): + for snap in ['ENERGY', 'INDUSTRY', 'RESIDENTIAL', 'TRANSPORT']: + nmvoc_path = os.path.join(OUTPUT_PATH, 'monthly_mean', 'nmvoc_{0}'.format(snap.lower()), + 'nmvoc_{0}{1}.nc'.format(year, str(month).zfill(2))) + [new_voc] = extract_vars(nmvoc_path, ['nmvoc']) + nmvoc_sum = new_voc['data'].sum() + + voc_sum = 0 + for voc in ['voc{0}'.format(str(x).zfill(2)) for x in xrange(1, 25 + 1, 1)]: + voc_path = os.path.join(OUTPUT_PATH, 'monthly_mean', '{0}_{1}'.format(voc, snap.lower()), + '{0}_{1}{2}.nc'.format(voc, year, str(month).zfill(2))) + if os.path.exists(voc_path): + [new_voc] = extract_vars(voc_path, [voc]) + voc_sum += new_voc['data'].sum() + + print '{0} month: {4}; NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + snap, nmvoc_sum, voc_sum, 100 * (nmvoc_sum - voc_sum) / nmvoc_sum, month) + + +if __name__ == '__main__': + for y in LIST_YEARS: + for pollutant_dict in get_pollutant_dict().iteritems(): + for current_sector in get_sector_dict()[pollutant_dict[0]]['month']: + input_name_aux = INPUT_NAME.replace('', current_sector) + input_name_aux = input_name_aux.replace('', str(y)) + input_name_aux = input_name_aux.replace('', pollutant_dict[1]) + file_list = [os.path.join(INPUT_PATH, input_name_aux.replace('', str(aux_month))) + for aux_month in xrange(1, 13)] + + do_transformation(file_list, os.path.join(OUTPUT_PATH, 'monthly_mean'), pollutant_dict[0], + current_sector, y) + # annual inventories + for current_sector in get_sector_dict()[pollutant_dict[0]]['year']: + if current_sector[0:3] == 'AIR': + input_name_aux = INPUT_NAME_AIR + else: + input_name_aux = INPUT_NAME_SHIPS + input_name_aux = input_name_aux.replace('', current_sector) + input_name_aux = input_name_aux.replace('', str(y)) + input_name_aux = 
input_name_aux.replace('', pollutant_dict[1]) + input_name_aux = os.path.join(INPUT_PATH, input_name_aux) + + do_transformation_annual(input_name_aux, os.path.join(OUTPUT_PATH, 'yearly_mean', ), pollutant_dict[0], + current_sector, y) + + for current_sector in get_nmvoc_sector_dict()['month']: + if current_sector == 'INDUSTRY_3subsectors': + input_name_aux = INPUT_NAME_NMVOC_INDUSTRY + else: + input_name_aux = INPUT_NAME + input_name_aux = input_name_aux.replace('', 'NMVOC') + input_name_aux = input_name_aux.replace('', current_sector) + input_name_aux = input_name_aux.replace('', str(y)) + file_list = [os.path.join(INPUT_PATH, input_name_aux.replace('', str(aux_month))) + for aux_month in xrange(1, 13)] + + if current_sector == 'INDUSTRY_3subsectors': + do_nmvoc_industry_month_transformation(file_list, os.path.join(OUTPUT_PATH, 'monthly_mean'), + current_sector, y) + else: + do_nmvoc_month_transformation(file_list, os.path.join(OUTPUT_PATH, 'monthly_mean'), current_sector, y) + for current_sector in get_nmvoc_sector_dict()['year']: + if current_sector[0:3] == 'AIR': + input_name_aux = INPUT_NAME_AIR + else: + input_name_aux = INPUT_NAME_SHIPS + input_name_aux = input_name_aux.replace('', 'NMVOC') + input_name_aux = input_name_aux.replace('', current_sector) + input_name_aux = input_name_aux.replace('', str(y)) + input_name_aux = os.path.join(INPUT_PATH, input_name_aux) + print input_name_aux + do_nmvoc_year_transformation(input_name_aux, os.path.join(OUTPUT_PATH, 'yearly_mean'), current_sector, y) diff --git a/preproc/tno_mac_iii_preproc.py b/preproc/tno_mac_iii_preproc.py new file mode 100755 index 0000000000000000000000000000000000000000..73c62820ade5f9a656f9c617a9c74f5de0f275cc --- /dev/null +++ b/preproc/tno_mac_iii_preproc.py @@ -0,0 +1,368 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. 
+# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . + + +import os + + +# ============== CONFIGURATION PARAMETERS ====================== +INPUT_PATH = '/esarchive/recon/tno/tno_macc_iii/original_files/ascii' +OUTPUT_PATH = '/esarchive/recon/tno/tno_macc_iii/yearly_mean' +INPUT_NAME = 'TNO_MACC_III_emissions_v1_1_.txt' +# list_years = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011] +LIST_YEARS = [2011] +VOC_RATIO_PATH = '/esarchive/recon/tno/tno_macc_iii/original_files/nmvoc' +VOC_RATIO_NAME = 'ratio_.nc' +# ============================================================== + + +def get_pollutants(in_path): + """ + Find the pollutants on the ASCII emissions table. + + :param in_path: Path to the ASCII file that contains the information of the TNO_MAC-III emissions. + :type in_path: str + + :return: List of the acronyms of the pollutants. + :rtype: list + """ + import pandas as pd + + columns = list(pd.read_table(in_path, sep=';', nrows=1).columns) + return columns[6:] + + +def calculate_grid_definition(in_path): + """ + Calculate the latitude and longitude coordinates of the cell. + + :param in_path: Path to the file that contains all the information. + :type in_path: str + + :return: Latitudes array, Longitudes array, Latitude interval, Longitude interval. 
+ :rtype: numpy.array, numpy.array, float, float + """ + import pandas as pd + import numpy as np + + dataframe = pd.read_table(in_path, sep=';') + dataframe = dataframe[dataframe.SourceType != 'P'] + + # Longitudes + lons = np.sort(np.unique(dataframe.Lon)) + lons_interval = lons[1:] - lons[:-1] + print 'Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( + dataframe.Lon.min(), dataframe.Lon.max(), lons_interval.min(), len(lons)) + + # Latitudes + lats = np.sort(np.unique(dataframe.Lat)) + lats_interval = lats[1:] - lats[:-1] + print 'Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( + dataframe.Lat.min(), dataframe.Lat.max(), lats_interval.min(), len(lats)) + + lats = np.arange(-90 + lats_interval.min() / 2, 90, lats_interval.min(), dtype=np.float64) + lons = np.arange(-180 + lons_interval.min() / 2, 180, lons_interval.min(), dtype=np.float64) + + return lats, lons, lats_interval.min(), lons_interval.min() + + +def create_pollutant_empty_list(in_path, len_c_lats, len_c_lons): + """ + Create an empty pollutant list. + + :param in_path: Path to the file that contains the information. 
+ :type in_path: str + + :param len_c_lats: Number of elements on the latitude array + :type len_c_lats: int + + :param len_c_lons: Number of elements on the longitude array + :type len_c_lons: int + + :return: Pollutant list + :rtype: list + """ + import numpy as np + + pollutant_list = [] + for pollutant in get_pollutants(in_path): + aux_dict = {} + if pollutant == 'PM2_5': + aux_dict['name'] = 'pm25' + elif pollutant == 'NOX': + aux_dict['name'] = 'nox_no2' + else: + aux_dict['name'] = pollutant.lower() + aux_dict['TNO_name'] = pollutant + aux_dict['units'] = 'kg.m-2.s-1' + # aux_dict['units'] = 'Mg.km-2.year-1' + aux_dict['data'] = np.zeros((len_c_lats, len_c_lons)) + # aux_dict['data'] = np.zeros((len_c_lons, len_c_lats)) + pollutant_list.append(aux_dict) + return pollutant_list + + +def do_transformation(year): + """ + Make all the process to transform the emissions of the current year. + + :param year: year to process. + :type year: int + + :return: True when everything finishes well. + :rtype: Bool + """ + from hermesv3_gr.tools.netcdf_tools import write_netcdf, get_grid_area + from hermesv3_gr.tools.coordinates_tools import create_bounds + from datetime import datetime + import pandas as pd + import numpy as np + + in_file = os.path.join(INPUT_PATH, INPUT_NAME.replace('', str(year))) + + unit_factor = 1000. / (365. * 24. * 3600.) 
# To pass from Mg/year to Kg/s + # unit_factor = 1000000 # To pass from Mg/m2.year to Mg/Km2.year + + c_lats, c_lons, lat_interval, lon_interval = calculate_grid_definition(in_file) + + b_lats = create_bounds(c_lats, number_vertices=2) + b_lons = create_bounds(c_lons, number_vertices=2) + + dataframe = pd.read_table(in_file, sep=';') + + df_np = dataframe[dataframe.SourceType != 'P'] + df_p = dataframe[dataframe.SourceType == 'P'] + + df_np.loc[:, 'row_lat'] = np.array((df_np.Lat - (-90 + lat_interval / 2)) / lat_interval, dtype=np.int32) + df_np.loc[:, 'col_lon'] = np.array((df_np.Lon - (-180 + lon_interval / 2)) / lon_interval, dtype=np.int32) + + df_p.loc[:, 'row_lat'] = abs(np.array([c_lats] * len(df_p.Lat)) - df_p.Lat.values[:, None]).argmin(axis=1) + df_p.loc[:, 'col_lon'] = abs(np.array([c_lons] * len(df_p.Lon)) - df_p.Lon.values[:, None]).argmin(axis=1) + + dataframe = pd.concat([df_np, df_p]) + + for name, group in dataframe.groupby('SNAP'): + print 'snap', name + pollutant_list = create_pollutant_empty_list(in_file, len(c_lats), len(c_lons)) + + # Other mobile sources ignoring sea cells (shipping emissions) + if name == 8: + for sea in ['ATL', 'BAS', 'BLS', 'MED', 'NOS']: + group = group[group.ISO3 != sea] + + group = group.groupby(['row_lat', 'col_lon']).sum().reset_index() + + for i in xrange(len(pollutant_list)): + # pollutant_list[i]['data'][group.col_lon, group.row_lat] = group[pollutant_list[i]['TNO_name']] + pollutant_list[i]['data'][group.row_lat, group.col_lon] += group[pollutant_list[i]['TNO_name']] + pollutant_list[i]['data'] = pollutant_list[i]['data'].reshape((1,) + pollutant_list[i]['data'].shape) + # print pollutant_list[i]['data'].max() + + aux_output_path = os.path.join(OUTPUT_PATH, '{0}_snap{1}'.format(pollutant_list[i]['name'], name)) + if not os.path.exists(aux_output_path): + os.makedirs(aux_output_path) + aux_output_path = os.path.join(aux_output_path, '{0}_{1}.nc'.format(pollutant_list[i]['name'], year)) + 
write_netcdf(aux_output_path, c_lats, c_lons, [pollutant_list[i]], date=datetime(year, month=1, day=1), + boundary_latitudes=b_lats, boundary_longitudes=b_lons) + cell_area = get_grid_area(aux_output_path) + + pollutant_list[i]['data'] = pollutant_list[i]['data'] * unit_factor/cell_area + + write_netcdf(aux_output_path, c_lats, c_lons, [pollutant_list[i]], date=datetime(year, month=1, day=1), + boundary_latitudes=b_lats, boundary_longitudes=b_lons, cell_area=cell_area, + global_attributes={ + 'references': 'J. J. P. Kuenen, A. J. H. Visschedijk, M. Jozwicka, and H. A. C. ' + + 'Denier van der Gon TNO-MACC_II emission inventory; a multi-year ' + + '(2003-2009) consistent high-resolution European emission inventory ' + + 'for air quality modelling Atmospheric Chemistry and Physics 14 ' + + '10963-10976 2014', + 'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)'}) + return True + + +def extract_vars(netcdf_path, variables_list, attributes_list=()): + """ + Get the data from the list of variables. + + :param netcdf_path: Path to the netCDF file + :type netcdf_path: str + + :param variables_list: List of the names of the variables to get. + :type variables_list: list + + :param attributes_list: List of the names of the variable attributes to get. + :type attributes_list: list + + :return: List of the variables from the netCDF as a dictionary with data as values and with the other keys their + attributes. + :rtype: list. 
+ """ + from netCDF4 import Dataset + data_list = [] + # print netcdf_path + netcdf = Dataset(netcdf_path, mode='r') + for var in variables_list: + if var == 'emi_nox_no2': + var1 = var + var2 = 'emi_nox' + else: + var1 = var2 = var + dict_aux = \ + { + 'name': var1, + 'data': netcdf.variables[var2][:], + } + for attribute in attributes_list: + dict_aux.update({attribute: netcdf.variables[var2].getncattr(attribute)}) + data_list.append(dict_aux) + netcdf.close() + + return data_list + + +def get_voc_ratio(ratio_path, snap): + """ + Get the ratio of the VOC for the current SNAP. + + :param ratio_path: Path to the file with the ratios. + :type ratio_path: str + + :param snap: SNAP to get the ratio. + :type snap: str + + :return: VOC Ratio + :rtype: dict + """ + if snap == 'snap34': + snap = 'snap3' + try: + [data_list] = extract_vars(ratio_path, [snap]) + except KeyError: + return None + return data_list + + +def get_voc_list(): + """ + Get the VOC list. + + :return: VOC list + :rtype: list + """ + return ['voc{0}'.format(str(x).zfill(2)) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25]] + + +def get_sector_list(): + """ + Get the sector list. + + :return: Sector list + :rtype: list + """ + return ['snap{0}'.format(x) for x in [1, 2, 34, 5, 6, 71, 72, 73, 74, 8, 9]] + + +def do_voc_transformation(year): + """ + Make all the process to transform the VOC emissions of the current year. + + :param year: year to process. + :type year: int + + :return: True when everything finishes well. 
+ :rtype: Bool + """ + from warnings import warn as warning + from hermesv3_gr.tools.netcdf_tools import write_netcdf, extract_vars + from hermesv3_gr.tools.coordinates_tools import create_bounds + + for snap in get_sector_list(): + in_path = os.path.join(OUTPUT_PATH, 'nmvoc_{0}'.format(snap), 'nmvoc_{0}.nc'.format(year)) + [nmvoc, c_lats, c_lons, cell_area] = extract_vars(in_path, ['nmvoc', 'lat', 'lon', 'cell_area']) + for voc in get_voc_list(): + ratio_path = os.path.join(VOC_RATIO_PATH, VOC_RATIO_NAME.replace('', voc)) + ratios_dict = get_voc_ratio(ratio_path, snap) + if ratios_dict is not None: + new_voc = { + 'name': voc, + 'units': 'kg.m-2.s-2' + } + + b_lats = create_bounds(c_lats['data'], number_vertices=2) + b_lons = create_bounds(c_lons['data'], number_vertices=2) + mask = ratios_dict['data'] + + new_voc['data'] = nmvoc['data'] * mask + + out_dir_aux = os.path.join(OUTPUT_PATH, '{0}_{1}'.format(voc, snap)) + if not os.path.exists(out_dir_aux): + os.makedirs(out_dir_aux) + # print os.path.join(out_dir_aux, '{0}_{1}.nc'.format(voc, year)) + write_netcdf(os.path.join(out_dir_aux, '{0}_{1}.nc'.format(voc, year)), c_lats['data'], + c_lons['data'], [new_voc], boundary_latitudes=b_lats, boundary_longitudes=b_lons, + cell_area=cell_area['data'], + global_attributes={ + 'references': 'J. J. P. Kuenen, A. J. H. Visschedijk, M. Jozwicka, and H. A. C. ' + + 'Denier van der Gon TNO-MACC_II emission inventory; a multi-year ' + + '(2003-2009) consistent high-resolution European emission inventory ' + + 'for air quality modelling Atmospheric Chemistry and Physics 14 ' + + '10963-10976 2014', + 'comment': 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center)'}) + else: + warning("The pollutant {0} for the sector {1} does not exist.\n SNAP not found: {2}".format( + voc, snap, ratio_path)) + + return True + + +def check_vocs(year): + """ + Check that the VOCs are calculated correctly. 
+ + :param year: Year to evaluate + :type year: int + + :return: True when finish. + :rtype: bool + """ + for snap in get_sector_list(): + nmvoc_path = os.path.join(OUTPUT_PATH, 'nmvoc_{0}'.format(snap), 'nmvoc_{0}.nc'.format(year)) + [new_voc] = extract_vars(nmvoc_path, ['nmvoc']) + nmvoc_sum = new_voc['data'].sum() + + voc_sum = 0 + for voc in get_voc_list(): + voc_path = os.path.join(OUTPUT_PATH, '{0}_{1}'.format(voc, snap), '{0}_{1}.nc'.format(voc, year)) + if os.path.exists(voc_path): + [new_voc] = extract_vars(voc_path, [voc]) + voc_sum += new_voc['data'].sum() + + print '{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + snap, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum) + return True + + +if __name__ == '__main__': + for y in LIST_YEARS: + do_transformation(y) + do_voc_transformation(y) + # check_vocs(y) diff --git a/preproc/tno_mac_iii_preproc_voc_ratios.py b/preproc/tno_mac_iii_preproc_voc_ratios.py new file mode 100755 index 0000000000000000000000000000000000000000..9aea92690893c217c28bd069905ab604ff598147 --- /dev/null +++ b/preproc/tno_mac_iii_preproc_voc_ratios.py @@ -0,0 +1,532 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +import sys +import os + + +# ============== CONFIGURATION PARAMETERS ====================== +OUTPUT_PATH = '/esarchive/recon/tno/tno_macc_iii/original_files/nmvoc' +WORLD_INFO_PATH = '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/temporal/tz_world_country_iso3166.csv' +TNO_WORLD_MASK = '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/TNO_MACC-III_WorldMask.nc' +CSV_PATH = '/esarchive/recon/tno/tno_macc_iii/original_files/TNO_MACC_NMVOC profile_country_SNAP_12_05_2010.csv' +# ============================================================== + + +def extract_vars(netcdf_path, variables_list, attributes_list=()): + # TODO Documentation + """ + + :param netcdf_path: + :param variables_list: + :param attributes_list: + :return: + """ + from netCDF4 import Dataset + data_list = [] + # print netcdf_path + netcdf = Dataset(netcdf_path, mode='r') + for var in variables_list: + if var == 'emi_nox_no2': + var1 = var + var2 = 'emi_nox' + else: + var1 = var2 = var + dict_aux = \ + { + 'name': var1, + 'data': netcdf.variables[var2][:], + } + for attribute in attributes_list: + dict_aux.update({attribute: netcdf.variables[var2].getncattr(attribute)}) + data_list.append(dict_aux) + netcdf.close() + + return data_list + + +def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, + levels=None, date=None, hours=None, + boundary_latitudes=None, boundary_longitudes=None, cell_area=None, global_attributes=None, + regular_latlon=False, + rotated=False, rotated_lats=None, rotated_lons=None, north_pole_lat=None, north_pole_lon=None, + lcc=False, lcc_x=None, lcc_y=None, lat_1_2=None, lon_0=None, lat_0=None): + # TODO Documentation + """ + + :param netcdf_path: + :param center_latitudes: + :param center_longitudes: + :param data_list: + :param levels: + :param date: + :param hours: + :param boundary_latitudes: + :param boundary_longitudes: + :param cell_area: + :param global_attributes: + :param regular_latlon: + :param rotated: + :param rotated_lats: + 
:param rotated_lons: + :param north_pole_lat: + :param north_pole_lon: + :param lcc: + :param lcc_x: + :param lcc_y: + :param lat_1_2: + :param lon_0: + :param lat_0: + :return: + """ + from cf_units import Unit, encode_time + from netCDF4 import Dataset + + if not (regular_latlon or lcc or rotated): + regular_latlon = True + print netcdf_path + netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") + + # ===== Dimensions ===== + if regular_latlon: + var_dim = ('lat', 'lon',) + + # Latitude + if len(center_latitudes.shape) == 1: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lat',) + elif len(center_latitudes.shape) == 2: + netcdf.createDimension('lat', center_latitudes.shape[0]) + lat_dim = ('lon', 'lat',) + else: + print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + sys.exit(1) + + # Longitude + if len(center_longitudes.shape) == 1: + netcdf.createDimension('lon', center_longitudes.shape[0]) + lon_dim = ('lon',) + elif len(center_longitudes.shape) == 2: + netcdf.createDimension('lon', center_longitudes.shape[1]) + lon_dim = ('lon', 'lat',) + else: + print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + sys.exit(1) + elif rotated: + var_dim = ('rlat', 'rlon',) + + # Rotated Latitude + if rotated_lats is None: + print 'ERROR: For rotated grids is needed the rotated latitudes.' + sys.exit(1) + netcdf.createDimension('rlat', len(rotated_lats)) + lat_dim = ('rlat', 'rlon',) + + # Rotated Longitude + if rotated_lons is None: + print 'ERROR: For rotated grids is needed the rotated longitudes.' 
+ sys.exit(1) + netcdf.createDimension('rlon', len(rotated_lons)) + lon_dim = ('rlat', 'rlon',) + + elif lcc: + var_dim = ('y', 'x',) + + netcdf.createDimension('y', len(lcc_y)) + lat_dim = ('y', 'x',) + + netcdf.createDimension('x', len(lcc_x)) + lon_dim = ('y', 'x',) + else: + lat_dim = None + lon_dim = None + var_dim = None + + # Levels + if levels is not None: + netcdf.createDimension('lev', len(levels)) + + # Bounds + if boundary_latitudes is not None: + # print boundary_latitudes.shape + # print len(boundary_latitudes[0, 0]) + netcdf.createDimension('nv', len(boundary_latitudes[0, 0])) + # sys.exit() + + # Time + netcdf.createDimension('time', None) + + # ===== Variables ===== + # Time + if date is None: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + time.units = "months since 2000-01-01 00:00:00" + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0.] + else: + time = netcdf.createVariable('time', 'd', ('time',), zlib=True) + u = Unit('hours') + # print u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second)) + # Unit('hour since 1970-01-01 00:00:00.0000000 UTC') + time.units = str( + u.offset_by_time(encode_time(date.year, date.month, date.day, date.hour, date.minute, date.second))) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = hours + + # Latitude + lats = netcdf.createVariable('lat', 'f', lat_dim, zlib=True) + lats.units = "degrees_north" + lats.axis = "Y" + lats.long_name = "latitude coordinate" + lats.standard_name = "latitude" + lats[:] = center_latitudes + + if boundary_latitudes is not None: + lats.bounds = "lat_bnds" + lat_bnds = netcdf.createVariable('lat_bnds', 'f', lat_dim + ('nv',), zlib=True) + # print lat_bnds[:].shape, boundary_latitudes.shape + lat_bnds[:] = boundary_latitudes + + # Longitude + lons = netcdf.createVariable('lon', 'f', lon_dim, zlib=True) + + lons.units = 
"degrees_east" + lons.axis = "X" + lons.long_name = "longitude coordinate" + lons.standard_name = "longitude" + print 'lons:', lons[:].shape, center_longitudes.shape + lons[:] = center_longitudes + if boundary_longitudes is not None: + lons.bounds = "lon_bnds" + lon_bnds = netcdf.createVariable('lon_bnds', 'f', lon_dim + ('nv',), zlib=True) + # print lon_bnds[:].shape, boundary_longitudes.shape + lon_bnds[:] = boundary_longitudes + + if rotated: + # Rotated Latitude + rlat = netcdf.createVariable('rlat', 'f', ('rlat',), zlib=True) + rlat.long_name = "latitude in rotated pole grid" + rlat.units = Unit("degrees").symbol + rlat.standard_name = "grid_latitude" + rlat[:] = rotated_lats + + # Rotated Longitude + rlon = netcdf.createVariable('rlon', 'f', ('rlon',), zlib=True) + rlon.long_name = "longitude in rotated pole grid" + rlon.units = Unit("degrees").symbol + rlon.standard_name = "grid_longitude" + rlon[:] = rotated_lons + if lcc: + x = netcdf.createVariable('x', 'd', ('x',), zlib=True) + x.units = Unit("km").symbol + x.long_name = "x coordinate of projection" + x.standard_name = "projection_x_coordinate" + x[:] = lcc_x + + y = netcdf.createVariable('y', 'd', ('y',), zlib=True) + y.units = Unit("km").symbol + y.long_name = "y coordinate of projection" + y.standard_name = "projection_y_coordinate" + y[:] = lcc_y + + cell_area_dim = var_dim + # Levels + if levels is not None: + var_dim = ('lev',) + var_dim + lev = netcdf.createVariable('lev', 'f', ('lev',), zlib=True) + lev.units = Unit("m").symbol + lev.positive = 'up' + lev[:] = levels + + # All variables + if len(data_list) is 0: + var = netcdf.createVariable('aux_var', 'f', ('time',) + var_dim, zlib=True) + var[:] = 0 + for variable in data_list: + # print ('time',) + var_dim + var = netcdf.createVariable(variable['name'], 'f', ('time',) + var_dim, zlib=True) + var.units = Unit(variable['units']).symbol + if 'long_name' in variable: + var.long_name = str(variable['long_name']) + if 'standard_name' in variable: + 
var.standard_name = str(variable['standard_name']) + if 'cell_method' in variable: + var.cell_method = str(variable['cell_method']) + var.coordinates = "lat lon" + if cell_area is not None: + var.cell_measures = 'area: cell_area' + if regular_latlon: + var.grid_mapping = 'crs' + elif rotated: + var.grid_mapping = 'rotated_pole' + elif lcc: + var.grid_mapping = 'Lambert_conformal' + # if variable['data'] is not 0: + # print var[:].shape, variable['data'].shape + try: + var[:] = variable['data'] + except Exception: + print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + + # Grid mapping + if regular_latlon: + # CRS + mapping = netcdf.createVariable('crs', 'i') + mapping.grid_mapping_name = "latitude_longitude" + mapping.semi_major_axis = 6371000.0 + mapping.inverse_flattening = 0 + elif rotated: + # Rotated pole + mapping = netcdf.createVariable('rotated_pole', 'c') + mapping.grid_mapping_name = 'rotated_latitude_longitude' + mapping.grid_north_pole_latitude = north_pole_lat + mapping.grid_north_pole_longitude = north_pole_lon + elif lcc: + # CRS + mapping = netcdf.createVariable('Lambert_conformal', 'i') + mapping.grid_mapping_name = "lambert_conformal_conic" + mapping.standard_parallel = lat_1_2 + mapping.longitude_of_central_meridian = lon_0 + mapping.latitude_of_projection_origin = lat_0 + + # Cell area + if cell_area is not None: + c_area = netcdf.createVariable('cell_area', 'f', cell_area_dim) + c_area.long_name = "area of the grid cell" + c_area.standard_name = "cell_area" + c_area.units = Unit("m2").symbol + # print c_area[:].shape, cell_area.shape + c_area[:] = cell_area + + if global_attributes is not None: + netcdf.setncatts(global_attributes) + + netcdf.close() + return True + + +def get_grid_area(filename): + """ + Calculate the area of each cell. + + :param filename: Full path to the NetCDF to calculate the cell areas. + :type filename: str + + :return: Returns the area of each cell. 
+ :rtype: numpy.array + """ + from cdo import Cdo + from netCDF4 import Dataset + + cdo = Cdo() + src = cdo.gridarea(input=filename) + nc_aux = Dataset(src, mode='r') + grid_area = nc_aux.variables['cell_area'][:] + nc_aux.close() + + return grid_area + + +def create_bounds(coords, number_vertices=2): + """ + Calculate the vertices coordinates. + + :param coords: Coordinates in degrees (latitude or longitude) + :type coords: numpy.array + + :param number_vertices: Non mandatory parameter that informs the number of vertices that must have the boundaries. + (by default 2) + :type number_vertices: int + + :return: Array with as many elements as vertices for each value of coords. + :rtype: numpy.array + """ + import numpy as np + + interval = coords[1] - coords[0] + + coords_left = coords - interval / 2 + coords_right = coords + interval / 2 + if number_vertices == 2: + bound_coords = np.dstack((coords_left, coords_right)) + elif number_vertices == 4: + bound_coords = np.dstack((coords_left, coords_right, coords_right, coords_left)) + else: + raise ValueError('The number of vertices of the boudaries must be 2 or 4') + + return bound_coords + + +def create_voc_ratio(voc): + # TODO Docuemtnation + """ + + :param voc: + :return: + """ + import numpy as np + [country_values, lat, lon] = extract_vars(TNO_WORLD_MASK, ['timezone_id', 'lat', 'lon']) + country_values = country_values['data'].reshape((country_values['data'].shape[1], country_values['data'].shape[1])) + print OUTPUT_PATH + if not os.path.exists(OUTPUT_PATH): + os.makedirs(OUTPUT_PATH) + + complete_output_path = os.path.join(OUTPUT_PATH, 'ratio_{0}.nc'.format(voc)) + if not os.path.exists(complete_output_path): + print 'Creating ratio file for {0}\npath: {1}'.format(voc, complete_output_path) + data_list = [] + for snap in get_sector_list(voc): + print snap + mask_factor = np.zeros(country_values.shape) + iso_codes = get_iso_codes() + for country_code, factor in get_country_code_and_factor(voc, snap).iteritems(): 
+ try: + mask_factor[country_values == iso_codes[country_code]] = factor + except Exception: + pass + # To fulfill the blanks on the map + mask_factor[mask_factor <= 0] = get_default_ratio(voc, snap) + + data_list.append({ + 'name': 'snap{0}'.format(snap), + 'units': '', + 'data': mask_factor.reshape((1,) + mask_factor.shape) + }) + write_netcdf(complete_output_path, lat['data'], lon['data'], data_list) + else: + print 'Ratio file for {0} already created\npath: {1}'.format(voc, complete_output_path) + return True + + +def get_default_ratio(voc, snap): + # TODO Documentation + """ + + :param voc: + :param snap: + :return: + """ + import pandas as pd + + df = pd.read_csv(CSV_PATH, sep=';') + + df = df.loc[df['vcode'] == voc.replace('voc', 'v'), :] + df = df.loc[df['snap'] == snap, :] + + return df.loc[df['ISO3'] == 'EUR', 'fr'].item() + + +def get_iso_codes(): + # TODO Documentation + """ + + :return: + """ + import pandas as pd + + # df = pd.read_csv(self.world_info, sep=';', index_col=False, names=["country", "country_code"]) + df = pd.read_csv(WORLD_INFO_PATH, sep=';') + del df['time_zone'], df['time_zone_code'] + df = df.drop_duplicates().dropna() + df = df.set_index('country_code_alpha') + codes_dict = df.to_dict() + codes_dict = codes_dict['country_code'] + + return codes_dict + + +def get_voc_list(): + # TODO Documentation + """ + + :return: + """ + import pandas as pd + + df = pd.read_csv(CSV_PATH, sep=';') + del df['ISO3'], df['snap'], df['output substance name'], df['fr'] + df = df.drop_duplicates().dropna() + voc_list = df.vcode.values + for i in xrange(len(voc_list)): + voc_list[i] = voc_list[i].replace('v', 'voc') + return df.vcode.values + + +def get_sector_list(voc): + # TODO Documentation + """ + + :param voc: + :return: + """ + import pandas as pd + voc = voc.replace('voc', 'v') + df = pd.read_csv(CSV_PATH, sep=';') + df = df[df.vcode == voc] + del df['ISO3'], df['vcode'], df['output substance name'], df['fr'] + df = df.drop_duplicates().dropna() + 
return df.snap.values + + +def get_sector_list_text(voc): + # TODO Documentation + """ + + :param voc: + :return: + """ + voc = voc.replace('voc', 'v') + sector_list = get_sector_list(voc) + new_list = [] + for int_sector in sector_list: + new_list.append('snap{0}'.format(int_sector)) + return new_list + + +def get_country_code_and_factor(voc, snap): + # TODO Documentation + """ + + :param voc: + :param snap: + :return: + """ + import pandas as pd + voc = voc.replace('voc', 'v') + df = pd.read_csv(CSV_PATH, sep=';') + df = df[df.vcode == voc] + df = df[df.snap == snap] + del df['snap'], df['vcode'], df['output substance name'] + df = df.drop_duplicates().dropna() + df = df.set_index('ISO3') + + country_dict = df.to_dict() + country_dict = country_dict['fr'] + + return country_dict + + +if __name__ == '__main__': + for voc_name in get_voc_list(): + create_voc_ratio(voc_name) diff --git a/preproc/wiedinmyer_preproc.py b/preproc/wiedinmyer_preproc.py new file mode 100755 index 0000000000000000000000000000000000000000..5af455f941d1c40e82b44f1307b61a0acf265236 --- /dev/null +++ b/preproc/wiedinmyer_preproc.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + +import os +import timeit +from netCDF4 import Dataset + + +# ============== CONFIGURATION PARAMETERS ====================== +INPUT_PATH = '/esarchive/recon/ucar/wiedinmyer/original_files/' +OUTPUT_PATH = '/esarchive/recon/ucar/wiedinmyer/' +LIST_POLLUTANTS = ['co2', 'co', 'so2', 'nox_no', 'nh3', 'ch4', 'c2h2', 'c2h4', 'c3h6', 'ch3oh', 'ch2o', 'ch3cooh', + 'hcn', 'c6h6', 'pcb', 'pah', 'pcdd', 'pbdd', 'nmoc', 'hcl', 'hg', 'pm25', 'pm10', 'oc', 'bc'] + +INPUT_NAME = 'ALL_Emiss_04282014.nc' +YEAR = 2010 +# ============================================================== + + +def out_pollutant_to_in_pollutant(out_p): + # TODO Documentation + pollutant_dict = { + 'co2': 'CO2grid', + 'co': 'COgrid', + 'so2': 'SO2grid', + 'nox_no': 'NOxgrid', + 'nh3': 'NH3grid', + 'ch4': 'CH4grid', + 'c2h2': 'C2H2grid', + 'c2h4': 'C2H4grid', + 'c3h6': 'C3H6grid', + 'ch3oh': 'MEOHgrid', + 'ch2o': 'FORMgrid', + 'ch3cooh': 'AcetAcidgrid', + 'hcn': 'HCNgrid', + 'c6h6': 'BENZgrid', + 'pcb': 'PCBgrid', + 'pah': 'PAHgrid', + 'pcdd': 'PCDDgrid', + 'pbdd': 'PBDDgrid', + 'nmoc': 'NMOCgrid', + 'hcl': 'HClgrid', + 'hg': 'Hggrid', + 'pm25': 'PM25grid', + 'pm10': 'PM10grid', + 'oc': 'OCgrid', + 'bc': 'BCgrid', + } + + return pollutant_dict[out_p] + + +def do_transformation(filename): + """ + Re-write the WIEDINMYER inputs following ES anc CF-1.6 conventions. + + :param filename: Name of the input file. + :type filename: str + """ + import numpy as np + print filename + from hermesv3_gr.tools.netcdf_tools import get_grid_area + from cf_units import Unit + + grid_area = get_grid_area(filename) + + nc_in = Dataset(filename, mode='r') + + # Reading lat, lon + lats = nc_in.variables['lat'][:] + lons = nc_in.variables['lon'][:] + + factor = 1000000./(365.*24.*3600.) 
# To pass from Gg/m2.year to Kg/m2.s + + for output_pollutant in LIST_POLLUTANTS: + input_pollutant = out_pollutant_to_in_pollutant(output_pollutant) + + data = nc_in.variables[input_pollutant][:] + data = np.nan_to_num(data) + data = data/grid_area # To pass from Gg/year to Gg/m2.year + data = data*factor + data_attributes = {'name': output_pollutant, + 'long_name': nc_in.variables[input_pollutant].long_name, + 'units': Unit('kg.m-2.s-1').symbol, + 'coordiantes': 'lat lon', + 'grid_mapping': 'crs'} + data = np.array(data) + + out_path_aux = os.path.join(OUTPUT_PATH, 'yearly_mean', output_pollutant) + if not os.path.exists(out_path_aux): + os.makedirs(out_path_aux) + write_netcdf(os.path.join(out_path_aux, '{0}_{1}.nc'.format(output_pollutant, YEAR)), + data, data_attributes, lats, lons, grid_area, YEAR, 01) + nc_in.close() + + +def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, grid_cell_area, time_year, time_month): + """ + Write a NetCDF with the given information. + + :param output_name_path: Complete path to the output NetCDF to be stored. + :type output_name_path: str + + :param data: Data of the variable to be stored. + :type data: numpy.array + + :param data_atts: Information of the data to fill the data attributes of the NetCDF variable. + 'long_name': Name of the pollutant. + 'units': Units of the pollutant. + 'coordiantes': Variables that contains the coordinates of the data. + 'grid_mapping': Mapping variable + :type data_atts: dict + + :param center_lats: Latitudes of the center of each cell. + :type center_lats: numpy.array + + :param center_lons: Longitudes of the center of each cell. + :type center_lons: numpy.array + + :param grid_cell_area: Area of each cell of the grid. + :type: numpy.array + + :param time_year: Year. + :type time_year: int + + :param time_month: Number of the month. 
+ :type time_month: int + """ + from hermesv3_gr.tools.coordinates_tools import create_bounds + + print output_name_path + # Creating NetCDF & Dimensions + nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") + nc_output.createDimension('nv', 2) + nc_output.createDimension('lon', center_lons.shape[0]) + nc_output.createDimension('lat', center_lats.shape[0]) + nc_output.createDimension('time', None) + + # TIME + time = nc_output.createVariable('time', 'd', ('time',), zlib=True) + # time.units = "{0} since {1}".format(tstep_units, global_atts['Start_DateTime'].strftime('%Y-%m-%d %H:%M:%S')) + time.units = "months since {0}-{1}-01 00:00:00".format(time_year, str(time_month).zfill(2)) + time.standard_name = "time" + time.calendar = "gregorian" + time.long_name = "time" + time[:] = [0] + + # LATITUDE + lat = nc_output.createVariable('lat', 'f', ('lat',), zlib=True) + lat.bounds = "lat_bnds" + lat.units = "degrees_north" + lat.axis = "Y" + lat.long_name = "latitude" + lat.standard_name = "latitude" + lat[:] = center_lats + + lat_bnds = nc_output.createVariable('lat_bnds', 'f', ('lat', 'nv',), zlib=True) + lat_bnds[:] = create_bounds(center_lats) + + # LONGITUDE + lon = nc_output.createVariable('lon', 'f', ('lon',), zlib=True) + lon.bounds = "lon_bnds" + lon.units = "degrees_east" + lon.axis = "X" + lon.long_name = "longitude" + lon.standard_name = "longitude" + lon[:] = center_lons + + lon_bnds = nc_output.createVariable('lon_bnds', 'f', ('lon', 'nv',), zlib=True) + lon_bnds[:] = create_bounds(center_lons) + + # VARIABLE + nc_var = nc_output.createVariable(data_atts['name'], 'f', ('time', 'lat', 'lon',), zlib=True) + nc_var.units = data_atts['units'] + nc_var.long_name = data_atts['long_name'] + nc_var.coordinates = data_atts['coordiantes'] + nc_var.grid_mapping = data_atts['grid_mapping'] + nc_var.cell_measures = 'area: cell_area' + nc_var[:] = data.reshape((1,) + data.shape) + + # CELL AREA + cell_area = nc_output.createVariable('cell_area', 'f', ('lat', 
'lon',)) + cell_area.long_name = "area of the grid cell" + cell_area.standard_name = "area" + cell_area.units = "m2" + cell_area[:] = grid_cell_area + + # CRS + crs = nc_output.createVariable('crs', 'i') + crs.grid_mapping_name = "latitude_longitude" + crs.semi_major_axis = 6371000.0 + crs.inverse_flattening = 0 + + nc_output.setncattr('title', 'Annual trash burning emissions', ) + nc_output.setncattr('Conventions', 'CF-1.6', ) + nc_output.setncattr('institution', 'UCAR', ) + nc_output.setncattr('source', 'WIEDINMYER', ) + nc_output.setncattr('history', 'Re-writing of the WIEDINMYER input to follow the CF 1.6 conventions;\n' + + '2014-04-28: Created by C. Wiedinmyer;\n' + + '2017-04-04: Added time dimension (UNLIMITED);\n' + + '2017-04-04: Added boundaries;\n' + + '2017-04-04: Added global attributes;\n' + + '2017-04-04: Re-naming pollutant;\n' + + '2017-04-04: Added cell_area variable;\n') + nc_output.setncattr('references', '', ) + nc_output.setncattr('comment', 'Re-writing done by Carles Tena (carles.tena@bsc.es) from the BSC-CNS ' + + '(Barcelona Supercomputing Center); Original file from C. 
Wiedinmyer', ) + + nc_output.close() + + +if __name__ == '__main__': + starting_time = timeit.default_timer() + + do_transformation(os.path.join(INPUT_PATH, INPUT_NAME)) + + print 'Time(s):', timeit.default_timer() - starting_time diff --git a/run_test.py b/run_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb2926793f63a9c62eee79c06273d12f95fab82 --- /dev/null +++ b/run_test.py @@ -0,0 +1,24 @@ +# coding=utf-8 +"""Script to run the tests for EarthDiagnostics and generate the code coverage report""" + +import os +import sys +import pytest + + +work_path = os.path.abspath(os.path.join(os.path.dirname(__file__))) +os.chdir(work_path) +print(work_path) + + +version = sys.version_info[0] +report_dir = 'tests/report/python{}'.format(version) +errno = pytest.main([ + 'tests', + '--ignore=tests/report', + '--cov=hermesv3_gr', + '--cov-report=term', + '--cov-report=html:{}/coverage_html'.format(report_dir), + '--cov-report=xml:{}/coverage.xml'.format(report_dir), +]) +sys.exit(errno) diff --git a/setup.py b/setup.py index 45941a941fbbdf02f8e1902e72d04d0c69d69e69..1778db762ceb53f79ceb1a380f8a6fea29ae21f1 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,33 @@ #!/usr/bin/env python -from os import path +# Copyright 2018 Earth Sciences Department, BSC-CNS +# +# This file is part of HERMESv3_GR. +# +# HERMESv3_GR is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# HERMESv3_GR is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with HERMESv3_GR. If not, see . 
+ + from setuptools import find_packages from setuptools import setup +from hermesv3_gr import __version__ -here = path.abspath(path.dirname(__file__)) # Get the version number from the relevant file -with open(path.join(here, 'VERSION')) as f: - version = f.read().strip() +version = __version__ + +with open("README.md", "r") as f: + long_description = f.read() setup( name='hermesv3_gr', @@ -16,9 +35,11 @@ setup( # platforms=['GNU/Linux Debian'], version=version, description='HERMESv3 Global/Regional', + long_description=long_description, + long_description_content_type="text/markdown", author='Carles Tena Medina', author_email='carles.tena@bsc.es', - # url='http://www.bsc.es/projects/earthscience/autosubmit/', + url='https://earth.bsc.es/gitlab/es/hermesv3_gr', # download_url='https://earth.bsc.es/wiki/doku.php?id=tools:autosubmit', keywords=['emissions', 'cmaq', 'monarch', 'wrf-chem', 'atmospheric composition', 'air quality', 'earth science'], @@ -26,24 +47,74 @@ setup( install_requires=[ 'numpy', 'netCDF4>=1.3.1', - 'cdo>=1.3.4', + 'cdo>=1.3.3', 'pandas', + 'fiona', + 'Rtree', 'geopandas', 'pyproj', + 'configargparse', + 'cf_units>=1.1.3', + 'ESMPy>=7.1.0.dev0', + 'holidays', + 'pytz', + 'timezonefinder', + 'mpi4py', + 'pytest', ], packages=find_packages(), + classifiers=[ + "Programming Language :: Python :: 2.7", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Operating System :: OS Independent", + "Topic :: Scientific/Engineering :: Atmospheric Science" + ], + package_data={'': [ + 'README.md', + 'CHANGELOG', + 'LICENSE', + ] + }, + data_files=[('.', ['LICENSE', 'CHANGELOG', ]), + ('conf', ['conf/hermes.conf', + 'conf/EI_configuration.csv', ]), + ('data', ['data/global_attributes.csv', ]), + ('data/profiles', []), + ('data/profiles/speciation', [ + 'data/profiles/speciation/MolecularWeights.csv', + 'data/profiles/speciation/Speciation_profile_cb05_aero5_CMAQ.csv', + 'data/profiles/speciation/Speciation_profile_cb05_aero5_MONARCH.csv', 
+ 'data/profiles/speciation/Speciation_profile_cb05_aero6_CMAQ.csv', + 'data/profiles/speciation/Speciation_profile_cb05e51_aero6_CMAQ.csv', + 'data/profiles/speciation/Speciation_profile_radm2_madesorgam_WRF_CHEM.csv', ]), + ('data/profiles/temporal', [ + 'data/profiles/temporal/TemporalProfile_Daily.csv', + 'data/profiles/temporal/TemporalProfile_Hourly.csv', + 'data/profiles/temporal/TemporalProfile_Monthly.csv', + 'data/profiles/temporal/tz_world_country_iso3166.csv', ]), + ('data/profiles/vertical', [ + 'data/profiles/vertical/Benchmark_15layers_vertical_description.csv', + 'data/profiles/vertical/Vertical_profile.csv', ]), + ('preproc', ['preproc/ceds_preproc.py', + 'preproc/eclipsev5a_preproc.py', + 'preproc/edgarv432_ap_preproc.py', + 'preproc/edgarv432_voc_preproc.py', + 'preproc/emep_preproc.py', + 'preproc/gfas12_preproc.py', + 'preproc/htapv2_preproc.py', + 'preproc/tno_mac_iii_preproc.py', + 'preproc/tno_mac_iii_preproc_voc_ratios.py', + 'preproc/wiedinmyer_preproc.py', ]), + ], + include_package_data=True, - # package_data={'hermes_v3': [ - # 'autosubmit/config/files/autosubmit.conf', - # 'autosubmit/config/files/expdef.conf', - # 'autosubmit/database/data/autosubmit.sql', - # 'README', - # 'CHANGELOG', - # 'VERSION', - # 'LICENSE', - # 'docs/autosubmit.pdf' - # ] - # }, - - scripts=['bin/hermes_gr'] + + entry_points={ + 'console_scripts': [ + 'hermesv3_gr = hermesv3_gr.hermes:run', + 'hermesv3_gr_copy_config_files = hermesv3_gr.tools.sample_files:copy_config_files', + 'hermesv3_gr_copy_preproc_files = hermesv3_gr.tools.sample_files:copy_preproc_files', + 'hermesv3_gr_download_benchmark = hermesv3_gr.tools.download_benchmark:download_benchmark', + ], + }, ) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/unit/test_lint.py b/tests/unit/test_lint.py new file mode 100644 index 0000000000000000000000000000000000000000..5a5fd7679dbee05f170d2a09184e7e086f3473b7 --- /dev/null +++ b/tests/unit/test_lint.py @@ -0,0 +1,33 @@ +""" Lint tests """ +import os +import unittest + +import pycodestyle # formerly known as pep8 + + +class TestLint(unittest.TestCase): + + def test_pep8_conformance(self): + """Test that we conform to PEP-8.""" + + check_paths = [ + 'hermesv3_gr', + 'tests', + ] + exclude_paths = [ + + ] + + print("PEP8 check of directories: {}\n".format(', '.join(check_paths))) + + # Get paths wrt package root + package_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + for paths in (check_paths, exclude_paths): + for i, path in enumerate(paths): + paths[i] = os.path.join(package_root, path) + + style = pycodestyle.StyleGuide() + style.options.exclude.extend(exclude_paths) + style.options.max_line_length = 120 + + self.assertEqual(style.check_files(check_paths).total_errors, 0) diff --git a/tests/unit/test_temporal.py b/tests/unit/test_temporal.py new file mode 100644 index 0000000000000000000000000000000000000000..6174229171f50de40ad585709123516a2b0b7446 --- /dev/null +++ b/tests/unit/test_temporal.py @@ -0,0 +1,428 @@ +#!/usr/bin/env python + +# Copyright 2018 Earth Sciences Department, BSC-CNS + +# This file is part of HERMESv3. + +# HERMESv3 is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# HERMESv3 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with HERMESv3. If not, see . + +import os +from datetime import datetime, timedelta +import unittest +import hermesv3_gr.config.settings as settings + +from hermesv3_gr.modules.temporal.temporal import TemporalDistribution + + +class TestTemporalDistribution(unittest.TestCase): + def setUp(self): + pass + + # def testing_calculate_ending_date_1hour(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0), 'hourly', 1, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_ending_date(), + # datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0)) + # + # def testing_calculate_ending_date_24hours(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0), 'hourly', 24, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_ending_date(), + # datetime(year=2016, month=01, day=01, hour=23, minute=0, second=0)) + # + # def testing_calculate_ending_date_3hour_each2(self): + # temporal = 
TemporalDistribution( + # datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0), 'hourly', 3, 2, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_ending_date(), + # datetime(year=2016, month=01, day=01, hour=4, minute=0, second=0)) + # + # def testing_def_calculate_timedelta_3hour_each2(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0), 'hourly', 3, 2, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_timedelta(datetime(year=2016, month=01, day=01, hour=0, minute=0, second=0)), + # timedelta(hours=2)) + # + # def testing_def_calculate_timedelta_month(self): + # temporal = TemporalDistribution( + # datetime(year=2017, month=02, day=01, hour=0, minute=0, second=0), 'monthly', 1, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # 
'/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_timedelta(datetime(year=2017, month=02, day=01, hour=0, minute=0, second=0)), + # timedelta(hours=24*28)) + # + # def testing_def_calculate_timedelta_month_leapyear(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=02, day=01, hour=0, minute=0, second=0), 'monthly', 1, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # self.assertEqual( + # temporal.calculate_timedelta(datetime(year=2016, month=02, day=01, hour=0, minute=0, second=0)), + # timedelta(hours=24*29)) + # + # def testing_get_tz_from_id(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.get_tz_from_id(309), + # "Europe/Andorra") + # + # def testing_get_id_from_tz(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # 
'/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.get_id_from_tz("Europe/Andorra"), + # 309) + # + # def testing_parse_tz(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.parse_tz("America/Fort_Nelson"), + # 'America/Vancouver') + # + # def testing_find_closest_timezone_BCN(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.find_closest_timezone(41.390205, 2.154007), + # 'Europe/Madrid') + # + # def testing_find_closest_timezone_MEX(self): + # temporal = 
TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.find_closest_timezone(19.451054, -99.125519), + # "America/Mexico_City") + # + # def testing_find_closest_timezone_Kuwait(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # temporal.find_closest_timezone(29.378586, 47.990341), + # "Asia/Kuwait") + # + # def testing_find_closest_timezone_Shanghai(self): + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/global_1.0_1.40625') + # + # self.assertEqual( + # 
temporal.find_closest_timezone(31.267401, 121.522179), + # "Asia/Shanghai") + # + # def testing_create_netcdf_timezones(self): + # import numpy as np + # from hermesv3_gr.modules.grids.grid import Grid + # from hermesv3_gr.tools.netcdf_tools import extract_vars + # + # aux_path = '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing' + # if not os.path.exists(aux_path): + # os.makedirs(aux_path) + # + # grid = Grid('global', aux_path) + # grid.center_latitudes = np.array([[41.390205, 19.451054], [29.378586, 31.267401]]) + # grid.center_longitudes = np.array([[2.154007, -99.125519], [47.990341, 121.522179]]) + # + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # aux_path) + # + # self.assertTrue(temporal.create_netcdf_timezones(grid)) + # + # [timezones] = extract_vars(temporal.netcdf_timezones, ['timezone_id']) + # timezones = list(timezones['data'][0, :].astype(int).flatten()) + # + # self.assertEqual(timezones, + # [335, 147, 247, 268]) + # + # def testing_calculate_timezones(self): + # self.testing_create_netcdf_timezones() + # + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # 
'/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # self.assertEqual(temporal.calculate_timezones().tolist(), + # [['Europe/Madrid', "America/Mexico_City"], ["Asia/Kuwait", "Asia/Shanghai"]]) + # + # def testing_calculate_2d_temporal_factors(self): + # self.testing_create_netcdf_timezones() + # + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # timezones = temporal.calculate_timezones() + # + # temporal.monthly_profile = {1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1., + # 7: 1., + # 8: 1., + # 9: 1., + # 10: 1., + # 11: 1., + # 12: 1.} + # temporal.daily_profile_id = {0: 1., + # 1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1.} + # temporal.hourly_profile = {0: 1., + # 1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1., + # 7: 1., + # 8: 1., + # 9: 1., + # 10: 1., + # 11: 1., + # 12: 1., + # 13: 20., + # 14: 1., + # 15: 1., + # 16: 1., + # 17: 1., + # 18: 1., + # 19: 1., + # 20: 1., + # 21: 1., + # 22: 1., + # 23: 1.} + # + # self.assertEqual( + # temporal.calculate_2d_temporal_factors( + # datetime(year=2017, month=6, day=23, hour=11, minute=0, second=0), timezones).tolist(), + # [[20., 1.], [1., 1.]]) + # + # def testing_do_temporal(self): + # import numpy as np + # from hermesv3_gr.modules.grids.grid import Grid + # self.testing_create_netcdf_timezones() + # + # aux_path = '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing' + # + # temporal = TemporalDistribution( + # datetime(year=2017, month=6, day=23, 
hour=11, minute=0, second=0), 'hourly', 1, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # aux_path) + # temporal.monthly_profile = {1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1., + # 7: 1., + # 8: 1., + # 9: 1., + # 10: 1., + # 11: 1., + # 12: 1.} + # temporal.daily_profile_id = {0: 1., + # 1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1.} + # temporal.hourly_profile = {0: 1., + # 1: 1., + # 2: 1., + # 3: 1., + # 4: 1., + # 5: 1., + # 6: 1., + # 7: 1., + # 8: 1., + # 9: 1., + # 10: 1., + # 11: 1., + # 12: 1., + # 13: 20., + # 14: 1., + # 15: 1., + # 16: 1., + # 17: 1., + # 18: 1., + # 19: 1., + # 20: 1., + # 21: 1., + # 22: 1., + # 23: 1.} + # + # grid = Grid('global', aux_path) + # grid.center_latitudes = np.array([[41.390205, 19.451054], [29.378586, 31.267401]]) + # grid.center_longitudes = np.array([[2.154007, -99.125519], [47.990341, 121.522179]]) + # data_in = [{'data': np.array([[10., 10.], [10., 10.]])}] + # # data_out = [{'data': np.array([[200., 10.], [10., 10.]])}] + # data_out = temporal.do_temporal(data_in, grid) + # + # self.assertEqual(data_out[0]['data'].tolist(), [[[200., 10.], [10., 10.]]]) + # + # def testing_calculate_weekdays_no_leap_year(self): + # from datetime import datetime + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # 
'/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # self.assertEqual(temporal.calculate_weekdays(datetime(year=2017, month=02, day=1)), + # {0: 4, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4}) + # + # def testing_calculate_weekdays_leap_year(self): + # from datetime import datetime + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # self.assertEqual(temporal.calculate_weekdays(datetime(year=2016, month=02, day=1)), + # {0: 5, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4}) + # + # def testing_calculate_weekdays_factors_full_month(self): + # from datetime import datetime + # temporal = TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # + # self.assertEqual(round(temporal.calculate_weekday_factor_full_month( + # {0: 0.8, 1: 1.2, 2: 0.5, 3: 1.5, 4: 0.9, 5: 0.9, 6: 1.2}, {0: 5, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4}), 5), + # round(0.2/29, 5)) + # + # def testing_calculate_rebalance_factor(self): + # from datetime import datetime + # temporal 
= TemporalDistribution( + # datetime(year=2016, month=01, day=01), 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # + # self.assertEqual(round(temporal.calculate_rebalance_factor( + # {0: 0.8, 1: 1.2, 2: 0.5, 3: 1.5, 4: 0.9, 5: 0.9, 6: 1.2}, datetime(year=2016, month=02, day=1)), 5), + # round(0.2/29, 5)) + + # def testing_get_temporal_daily_profile(self): + # from datetime import datetime + # from calendar import monthrange + # date = datetime(year=2016, month=02, day=1) + # temporal = TemporalDistribution( + # date, 'monthly', 48, 1, + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Monthly.csv', 'M001', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Daily.csv', 'D000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/TemporalProfile_Hourly.csv', 'H000', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/profiles/tz_world_country_iso3166.csv', + # '/home/Earth/ctena/Models/HERMESv3/IN/data/auxiliar_files/testing') + # + # print temporal.get_temporal_daily_profile(date)