diff --git a/data/profiles/temporal/tz_world_country_iso3166.csv b/data/profiles/temporal/tz_world_country_iso3166.csv index 4c8218028d6d827280913bdebf0816a4dcd908d1..b6ddfd777ff6fcec6b2e23c380f8dbba02438a2f 100755 --- a/data/profiles/temporal/tz_world_country_iso3166.csv +++ b/data/profiles/temporal/tz_world_country_iso3166.csv @@ -420,4 +420,17 @@ "Samoa";882;"Pacific/Apia";378;"WSM" "Yemen";887;"Asia/Aden";203;"YEM" "Zambia";894;"Africa/Lusaka";36;"ZMB" -"Namibia";"264";"Africa/Windhoek";52;"NAM" \ No newline at end of file +"Namibia";"264";"Africa/Windhoek";52;"NAM" +"Antarctica";10;"Antarctica/McMurdo";424;"ATA" +"Antarctica";10;"Antarctica/Mawson";425;"ATA" +"Antarctica";10;"Antarctica/Davis";426;"ATA" +"Antarctica";10;"Antarctica/Rothera";427;"ATA" +"Antarctica";10;"Antarctica/Troll";428;"ATA" +"Antarctica";10;"Antarctica/Syowa";429;"ATA" +"Antarctica";10;"Antarctica/Vostok";430;"ATA" +"Antarctica";10;"Antarctica/Casey";431;"ATA" +"Antarctica";10;"Antarctica/DumontDUrville";432;"ATA" +"Antarctica";10;"Antarctica/Palmer";433;"ATA" +"Myanmar";104;"Asia/Yangon";434;"MMR" +"Kazakhstan";398;"Asia/Qostanay";435;"KAZ" +"UTC";0;"Etc/UTC";999;"UTC" \ No newline at end of file diff --git a/hermesv3_gr/config/config.py b/hermesv3_gr/config/config.py index d174e04ab52e2125c975864146702e3b3704c886..f8d1d4acdde44858f55501290fcc387a763110eb 100755 --- a/hermesv3_gr/config/config.py +++ b/hermesv3_gr/config/config.py @@ -32,7 +32,7 @@ class Config(ArgParser): def read_options(self): """ - Reads all the options from command line or from the configuration file. + Reads all the arguments from command line or from the configuration file. The value of an argument given by command line has high priority that the one that appear in the configuration file. 
@@ -76,17 +76,17 @@ class Config(ArgParser): p.add_argument('--vertical_description', required=True, help='Path to the file that contains the vertical description of the desired output.') - # Global options + # Global arguments p.add_argument('--inc_lat', required=False, help='Latitude resolution for a global domain.', type=float) p.add_argument('--inc_lon', required=False, help='Longitude resolution for a global domain.', type=float) - # Regular lat-lon options: + # Regular lat-lon arguments: p.add_argument('--lat_orig', required=False, help='Latitude of the corner of the first cell.', type=float) p.add_argument('--lon_orig', required=False, help='Longitude of the corner of the first cell.', type=float) p.add_argument('--n_lat', required=False, help='Number of latitude elements.', type=float) p.add_argument('--n_lon', required=False, help='Number of longitude elements.', type=float) - # Rotated options + # Rotated arguments p.add_argument('--centre_lat', required=False, help='Central geographic latitude of grid (non-rotated degrees). Corresponds to the TPH0D ' + 'parameter in NMMB-MONARCH.', type=float) @@ -106,7 +106,7 @@ class Config(ArgParser): help='Longitudinal grid resolution (rotated degrees). Corresponds to the DLMD parameter ' + 'in NMMB-MONARCH.', type=float) - # Lambert conformal conic options + # Lambert conformal conic arguments p.add_argument('--lat_1', required=False, help='Standard parallel 1 (in deg). 
Corresponds to the P_ALP parameter of the GRIDDESC file.', type=float) @@ -157,30 +157,30 @@ class Config(ArgParser): p.add_argument('--world_info', required=True, help='Path to the file that contains the world information like timezones, ISO codes, ...') - options = p.parse_args() - for item in vars(options): - is_str = False - exec ("is_str = str == type(options.{0})".format(item)) + arguments = p.parse_args() + for item in vars(arguments): + is_str = isinstance(arguments.__dict__[item], str) if is_str: - exec("options.{0} = options.{0}.replace('<input_dir>', options.input_dir)".format(item)) - exec("options.{0} = options.{0}.replace('<domain_type>', options.domain_type)".format(item)) - if options.domain_type == 'global' or options.domain_type == 'regular': - exec("options.{0} = options.{0}.replace('<resolution>', '{1}_{2}')".format( - item, options.inc_lat, options.inc_lon)) - elif options.domain_type == 'rotated': - exec("options.{0} = options.{0}.replace('<resolution>', '{1}_{2}')".format( - item, options.inc_rlat, options.inc_rlon)) - elif options.domain_type == 'lcc' or options.domain_type == 'mercator': - exec("options.{0} = options.{0}.replace('<resolution>', '{1}_{2}')".format( - item, options.inc_x, options.inc_y)) - - options.start_date = self._parse_start_date(options.start_date) - options.end_date = self._parse_end_date(options.end_date, options.start_date) - - self.create_dir(options.output_dir) - self.create_dir(options.auxiliar_files_path) - - return options + arguments.__dict__[item] = arguments.__dict__[item].replace('<data_path>', arguments.data_path) + arguments.__dict__[item] = arguments.__dict__[item].replace('<input_dir>', arguments.input_dir) + arguments.__dict__[item] = arguments.__dict__[item].replace('<domain_type>', arguments.domain_type) + if arguments.domain_type in ['global', 'regular']: + arguments.__dict__[item] = arguments.__dict__[item].replace('<resolution>', '{1}_{2}'.format( + item, arguments.inc_lat, arguments.inc_lon)) + elif arguments.domain_type == 'rotated': + arguments.__dict__[item] = arguments.__dict__[item].replace('<resolution>', 
'{1}_{2}'.format( + item, arguments.inc_rlat, arguments.inc_rlon)) + elif arguments.domain_type == 'lcc' or arguments.domain_type == 'mercator': + arguments.__dict__[item] = arguments.__dict__[item].replace('<resolution>', '{1}_{2}'.format( + item, arguments.inc_x, arguments.inc_y)) + + arguments.start_date = self._parse_start_date(arguments.start_date) + arguments.end_date = self._parse_end_date(arguments.end_date, arguments.start_date) + + self.create_dir(arguments.output_dir) + self.create_dir(arguments.auxiliar_files_path) + + return arguments def get_output_name(self, date): """ @@ -247,10 +247,10 @@ class Config(ArgParser): elif str_bool in false_options: return False else: - print 'WARNING: Boolean value not contemplated use {0} for True values and {1} for the False ones'.format( + print('WARNING: Boolean value not contemplated use {0} for True values and {1} for the False ones'.format( true_options, false_options - ) - print '/t Using False as default' + )) + print('\t Using False as default') return False @staticmethod @@ -277,7 +277,7 @@ class Config(ArgParser): date = datetime.strptime(str_date, date_format) break except ValueError as e: - if e.message == 'day is out of range for month': + if str(e) == 'day is out of range for month': raise ValueError(e) if date is None: @@ -308,10 +308,10 @@ class Config(ArgParser): """ Defines the log_level using the common script settings. """ - import settings + from . 
import settings settings.define_global_vars(self.options.log_level) if __name__ == '__main__': config = Config() - print config.options + print(config.options) diff --git a/hermesv3_gr/config/settings.py b/hermesv3_gr/config/settings.py index 1b93cfa1182ba517531a30737ae119b8c81f5942..18b6a6a65a2bfd481fcb7c1d8c48389e6f094cc7 100755 --- a/hermesv3_gr/config/settings.py +++ b/hermesv3_gr/config/settings.py @@ -20,6 +20,7 @@ import os import numpy as np +from functools import reduce global refresh_log @@ -118,9 +119,9 @@ def finish_logs(output_dir, date): os.remove(times_path) df_merged = reduce(lambda left, right: pd.merge(left, right, on=['Class', 'Function'], how='outer'), data_frames) - df_merged['min'] = df_merged.loc[:, range(size)].min(axis=1) - df_merged['max'] = df_merged.loc[:, range(size)].max(axis=1) - df_merged['mean'] = df_merged.loc[:, range(size)].mean(axis=1) + df_merged['min'] = df_merged.loc[:, list(range(size))].min(axis=1) + df_merged['max'] = df_merged.loc[:, list(range(size))].max(axis=1) + df_merged['mean'] = df_merged.loc[:, list(range(size))].mean(axis=1) df_merged.to_csv(times_path) comm.Barrier() diff --git a/hermesv3_gr/modules/emision_inventories/emission_inventory.py b/hermesv3_gr/modules/emision_inventories/emission_inventory.py index ed0a8377db316da63c710b91b92c96c6f2d83557..43e0db7b102637b11e6d43791960a948e182c47f 100755 --- a/hermesv3_gr/modules/emision_inventories/emission_inventory.py +++ b/hermesv3_gr/modules/emision_inventories/emission_inventory.py @@ -286,9 +286,9 @@ class EmissionInventory(object): """ import pandas as pd import re - from point_gfas_emission_inventory import PointGfasEmissionInventory - from gfas_emission_inventory import GfasEmissionInventory - from point_source_emission_inventory import PointSourceEmissionInventory + from .point_gfas_emission_inventory import PointGfasEmissionInventory + from .gfas_emission_inventory import GfasEmissionInventory + from .point_source_emission_inventory import 
PointSourceEmissionInventory st_time = timeit.default_timer() settings.write_log('Loading emissions') diff --git a/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py b/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py index b237bc50fb6dcc1a102d00e3de45e81e5c3056d3..33a532844a47aafc823a39759ebf0474763a6ad1 100755 --- a/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py +++ b/hermesv3_gr/modules/emision_inventories/gfas_emission_inventory.py @@ -21,7 +21,7 @@ import os import timeit import hermesv3_gr.config.settings as settings -from emission_inventory import EmissionInventory +from .emission_inventory import EmissionInventory class GfasEmissionInventory(EmissionInventory): @@ -132,7 +132,7 @@ class GfasEmissionInventory(EmissionInventory): else: alt_var = None - print "ERROR: Only 'sovief' and 'prm' methods are accepted." + print("ERROR: Only 'sovief' and 'prm' methods are accepted.") [alt] = extract_vars(self.get_input_path(), [alt_var]) @@ -211,7 +211,7 @@ class GfasEmissionInventory(EmissionInventory): st_time = timeit.default_timer() settings.write_log("\tRegridding", level=2) - for i in xrange(len(self.emissions)): + for i in range(len(self.emissions)): self.emissions[i]["data"] = self.do_vertical_allocation(self.emissions[i]["data"]) regridded_emissions = self.regrid.start_regridding(gfas=True, vertical=self.vertical) diff --git a/hermesv3_gr/modules/emision_inventories/point_gfas_emission_inventory.py b/hermesv3_gr/modules/emision_inventories/point_gfas_emission_inventory.py index 0eb27d300a8f419d2e22cf9153ccb1f528b966db..78167bba64179b2432ccf1ea76004e749854d5c4 100755 --- a/hermesv3_gr/modules/emision_inventories/point_gfas_emission_inventory.py +++ b/hermesv3_gr/modules/emision_inventories/point_gfas_emission_inventory.py @@ -23,7 +23,7 @@ import timeit import warnings import hermesv3_gr.config.settings as settings -from emission_inventory import EmissionInventory +from .emission_inventory import 
EmissionInventory class PointGfasEmissionInventory(EmissionInventory): @@ -148,7 +148,7 @@ class PointGfasEmissionInventory(EmissionInventory): else: alt_var = None - print "ERROR: Only 'sovief' and 'prm' methods are accepted." + print("ERROR: Only 'sovief' and 'prm' methods are accepted.") [alt] = extract_vars(self.get_input_path(), [alt_var]) @@ -213,7 +213,7 @@ class PointGfasEmissionInventory(EmissionInventory): :return: Emissions already allocated on the top altitude of each fire. :rtype: numpy.array """ - print 'do_vertical_allocation' + print('do_vertical_allocation') sys.exit() st_time = timeit.default_timer() @@ -336,7 +336,7 @@ class PointGfasEmissionInventory(EmissionInventory): # sys.exit() gdf = gpd.sjoin(gdf.to_crs(grid_shp.crs), grid_shp, how='inner') - print gdf + print(gdf) gdf = np.array_split(gdf, settings.size) else: gdf = None @@ -346,7 +346,7 @@ class PointGfasEmissionInventory(EmissionInventory): for num, pollutant in enumerate(self.pollutant_dicts): settings.write_log('\t\tPollutant {0} ({1}/{2})'.format( pollutant['name'], num + 1, len(self.pollutant_dicts)), level=3) - print ('\t\tPollutant {0} ({1}/{2})'.format(pollutant['name'], num + 1, len(self.pollutant_dicts))) + print(('\t\tPollutant {0} ({1}/{2})'.format(pollutant['name'], num + 1, len(self.pollutant_dicts)))) aux = netcdf.variables[pollutant['name']][:].flatten()[gdf['src_index']] gdf[pollutant['name']] = aux * gdf['src_area'] # print 'masa {0}: {1} '.format(pollutant['name'], gdf[pollutant['name']].sum()) @@ -357,7 +357,7 @@ class PointGfasEmissionInventory(EmissionInventory): netcdf.close() settings.write_time('PointGfasEmissionInventory', 'do_regrid', timeit.default_timer() - st_time, level=2) - print 'regrid done' + print('regrid done') del gdf['src_index'], gdf['index_right'] self.emissions = gdf @@ -393,7 +393,7 @@ class PointGfasEmissionInventory(EmissionInventory): warnings.warn('WARNING: One or more fires have an altitude of fire emission injection higher than the top' + 
' layer of the model defined in the {0} file'.format(vertical_description_path)) - print self.emissions.loc[~self.emissions['altitude'].isna()] + print(self.emissions.loc[~self.emissions['altitude'].isna()]) del self.emissions['altitude'] self.emissions = self.emissions.groupby(['FID', 'layer']).sum() diff --git a/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py b/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py index 60e32b82c19136c630a701e7fed1c6c99ab76541..791f60fa09dc5b0bbf5e2a8442223860d8be5c23 100755 --- a/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py +++ b/hermesv3_gr/modules/emision_inventories/point_source_emission_inventory.py @@ -20,7 +20,7 @@ import timeit import hermesv3_gr.config.settings as settings -from emission_inventory import EmissionInventory +from .emission_inventory import EmissionInventory class PointSourceEmissionInventory(EmissionInventory): diff --git a/hermesv3_gr/modules/grids/grid.py b/hermesv3_gr/modules/grids/grid.py index ce1133bcb4df82e9b8b18004adcc6ef0e07d7ccc..dca201366752607cc310f379c699d3129dea1273 100755 --- a/hermesv3_gr/modules/grids/grid.py +++ b/hermesv3_gr/modules/grids/grid.py @@ -499,13 +499,13 @@ class Grid(object): df = pd.concat([df_lats, df_lons], axis=1) # Substituate 8 columns by 4 with the two coordinates - df['p1'] = zip(df.b_lon_1, df.b_lat_1) + df['p1'] = list(zip(df.b_lon_1, df.b_lat_1)) del df['b_lat_1'], df['b_lon_1'] - df['p2'] = zip(df.b_lon_2, df.b_lat_2) + df['p2'] = list(zip(df.b_lon_2, df.b_lat_2)) del df['b_lat_2'], df['b_lon_2'] - df['p3'] = zip(df.b_lon_3, df.b_lat_3) + df['p3'] = list(zip(df.b_lon_3, df.b_lat_3)) del df['b_lat_3'], df['b_lon_3'] - df['p4'] = zip(df.b_lon_4, df.b_lat_4) + df['p4'] = list(zip(df.b_lon_4, df.b_lat_4)) del df['b_lat_4'], df['b_lon_4'] # Make a list of list of tuples diff --git a/hermesv3_gr/modules/grids/grid_global.py b/hermesv3_gr/modules/grids/grid_global.py index 
ee0b1d17464f9cb4dc2b9e3744bc04935e7e0878..a3ca2368e8134a9bf3690c460597fc2a803b6c6b 100755 --- a/hermesv3_gr/modules/grids/grid_global.py +++ b/hermesv3_gr/modules/grids/grid_global.py @@ -23,7 +23,7 @@ import sys import timeit import hermesv3_gr.config.settings as settings -from grid import Grid +from .grid import Grid class GlobalGrid(Grid): diff --git a/hermesv3_gr/modules/grids/grid_latlon.py b/hermesv3_gr/modules/grids/grid_latlon.py index f66f80e1f963c8a2522b0cc4219a114365514f4c..0409d7bc63d90cbc3c99235f9d02c115593257a4 100755 --- a/hermesv3_gr/modules/grids/grid_latlon.py +++ b/hermesv3_gr/modules/grids/grid_latlon.py @@ -23,7 +23,7 @@ import sys import timeit import hermesv3_gr.config.settings as settings -from grid import Grid +from .grid import Grid class LatLonGrid(Grid): diff --git a/hermesv3_gr/modules/grids/grid_lcc.py b/hermesv3_gr/modules/grids/grid_lcc.py index c0b4d4e3266d647d98df229ab480c4f931022dbb..d360d74087dbc7c312ae0b8d887d747e8647b1df 100755 --- a/hermesv3_gr/modules/grids/grid_lcc.py +++ b/hermesv3_gr/modules/grids/grid_lcc.py @@ -22,7 +22,7 @@ import os import sys import timeit import hermesv3_gr.config.settings as settings -from grid import Grid +from .grid import Grid class LccGrid(Grid): diff --git a/hermesv3_gr/modules/grids/grid_mercator.py b/hermesv3_gr/modules/grids/grid_mercator.py index c715b8e4ba8d02b2362c6a7c09ff2bba03f4f453..38bb722d883faf92ebebcc4a64cf32d2feeadf7b 100755 --- a/hermesv3_gr/modules/grids/grid_mercator.py +++ b/hermesv3_gr/modules/grids/grid_mercator.py @@ -22,7 +22,7 @@ import os import sys import timeit import hermesv3_gr.config.settings as settings -from grid import Grid +from .grid import Grid class MercatorGrid(Grid): diff --git a/hermesv3_gr/modules/grids/grid_rotated.py b/hermesv3_gr/modules/grids/grid_rotated.py index eabf8e7b72cdedf51e7c6759b5da7ba6cf98a92d..24c0f9a83ffc580e1dd34ae4deb8050dcade7d16 100755 --- a/hermesv3_gr/modules/grids/grid_rotated.py +++ b/hermesv3_gr/modules/grids/grid_rotated.py @@ 
-22,7 +22,7 @@ import sys import os import timeit import hermesv3_gr.config.settings as settings -from grid import Grid +from .grid import Grid class RotatedGrid(Grid): diff --git a/hermesv3_gr/modules/masking/masking.py b/hermesv3_gr/modules/masking/masking.py index 36b1c93f85a709e9e5e7c771e9c39b9a3248ae09..f02f73697b494eaf4ba38fae8dd07ef66589a7b0 100755 --- a/hermesv3_gr/modules/masking/masking.py +++ b/hermesv3_gr/modules/masking/masking.py @@ -103,7 +103,7 @@ class Masking(object): # Partition @lst in @n balanced parts, in given order parts, rest = divmod(len(lst), num) lstiter = iter(lst) - for j in xrange(num): + for j in range(num): - plen = len(lst) / num + (1 if rest > 0 else 0) + plen = len(lst) // num + (1 if rest > 0 else 0) rest -= 1 yield list(itertools.islice(lstiter, plen)) @@ -123,7 +123,7 @@ class Masking(object): dst_var = [] num = 0 - points = np.array(zip(lat.flatten(), lon.flatten())) + points = np.array(list(zip(lat.flatten(), lon.flatten()))) points_list = list(self.partlst(points, settings.size)) @@ -289,7 +289,7 @@ class Masking(object): values = values['data'] mask = np.ones(values.shape) - for code, factor in self.factors_mask_values.iteritems(): + for code, factor in self.factors_mask_values.items(): mask[values == code] = factor settings.write_time('Masking', 'custom_scale_mask', timeit.default_timer() - st_time, level=3) diff --git a/hermesv3_gr/modules/regrid/regrid_conservative.py b/hermesv3_gr/modules/regrid/regrid_conservative.py index f2de4d1ed0b96bac1bd6ea35928daf0d7be1e7bf..35c718f0cad174da3de337dbc07b0f0541f40ef9 100755 --- a/hermesv3_gr/modules/regrid/regrid_conservative.py +++ b/hermesv3_gr/modules/regrid/regrid_conservative.py @@ -23,7 +23,7 @@ import timeit import ESMF import hermesv3_gr.config.settings as settings -from regrid import Regrid +from .regrid import Regrid class ConservativeRegrid(Regrid): @@ -151,18 +151,18 @@ class ConservativeRegrid(Regrid): if os.path.exists(self.weight_matrix_file): pre_size = 0 post_size = 1 - print "I'm {0}".format(settings.rank), 
'Writing Weight Matrix {0}'.format(self.weight_matrix_file) + print("I'm {0}".format(settings.rank), 'Writing Weight Matrix {0}'.format(self.weight_matrix_file)) # find = True while pre_size != post_size: - print "I'm {0}".format(settings.rank), pre_size, post_size + print("I'm {0}".format(settings.rank), pre_size, post_size) pre_size = post_size post_size = os.path.getsize(self.weight_matrix_file) time.sleep(1) find = True - print "I'm {0}".format(settings.rank), 'FINISHED' + print("I'm {0}".format(settings.rank), 'FINISHED') else: time.sleep(5) - print "I'm {0}".format(settings.rank), 'Waiting Weight Matrix' + print("I'm {0}".format(settings.rank), 'Waiting Weight Matrix') def apply_weights(self, values): """ diff --git a/hermesv3_gr/modules/speciation/speciation.py b/hermesv3_gr/modules/speciation/speciation.py index 6084c44978eb0db59489bd7fbf3862592a61fdd9..9d0c1b8d63363971a46c41eb8c9807d5b6fc3e71 100755 --- a/hermesv3_gr/modules/speciation/speciation.py +++ b/hermesv3_gr/modules/speciation/speciation.py @@ -81,7 +81,7 @@ class Speciation(object): long_name_dict = df.loc[df[df.ID == 'short_description'].index[0]].to_dict() long_name_dict.pop('ID', None) profile_list = [] - for key in formulas_dict.iterkeys(): + for key in formulas_dict.keys(): profile_list.append({ 'name': key, 'formula': formulas_dict[key], diff --git a/hermesv3_gr/modules/temporal/temporal.py b/hermesv3_gr/modules/temporal/temporal.py index 858edd5b76399fa965e7bfbffac3c82b36947115..457d0c87e06adb06bf15fee4963886dad2b9883f 100755 --- a/hermesv3_gr/modules/temporal/temporal.py +++ b/hermesv3_gr/modules/temporal/temporal.py @@ -359,12 +359,12 @@ class TemporalDistribution(object): weekdays_count = self.calculate_weekdays(date) if isinstance(profile, dict): factor = self.calculate_weekday_factor(profile, weekdays_count) - for dict_key in profile.iterkeys(): + for dict_key in profile.keys(): profile[dict_key] = profile[dict_key] + factor elif isinstance(profile, np.ndarray): # Gridded factor = 
self.calculate_weekday_gridded_factor(profile, weekdays_count) - for weekday in xrange(7): + for weekday in range(7): profile[weekday, :] = profile[weekday, :] + factor else: settings.write_log('ERROR: Check the .err file to get more info.') @@ -460,7 +460,7 @@ class TemporalDistribution(object): profile = self.parse_hourly_profile_id(profile_id) if profile is not None: - for profile_type, profile_id_aux in profile.iteritems(): + for profile_type, profile_id_aux in profile.items(): # Gridded monthly profile if os.path.exists(profile_id_aux): profile_aux = self.get_gridded_temporal_profile('Fhour', profile_id_aux) @@ -564,7 +564,7 @@ class TemporalDistribution(object): 'Asia/Famagusta': 'Asia/Nicosia', } - if timezone in tz_dict.iterkeys(): + if timezone in iter(tz_dict.keys()): timezone = tz_dict[timezone] return timezone @@ -625,7 +625,7 @@ class TemporalDistribution(object): dst_var = [] num = 0 - points = zip(lat.flatten(), lon.flatten()) + points = list(zip(lat.flatten(), lon.flatten())) for lat_aux, lon_aux in points: num += 1 @@ -688,16 +688,17 @@ class TemporalDistribution(object): nc_in = Dataset(self.netcdf_timezones) timezones = nc_in.variables['timezone_id'][:, self.grid.x_lower_bound:self.grid.x_upper_bound, - self.grid.y_lower_bound:self.grid.y_upper_bound].astype(int) + self.grid.y_lower_bound:self.grid.y_upper_bound].astype(int) nc_in.close() tz_list = np.chararray(timezones.shape, itemsize=32) - for id_aux in xrange(timezones.min(), timezones.max() + 1): + for id_aux in range(timezones.min(), timezones.max() + 1): try: timezone = self.get_tz_from_id(id_aux) tz_list[timezones == id_aux] = timezone except IndexError: pass + settings.write_time('TemporalDistribution', 'calculate_timezones', timeit.default_timer() - st_time, level=3) return tz_list @@ -706,8 +707,8 @@ class TemporalDistribution(object): """ Calculate the temporal factor to correct the input data of the given date for each cell. - :param date: Date of the current timestep. 
- :type date: datetime.datetime + :param date_aux: Date of the current timestep. + :type date_aux: datetime.datetime :return: 2D array with the factors to correct the input data to the date of this timestep. :rtype: numpy.array @@ -720,14 +721,13 @@ class TemporalDistribution(object): df = pd.DataFrame(self.timezones_array.flatten(), columns=['tz']) - df['utc'] = pd.to_datetime(date_aux) + df['local'] = pd.to_datetime(date_aux, utc=True) try: - df['local'] = df.groupby('tz')['utc'].apply( - lambda x: pd.to_datetime(x).dt.tz_localize(pytz.utc).dt.tz_convert(x.name).dt.tz_localize(None)) + df['local'] = df.groupby('tz')['local'].apply( + lambda x: x.dt.tz_convert(x.name.decode("utf-8")).dt.tz_localize(None)) except pytz.exceptions.UnknownTimeZoneError: - df['local'] = df.groupby('tz')['utc'].apply( + df['local'] = df.groupby('tz')['local'].apply( - lambda x: pd.to_datetime(x).dt.tz_localize(pytz.utc).dt.tz_convert( - self.parse_tz(x.name)).dt.tz_localize(None)) + lambda x: x.dt.tz_convert(self.parse_tz(x.name.decode("utf-8"))).dt.tz_localize(None)) # ===== HOURLY PROFILES ===== df['weekday'] = df['local'].dt.weekday @@ -739,7 +739,7 @@ class TemporalDistribution(object): if isinstance(weekday_profile, dict): df['weekday_factor'] = df['hour'].map(weekday_profile) else: - for hour in xrange(24): + for hour in range(24): df.loc[df['hour'] == hour, 'weekday_factor'] = weekday_profile[ hour, df[df['hour'] == hour].index] # SATURDAY @@ -747,7 +747,7 @@ class TemporalDistribution(object): if isinstance(saturday_profile, dict): df['saturday_factor'] = df['hour'].map(saturday_profile) else: - for hour in xrange(24): + for hour in range(24): df.loc[df['hour'] == hour, 'saturday_factor'] = saturday_profile[ hour, df[df['hour'] == hour].index] # SUNDAY @@ -755,7 +755,7 @@ class TemporalDistribution(object): if isinstance(sunday_profile, dict): df['sunday_factor'] = df['hour'].map(sunday_profile) else: - for hour in xrange(24): + for hour in range(24): df.loc[df['hour'] == hour, 'sunday_factor'] = sunday_profile[ hour, 
df[df['hour'] == hour].index] @@ -782,7 +782,7 @@ class TemporalDistribution(object): if isinstance(self.weekly_profile[month], dict): df.loc[df['month'] == month, 'weekly_factor'] = df['weekday'].map(self.weekly_profile[month]) else: - for weekday in xrange(7): + for weekday in range(7): df.loc[df['weekday'] == weekday, 'weekly_factor'] = self.weekly_profile[month][ weekday, df[df['weekday'] == weekday].index] @@ -871,7 +871,7 @@ class TemporalDistribution(object): weekdays_factors = 0 num_days = 0 - for weekday in xrange(7): + for weekday in range(7): weekdays_factors += profile[weekday] * weekdays[weekday] num_days += weekdays[weekday] @@ -894,7 +894,7 @@ class TemporalDistribution(object): weekdays_factors = np.zeros((profile.shape[-1])) num_days = 0 - for weekday in xrange(7): + for weekday in range(7): weekdays_factors += profile[weekday, :] * weekdays[weekday] num_days += weekdays[weekday] @@ -918,7 +918,7 @@ class TemporalDistribution(object): st_time = timeit.default_timer() weekdays = [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY] - days = [weekday(date.year, date.month, d + 1) for d in xrange(monthrange(date.year, date.month)[1])] + days = [weekday(date.year, date.month, d + 1) for d in range(monthrange(date.year, date.month)[1])] weekdays_dict = {} count = 0 for day in weekdays: diff --git a/hermesv3_gr/modules/vertical/vertical.py b/hermesv3_gr/modules/vertical/vertical.py index 1b309ea4be040924ab3f08aa4078ac1ceb1aed42..34baa1a738d33c198841d2f702639e96864f8d60 100755 --- a/hermesv3_gr/modules/vertical/vertical.py +++ b/hermesv3_gr/modules/vertical/vertical.py @@ -84,7 +84,7 @@ class VerticalDistribution(object): " The v_profile '{0}' of the '{1}' file doesn't match.".format(self.id, path)) sys.exit(1) else: - return_value = zip(v_profile['layers'], v_profile['weights']) + return_value = list(zip(v_profile['layers'], v_profile['weights'])) settings.write_time('VerticalDistribution', 'get_vertical_profile', timeit.default_timer() - 
st_time, level=3) @@ -140,7 +140,7 @@ class VerticalDistribution(object): index = len([s for s in output_vertical_profile if s < prev_layer]) origin_diff_factor = in_weight / (layer - prev_layer) weight_list = [] - for i in xrange(len(output_vertical_profile_aux) - 1): + for i in range(len(output_vertical_profile_aux) - 1): weight = (abs(output_vertical_profile_aux[i] - output_vertical_profile_aux[i + 1])) * origin_diff_factor weight_list.append({'index': index, 'weight': weight}) index += 1 diff --git a/hermesv3_gr/modules/vertical/vertical_gfas.py b/hermesv3_gr/modules/vertical/vertical_gfas.py index 309092613d01be5355d7ad8f21ced18110aaa288..5e65c19720f22e921a30c83eb6a99c0af23d22e3 100755 --- a/hermesv3_gr/modules/vertical/vertical_gfas.py +++ b/hermesv3_gr/modules/vertical/vertical_gfas.py @@ -20,7 +20,7 @@ import timeit import hermesv3_gr.config.settings as settings -from vertical import VerticalDistribution +from .vertical import VerticalDistribution class GfasVerticalDistribution(VerticalDistribution): @@ -68,7 +68,7 @@ class GfasVerticalDistribution(VerticalDistribution): st_time = timeit.default_timer() widths = [] - for i in xrange(len(heights_list)): + for i in range(len(heights_list)): if i == 0: widths.append(heights_list[i]) else: @@ -126,10 +126,10 @@ class GfasVerticalDistribution(VerticalDistribution): st_time = timeit.default_timer() fires = np.zeros(top_fires.shape) - for i in xrange(len(self.output_heights)): + for i in range(len(self.output_heights)): if top_fires[i].sum() != 0: weight_list = self.get_weights(list(self.output_heights[0: i + 1])) - for i_weight in xrange(len(weight_list)): + for i_weight in range(len(weight_list)): fires[i_weight] += top_fires[i] * weight_list[i_weight] settings.write_time('GfasVerticalDistribution', 'apply_approach', timeit.default_timer() - st_time, level=3) @@ -185,7 +185,7 @@ class GfasVerticalDistribution(VerticalDistribution): return fire_list def calculate_weight_layer_dict(self, layer): - 
weight_layer_dict = {x: None for x in xrange(layer + 1)} + weight_layer_dict = {x: None for x in range(layer + 1)} if self.approach == '50_top': weight_layer_dict[layer] = 0.5 to_distribute = 0.5 @@ -213,7 +213,7 @@ class GfasVerticalDistribution(VerticalDistribution): vert_emissions.append(emis) else: weight_layer_dict = self.calculate_weight_layer_dict(layer) - for layer, weight in weight_layer_dict.iteritems(): + for layer, weight in weight_layer_dict.items(): aux_emis = emis.copy() aux_emis.loc[:, 'layer'] = layer aux_emis.loc[:, input_pollutant_list] = aux_emis[input_pollutant_list].multiply(weight) diff --git a/hermesv3_gr/modules/writing/writer.py b/hermesv3_gr/modules/writing/writer.py index 06e6f34b4136aa34365b2f02fee263ce579477ed..8cb46f5e95356b65340544f079ce4e82eace43ee 100755 --- a/hermesv3_gr/modules/writing/writer.py +++ b/hermesv3_gr/modules/writing/writer.py @@ -24,6 +24,7 @@ import numpy as np from mpi4py import MPI from netCDF4 import Dataset from hermesv3_gr.config import settings +from functools import reduce class Writer(object): @@ -128,7 +129,7 @@ class Writer(object): settings.write_log("\t\tParallel NetCDF file ready to write.", level=2) index = 0 # print "Rank {0} 2".format(rank) - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) st_time = timeit.default_timer() @@ -179,7 +180,7 @@ class Writer(object): dict_aux['data'] = None empty_dict[emi['name']] = dict_aux - self.variables_attributes = empty_dict.values() + self.variables_attributes = list(empty_dict.values()) settings.write_time('Writer', 'set_variable_attributes', timeit.default_timer() - st_time, level=3) @@ -386,7 +387,7 @@ class Writer(object): netcdf.createDimension('lat', center_latitudes.shape[0]) lat_dim = ('lon', 'lat',) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + 
print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -397,22 +398,22 @@ class Writer(object): netcdf.createDimension('lon', center_longitudes.shape[1]) lon_dim = ('lon', 'lat',) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( - len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( + len(center_longitudes.shape))) sys.exit(1) elif roated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' + print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' + print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -560,7 +561,7 @@ class Writer(object): try: var[:] = variable['data'] except ValueError: - print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + print('VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape)) # Grid mapping if regular_latlon: diff --git a/hermesv3_gr/modules/writing/writer_cmaq.py b/hermesv3_gr/modules/writing/writer_cmaq.py index 7b3480c780f442559400412192c7367b0a61b0e6..3c88760895bcf5e8fbe29dcbffca58454b2e39ad 100755 --- a/hermesv3_gr/modules/writing/writer_cmaq.py +++ b/hermesv3_gr/modules/writing/writer_cmaq.py @@ -198,7 +198,7 @@ class WriterCmaq(Writer): if self.global_attributes_path is not None: df = pd.read_csv(self.global_attributes_path) - for att in atts_dict.iterkeys(): + for att in atts_dict.keys(): try: if att in int_atts: atts_dict[att] = np.int32(df.loc[df['attribute'] == att, 
'value'].item()) @@ -220,7 +220,7 @@ class WriterCmaq(Writer): settings.write_log('WARNING: Check the .err file to get more information.') message = 'WARNING: No output attributes defined, check the output_attributes' message += ' parameter of the configuration file.\nUsing default values:' - for key, value in atts_dict.iteritems(): + for key, value in atts_dict.items(): message += '\n\t{0} = {1}'.format(key, value) if settings.rank == 0: warning(message) @@ -431,7 +431,7 @@ class WriterCmaq(Writer): index = 0 # data_list, var_list = self.change_variable_attributes(self.variables_attributes) - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): index += 1 var = netcdf.createVariable(var_name, 'f', ('TSTEP', 'LAY', 'ROW', 'COL',), zlib=self.compress) var.setncatts(self.variables_attributes[var_name]) @@ -440,7 +440,7 @@ class WriterCmaq(Writer): # ===== Global attributes ===== settings.write_log("\t\tCreating NetCDF metadata.", level=2) - global_attributes = self.create_global_attributes(self.variables_attributes.keys()) + global_attributes = self.create_global_attributes(list(self.variables_attributes.keys())) for attribute in self.global_attributes_order: netcdf.setncattr(attribute, global_attributes[attribute]) @@ -504,7 +504,7 @@ class WriterCmaq(Writer): full_shape = None index = 0 # data_list, var_list = self.change_variable_attributes(self.variables_attributes) - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): if settings.size != 1: settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) rank_data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) @@ -575,7 +575,7 @@ class WriterCmaq(Writer): if mpi_numpy: data = np.ones(var[:].shape, dtype=settings.precision) * 100 - for i in xrange(settings.size): + for i in range(settings.size): try: if i == 0: var[:, :, :, :full_position[i][3]] = recvbuf[i] @@ -595,7 +595,7 
@@ class WriterCmaq(Writer): elif mpi_vector: if rank_data is not None: data = np.empty(var[:].shape, dtype=settings.precision) - for i in xrange(settings.size): + for i in range(settings.size): # print 'Resizeing {0}'.format(i) if not i == settings.size - 1: data[:, :, full_position[i][0]:full_position[i][1], @@ -615,7 +615,7 @@ class WriterCmaq(Writer): settings.write_log("\t\tCreating NetCDF metadata.", level=2) if settings.rank == 0: # ===== Global attributes ===== - global_attributes = self.create_global_attributes(self.variables_attributes.keys()) + global_attributes = self.create_global_attributes(list(self.variables_attributes.keys())) for attribute in self.global_attributes_order: netcdf.setncattr(attribute, global_attributes[attribute]) diff --git a/hermesv3_gr/modules/writing/writer_monarch.py b/hermesv3_gr/modules/writing/writer_monarch.py index 8749ed5c9f245d4316b62ed3509977d17d634ea0..1bd675827d67b5d5ed7c6b63818dd000968bf497 100755 --- a/hermesv3_gr/modules/writing/writer_monarch.py +++ b/hermesv3_gr/modules/writing/writer_monarch.py @@ -154,8 +154,8 @@ class WriterMonarch(Writer): settings.write_log("\t\t\t'lat' dimension: {0}".format(self.grid.center_latitudes.shape[0]), level=3) lat_dim = ('lon', 'lat', ) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format( - len(self.grid.center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format( + len(self.grid.center_latitudes.shape))) sys.exit(1) # Longitude @@ -168,15 +168,15 @@ class WriterMonarch(Writer): settings.write_log("\t\t\t'lon' dimension: {0}".format(self.grid.center_longitudes.shape[1]), level=3) lon_dim = ('lon', 'lat', ) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( - len(self.grid.center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format( + len(self.grid.center_longitudes.shape))) sys.exit(1) elif Rotated: var_dim = ('rlat', 'rlon',) # 
Rotated Latitude if self.grid.rlat is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' + print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(self.grid.rlat)) settings.write_log("\t\t\t'rlat' dimension: {0}".format(len(self.grid.rlat)), level=3) @@ -184,7 +184,7 @@ class WriterMonarch(Writer): # Rotated Longitude if self.grid.rlon is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' + print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(self.grid.rlon)) settings.write_log("\t\t\t'rlon' dimension: {0}".format(len(self.grid.rlon)), level=3) @@ -333,7 +333,7 @@ class WriterMonarch(Writer): var[:] = 0 index = 0 - for var_name, variable in self.variables_attributes.iteritems(): + for var_name, variable in self.variables_attributes.items(): index += 1 var = netcdf.createVariable(var_name, 'f', ('time',) + var_dim, zlib=self.compress) @@ -632,7 +632,7 @@ class WriterMonarch(Writer): full_shape = None index = 0 - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): if settings.size != 1: settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) rank_data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) @@ -719,7 +719,7 @@ class WriterMonarch(Writer): if mpi_numpy: data = np.ones(var[:].shape, dtype=settings.precision) * 100 - for i in xrange(settings.size): + for i in range(settings.size): try: if i == 0: var[:, :, :, :full_position[i][3]] = recvbuf[i] @@ -739,7 +739,7 @@ class WriterMonarch(Writer): elif mpi_vector: if rank_data is not None: data = np.empty(var[:].shape, dtype=settings.precision) - for i in xrange(settings.size): + for i in range(settings.size): if not i == settings.size - 1: data[:, :, full_position[i][0]:full_position[i][1], full_position[i][2]:full_position[i][3]] = \ diff --git 
a/hermesv3_gr/modules/writing/writer_wrf_chem.py b/hermesv3_gr/modules/writing/writer_wrf_chem.py index 364dbcdf1fca6383aa1f7e2d07bad10e4d1706d8..5f27d7998d5c8fc4e7491dbcd77c483fdf33ecc2 100755 --- a/hermesv3_gr/modules/writing/writer_wrf_chem.py +++ b/hermesv3_gr/modules/writing/writer_wrf_chem.py @@ -216,7 +216,7 @@ class WriterWrfChem(Writer): if self.global_attributes_path is not None: df = pd.read_csv(self.global_attributes_path) - for att in atts_dict.iterkeys(): + for att in atts_dict.keys(): try: if att in int_atts: atts_dict[att] = np.int32(df.loc[df['attribute'] == att, 'value'].item()) @@ -225,7 +225,7 @@ class WriterWrfChem(Writer): elif att in str_atts: atts_dict[att] = str(df.loc[df['attribute'] == att, 'value'].item()) except ValueError: - print 'A warning has occurred. Check the .err file to get more information.' + print('A warning has occurred. Check the .err file to get more information.') if settings.rank == 0: warning('The global attribute {0} is not defined; Using default value {1}'.format( att, atts_dict[att])) @@ -234,7 +234,7 @@ class WriterWrfChem(Writer): settings.write_log('WARNING: Check the .err file to get more information.') message = 'WARNING: No output attributes defined, check the output_attributes' message += ' parameter of the configuration file.\nUsing default values:' - for key, value in atts_dict.iteritems(): + for key, value in atts_dict.items(): message += '\n\t{0} = {1}'.format(key, value) if settings.rank == 0: warning(message) @@ -299,16 +299,13 @@ class WriterWrfChem(Writer): :return: """ from datetime import timedelta - import netCDF4 - aux_times_list = [] + aux_times = np.chararray((len(self.hours), 19), itemsize=1) - for hour in self.hours: + for i, hour in enumerate(self.hours): aux_date = self.date + timedelta(hours=hour) - aux_times_list.append(aux_date.strftime("%Y-%m-%d_%H:%M:%S")) - - str_out = netCDF4.stringtochar(np.array(aux_times_list)) - return str_out + aux_times[i] = 
list(aux_date.strftime("%Y-%m-%d_%H:%M:%S")) + return aux_times def create_parallel_netcdf(self): # TODO Documentation @@ -343,7 +340,7 @@ class WriterWrfChem(Writer): settings.write_log("\t\t\t'Times' variable created with size: {0}".format(times[:].shape), level=3) index = 0 - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): index += 1 var = netcdf.createVariable(var_name, 'f', ('Time', 'emissions_zdim', 'south_north', 'west_east',), zlib=self.compress) @@ -402,6 +399,7 @@ class WriterWrfChem(Writer): # ===== Variables ===== settings.write_log("\t\tCreating NetCDF variables.", level=2) times = netcdf.createVariable('Times', 'S1', ('Time', 'DateStrLen', )) + print(times[:].shape, self.create_times_var().shape) times[:] = self.create_times_var() settings.write_log("\t\t\t'Times' variable created with size: {0}".format(times[:].shape), level=3) @@ -410,7 +408,7 @@ class WriterWrfChem(Writer): # self.change_variable_attributes() - for var_name in self.variables_attributes.iterkeys(): + for var_name in self.variables_attributes.keys(): if settings.size != 1: settings.write_log("\t\t\tGathering {0} data.".format(var_name), level=3) rank_data = self.calculate_data_by_var(var_name, emission_list, self.grid.shape) @@ -458,7 +456,7 @@ class WriterWrfChem(Writer): if rank_data is not None: data = np.empty(var[:].shape, dtype=settings.precision) - for i in xrange(settings.size): + for i in range(settings.size): # print 'Resizeing {0}'.format(i) if not i == settings.size - 1: data[:, :, full_position[i][0]:full_position[i][1], diff --git a/hermesv3_gr/tools/coordinates_tools.py b/hermesv3_gr/tools/coordinates_tools.py index 56d929adacdf4388307d2d307a46d17fcf57a53b..00baa67df654e5175ed5c59b8853a6d907f02fbf 100755 --- a/hermesv3_gr/tools/coordinates_tools.py +++ b/hermesv3_gr/tools/coordinates_tools.py @@ -296,9 +296,9 @@ if __name__ == '__main__': import numpy as np new_pole_lon_d = 20.0 # lonpole tlm0d new_pole_lat_d = 
35.0 # latpole tph0d - print latlon2rotated(new_pole_lon_d, new_pole_lat_d, 20.0, 35.0) - print latlon2rotated(new_pole_lon_d, new_pole_lat_d, -20.2485, -9.9036) - print rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, 0, 0) - print rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, -51., -35.) - print rotated2latlon(new_pole_lon_d, new_pole_lat_d, np.array([-51., -51., -51., -51.]), - np.array([-35., -34.9, -34.8, -34.7])) + print(latlon2rotated(new_pole_lon_d, new_pole_lat_d, 20.0, 35.0)) + print(latlon2rotated(new_pole_lon_d, new_pole_lat_d, -20.2485, -9.9036)) + print(rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, 0, 0)) + print(rotated2latlon_single(new_pole_lon_d, new_pole_lat_d, -51., -35.)) + print(rotated2latlon(new_pole_lon_d, new_pole_lat_d, np.array([-51., -51., -51., -51.]), + np.array([-35., -34.9, -34.8, -34.7]))) diff --git a/hermesv3_gr/tools/download_benchmark.py b/hermesv3_gr/tools/download_benchmark.py index 5d58bc8331a1a907235a16519c8e8e83d4dc1960..ef08a180d59f574e87a8c3429e96da19509cb06a 100755 --- a/hermesv3_gr/tools/download_benchmark.py +++ b/hermesv3_gr/tools/download_benchmark.py @@ -36,7 +36,7 @@ def query_yes_no(question, default="yes"): while True: sys.stdout.write(question + prompt) - choice = raw_input().lower() + choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: @@ -47,12 +47,12 @@ def query_yes_no(question, default="yes"): def check_args(args, exe_str): if len(args) == 0: - print("Missing destination path after '{0}'. e.g.:".format(exe_str) + - "\n\t{0} /home/user/HERMES".format(exe_str)) + print(("Missing destination path after '{0}'. e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES".format(exe_str))) sys.exit(1) elif len(args) > 1: - print("Too much arguments through '{0}'. Only destination path is needed e.g.:".format(exe_str) + - "\n\t{0} /home/user/HERMES".format(exe_str)) + print(("Too much arguments through '{0}'. 
Only destination path is needed e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES".format(exe_str))) sys.exit(1) else: dir_path = args[0] diff --git a/hermesv3_gr/tools/netcdf_tools.py b/hermesv3_gr/tools/netcdf_tools.py index 955468ed7fa610be247a54e718c96628bbe11e4e..48869a1d4c86b80da769416b995a9627038cc37f 100755 --- a/hermesv3_gr/tools/netcdf_tools.py +++ b/hermesv3_gr/tools/netcdf_tools.py @@ -21,6 +21,7 @@ import sys from netCDF4 import Dataset from mpi4py import MPI +from functools import reduce ICOMM = MPI.COMM_WORLD COMM = ICOMM.Split(color=0, key=0) @@ -166,9 +167,9 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, lat_dim = ('lat',) elif len(center_latitudes.shape) == 2: netcdf.createDimension('lat', center_latitudes.shape[0]) - lat_dim = ('lon', 'lat', ) + lat_dim = ('lat', 'lon', ) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -177,23 +178,23 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, lon_dim = ('lon',) elif len(center_longitudes.shape) == 2: netcdf.createDimension('lon', center_longitudes.shape[1]) - lon_dim = ('lon', 'lat', ) + lon_dim = ('lat', 'lon', ) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape))) sys.exit(1) elif rotated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' 
+ print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' + print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -337,7 +338,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, try: var[:] = variable['data'] except ValueError: - print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + print('VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape)) # Grid mapping if regular_latlon: @@ -435,7 +436,7 @@ def create_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lat', center_latitudes.shape[0]) lat_dim = ('lon', 'lat', ) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -446,21 +447,21 @@ def create_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lon', center_longitudes.shape[1]) lon_dim = ('lon', 'lat', ) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape))) sys.exit(1) elif rotated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' 
+ print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' + print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -607,7 +608,7 @@ def create_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, shape = tuple() exec ("shape = (len(hours), {0}.size, {1}.size, {2}.size)".format(var_dim[0], var_dim[1], var_dim[2])) # exit() - print shape + print(shape) var[:] = np.zeros(shape) # Grid mapping diff --git a/hermesv3_gr/tools/sample_files.py b/hermesv3_gr/tools/sample_files.py index d8fee3fedd73e61643223ba7837addd38ff04065..17c7eac88ce876a1fa9b35511ccc8c7991a71ef0 100755 --- a/hermesv3_gr/tools/sample_files.py +++ b/hermesv3_gr/tools/sample_files.py @@ -131,7 +131,7 @@ def query_yes_no(question, default="yes"): while True: sys.stdout.write(question + prompt) - choice = raw_input().lower() + choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: @@ -142,12 +142,12 @@ def query_yes_no(question, default="yes"): def check_args(args, exe_str): if len(args) == 0: - print("Missing destination path after '{0}'. e.g.:".format(exe_str) + - "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str)) + print(("Missing destination path after '{0}'. e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str))) sys.exit(1) elif len(args) > 1: - print("Too much arguments through '{0}'. Only destination path is needed e.g.:".format(exe_str) + - "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str)) + print(("Too much arguments through '{0}'. 
Only destination path is needed e.g.:".format(exe_str) + + "\n\t{0} /home/user/HERMES/HERMES_IN".format(exe_str))) sys.exit(1) else: dir_path = args[0] @@ -169,7 +169,7 @@ def copy_files(file_list, directory): for element in file_list: if dict == type(element): - copy_files(element.values()[0], os.path.join(directory, element.keys()[0])) + copy_files(list(element.values())[0], os.path.join(directory, list(element.keys())[0])) else: copy2(element, directory) return True diff --git a/preproc/cams_glob_ant_preproc.py b/preproc/cams_glob_ant_preproc.py index 2d6ae51ac05dfa2b04b2ff0dbbeabe7eba886f3b..44113e5a34fcf0c6885820f42811b7f23d266786 100755 --- a/preproc/cams_glob_ant_preproc.py +++ b/preproc/cams_glob_ant_preproc.py @@ -213,7 +213,7 @@ def create_bounds(coordinates, number_vertices=2): def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cell_area, date): # TODO Documentation - print output_name_path + print(output_name_path) # Creating NetCDF & Dimensions nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) @@ -341,7 +341,7 @@ def do_transformation(year): if not os.path.exists(file_path): os.makedirs(file_path) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', diff --git a/preproc/cams_glob_ocean_preproc.py b/preproc/cams_glob_ocean_preproc.py index b4c42dbb50bfc9e211326239e0082930dd87392b..8b9fae21e60e5e33ca4d880c65d2cb5e1f04a812 100755 --- a/preproc/cams_glob_ocean_preproc.py +++ b/preproc/cams_glob_ocean_preproc.py @@ -184,7 +184,7 @@ def do_transformation(year): if not os.path.exists(file_path): os.makedirs(file_path) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', diff --git a/preproc/cams_glob_ship_preproc.py b/preproc/cams_glob_ship_preproc.py index 
49e98d62a2872eab5a73424261f826d9d5213794..9e2a184dccace5876f303251ee037b618abb7897 100755 --- a/preproc/cams_glob_ship_preproc.py +++ b/preproc/cams_glob_ship_preproc.py @@ -190,7 +190,7 @@ def do_transformation(year): if not os.path.exists(file_path): os.makedirs(file_path) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', diff --git a/preproc/cams_glob_soil_preproc.py b/preproc/cams_glob_soil_preproc.py index cade121cba8934862a9c38c190f0682d7b7d628b..3fc4e7e436ea41167c2dea54a433159796e2c0a4 100755 --- a/preproc/cams_glob_soil_preproc.py +++ b/preproc/cams_glob_soil_preproc.py @@ -204,7 +204,7 @@ def do_transformation(year): if not os.path.exists(file_path): os.makedirs(file_path) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', diff --git a/preproc/cams_reg_ap_preproc.py b/preproc/cams_reg_ap_preproc.py index 57b37949e529cc7535a6fd832a73d8100d6e80b0..46eb47b90c87d1840ad8f7c80ec6d3560cdeac3e 100755 --- a/preproc/cams_reg_ap_preproc.py +++ b/preproc/cams_reg_ap_preproc.py @@ -78,14 +78,14 @@ def calculate_grid_definition(in_path): # Longitudes lons = np.sort(np.unique(dataframe.Lon_rounded)) lons_interval = lons[1:] - lons[:-1] - print 'Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( - dataframe.Lon_rounded.min(), dataframe.Lon_rounded.max(), lons_interval.min(), len(lons)) + print('Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( + dataframe.Lon_rounded.min(), dataframe.Lon_rounded.max(), lons_interval.min(), len(lons))) # Latitudes lats = np.sort(np.unique(dataframe.Lat_rounded)) lats_interval = lats[1:] - lats[:-1] - print 'Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( - dataframe.Lat_rounded.min(), dataframe.Lat_rounded.max(), lats_interval.min(), len(lats)) + print('Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( + 
dataframe.Lat_rounded.min(), dataframe.Lat_rounded.max(), lats_interval.min(), len(lats))) lats = np.arange(-90 + lats_interval.min() / 2, 90, lats_interval.min(), dtype=np.float64) lons = np.arange(-180 + lons_interval.min() / 2, 180, lons_interval.min(), dtype=np.float64) @@ -172,7 +172,7 @@ def do_transformation(year): #dataframe = pd.concat([df_np, df_p]) for name, group in dataframe.groupby('GNFR_Sector'): - print 'gnfr', name + print('gnfr', name) pollutant_list = create_pollutant_empty_list(in_file, len(c_lats), len(c_lons)) # Other mobile sources ignoring sea cells (shipping emissions) @@ -182,7 +182,7 @@ def do_transformation(year): group = group.groupby(['row_lat', 'col_lon']).sum().reset_index() - for i in xrange(len(pollutant_list)): + for i in range(len(pollutant_list)): # pollutant_list[i]['data'][group.col_lon, group.row_lat] = group[pollutant_list[i]['TNO_name']] pollutant_list[i]['data'][group.row_lat, group.col_lon] += group[pollutant_list[i]['TNO_name']] pollutant_list[i]['data'] = pollutant_list[i]['data'].reshape((1,) + pollutant_list[i]['data'].shape) @@ -396,8 +396,8 @@ def check_vocs(year): [new_voc] = extract_vars(voc_path, [voc]) voc_sum += new_voc['data'].sum() - print '{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( - gnfr, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum) + print('{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + gnfr, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum)) return True def do_pm_transformation(year): @@ -475,8 +475,8 @@ def check_pm(year): [new_pm] = extract_vars(pm_path, [pm]) pm_sum += new_pm['data'].sum() - print '{0} PM2.5 sum: {1}; PM sum: {2}; %diff: {3}'.format( - gnfr, pm25_sum, pm_sum, 100*(pm25_sum - pm_sum) / pm25_sum) + print('{0} PM2.5 sum: {1}; PM sum: {2}; %diff: {3}'.format( + gnfr, pm25_sum, pm_sum, 100*(pm25_sum - pm_sum) / pm25_sum)) return True if __name__ == '__main__': diff --git a/preproc/cams_reg_ap_preproc_pm_ratios.py 
b/preproc/cams_reg_ap_preproc_pm_ratios.py index d2ff01c8cd5efb5aacdd8ce52cb286ec459d96ca..ee18136a25b2fbad7a409ff1a740831e1e939850 100755 --- a/preproc/cams_reg_ap_preproc_pm_ratios.py +++ b/preproc/cams_reg_ap_preproc_pm_ratios.py @@ -60,7 +60,7 @@ def extract_vars(netcdf_path, variables_list, attributes_list=()): data_list.append(dict_aux) netcdf.close() - print data_list + print(data_list) return data_list @@ -104,7 +104,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, if not (regular_latlon or lcc or rotated): regular_latlon = True - print netcdf_path + print(netcdf_path) netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") # ===== Dimensions ===== @@ -119,7 +119,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lat', center_latitudes.shape[0]) lat_dim = ('lon', 'lat',) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -130,21 +130,21 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lon', center_longitudes.shape[1]) lon_dim = ('lon', 'lat',) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape))) sys.exit(1) elif rotated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' + print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' 
+ print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -218,7 +218,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, lons.axis = "X" lons.long_name = "longitude coordinate" lons.standard_name = "longitude" - print 'lons:', lons[:].shape, center_longitudes.shape + print('lons:', lons[:].shape, center_longitudes.shape) lons[:] = center_longitudes if boundary_longitudes is not None: lons.bounds = "lon_bnds" @@ -290,7 +290,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, try: var[:] = variable['data'] except Exception: - print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + print('VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape)) # Grid mapping if regular_latlon: @@ -391,22 +391,22 @@ def create_pm_ratio(pm): import numpy as np [country_values, lat, lon] = extract_vars(TNO_WORLD_MASK, ['timezone_id', 'lat', 'lon']) country_values = country_values['data'].reshape((country_values['data'].shape[1], country_values['data'].shape[1])) - print country_values + print(country_values) #sys.exit() - print OUTPUT_PATH + print(OUTPUT_PATH) if not os.path.exists(OUTPUT_PATH): os.makedirs(OUTPUT_PATH) complete_output_path = os.path.join(OUTPUT_PATH, 'ratio_{0}_{1}.nc'.format(pm, YEAR)) if not os.path.exists(complete_output_path): - print 'Creating ratio file for {0}\npath: {1}'.format(pm, complete_output_path) + print('Creating ratio file for {0}\npath: {1}'.format(pm, complete_output_path)) data_list = [] for gnfr in get_sector_list(pm): - print gnfr + print(gnfr) mask_factor = np.zeros(country_values.shape) iso_codes = get_iso_codes() - for country_code, factor in get_country_code_and_factor(pm, gnfr, YEAR).iteritems(): + for country_code, factor in get_country_code_and_factor(pm, gnfr, YEAR).items(): try: 
mask_factor[country_values == iso_codes[country_code]] = factor except Exception: @@ -421,7 +421,7 @@ def create_pm_ratio(pm): }) write_netcdf(complete_output_path, lat['data'], lon['data'], data_list) else: - print 'Ratio file for {0} already created\npath: {1}'.format(pm, complete_output_path) + print('Ratio file for {0} already created\npath: {1}'.format(pm, complete_output_path)) return True @@ -475,7 +475,7 @@ def get_pm_list(): del df['ISO3'], df['gnfr'], df['fr'], df['year'] df = df.drop_duplicates().dropna() pm_list = df.pmcode.values - print df.pmcode.values + print(df.pmcode.values) return df.pmcode.values @@ -493,7 +493,7 @@ def get_sector_list(pm): df = df[df.pmcode == pm] del df['ISO3'], df['pmcode'], df['year'], df['fr'] df = df.drop_duplicates().dropna() - print df.gnfr.values + print(df.gnfr.values) return df.gnfr.values diff --git a/preproc/cams_reg_ap_preproc_voc_ratios.py b/preproc/cams_reg_ap_preproc_voc_ratios.py index a3bc49c9abd8e2f27cb20055903f36322e366eff..6833243a832d2834f81dbbdff3b2b5b09c9540d1 100755 --- a/preproc/cams_reg_ap_preproc_voc_ratios.py +++ b/preproc/cams_reg_ap_preproc_voc_ratios.py @@ -60,7 +60,7 @@ def extract_vars(netcdf_path, variables_list, attributes_list=()): data_list.append(dict_aux) netcdf.close() - print data_list + print(data_list) return data_list @@ -104,7 +104,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, if not (regular_latlon or lcc or rotated): regular_latlon = True - print netcdf_path + print(netcdf_path) netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") # ===== Dimensions ===== @@ -119,7 +119,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lat', center_latitudes.shape[0]) lat_dim = ('lon', 'lat',) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of 
{0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -130,21 +130,21 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lon', center_longitudes.shape[1]) lon_dim = ('lon', 'lat',) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape))) sys.exit(1) elif rotated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' + print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' + print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -218,7 +218,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, lons.axis = "X" lons.long_name = "longitude coordinate" lons.standard_name = "longitude" - print 'lons:', lons[:].shape, center_longitudes.shape + print('lons:', lons[:].shape, center_longitudes.shape) lons[:] = center_longitudes if boundary_longitudes is not None: lons.bounds = "lon_bnds" @@ -290,7 +290,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, try: var[:] = variable['data'] except Exception: - print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + print('VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape)) # Grid mapping if regular_latlon: @@ -391,22 +391,22 @@ def create_voc_ratio(voc): import numpy as np [country_values, lat, lon] = extract_vars(TNO_WORLD_MASK, ['timezone_id', 
'lat', 'lon']) country_values = country_values['data'].reshape((country_values['data'].shape[1], country_values['data'].shape[1])) - print country_values + print(country_values) #sys.exit() - print OUTPUT_PATH + print(OUTPUT_PATH) if not os.path.exists(OUTPUT_PATH): os.makedirs(OUTPUT_PATH) complete_output_path = os.path.join(OUTPUT_PATH, 'ratio_{0}_{1}.nc'.format(voc, YEAR)) if not os.path.exists(complete_output_path): - print 'Creating ratio file for {0}\npath: {1}'.format(voc, complete_output_path) + print('Creating ratio file for {0}\npath: {1}'.format(voc, complete_output_path)) data_list = [] for gnfr in get_sector_list(voc): - print gnfr + print(gnfr) mask_factor = np.zeros(country_values.shape) iso_codes = get_iso_codes() - for country_code, factor in get_country_code_and_factor(voc, gnfr, YEAR).iteritems(): + for country_code, factor in get_country_code_and_factor(voc, gnfr, YEAR).items(): try: mask_factor[country_values == iso_codes[country_code]] = factor except Exception: @@ -421,7 +421,7 @@ def create_voc_ratio(voc): }) write_netcdf(complete_output_path, lat['data'], lon['data'], data_list) else: - print 'Ratio file for {0} already created\npath: {1}'.format(voc, complete_output_path) + print('Ratio file for {0} already created\npath: {1}'.format(voc, complete_output_path)) return True @@ -472,14 +472,14 @@ def get_voc_list(): import pandas as pd df = pd.read_csv(CSV_PATH, sep=',') - print list(df.columns.values) + print(list(df.columns.values)) # sys.exit() del df['year'], df['ISO3'], df['gnfr'], df['fr'] df = df.drop_duplicates().dropna() voc_list = df.vcode.values - for i in xrange(len(voc_list)): + for i in range(len(voc_list)): voc_list[i] = voc_list[i].replace('v', 'voc') - print df.vcode.values + print(df.vcode.values) return df.vcode.values @@ -498,7 +498,7 @@ def get_sector_list(voc): df = df[df.vcode == voc] del df['ISO3'], df['vcode'], df['year'], df['fr'] df = df.drop_duplicates().dropna() - print df.gnfr.values + print(df.gnfr.values) 
return df.gnfr.values diff --git a/preproc/cams_reg_ghg_preproc.py b/preproc/cams_reg_ghg_preproc.py index 8447f4ccd90a83eb5be42af090e9ae7080b69e0d..59810213aa9d62ed603dbbdfa6d3f3e67203fdcb 100755 --- a/preproc/cams_reg_ghg_preproc.py +++ b/preproc/cams_reg_ghg_preproc.py @@ -74,14 +74,14 @@ def calculate_grid_definition(in_path): # Longitudes lons = np.sort(np.unique(dataframe.Lon_rounded)) lons_interval = lons[1:] - lons[:-1] - print 'Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( - dataframe.Lon_rounded.min(), dataframe.Lon_rounded.max(), lons_interval.min(), len(lons)) + print('Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( + dataframe.Lon_rounded.min(), dataframe.Lon_rounded.max(), lons_interval.min(), len(lons))) # Latitudes lats = np.sort(np.unique(dataframe.Lat_rounded)) lats_interval = lats[1:] - lats[:-1] - print 'Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( - dataframe.Lat_rounded.min(), dataframe.Lat_rounded.max(), lats_interval.min(), len(lats)) + print('Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( + dataframe.Lat_rounded.min(), dataframe.Lat_rounded.max(), lats_interval.min(), len(lats))) lats = np.arange(-90 + lats_interval.min() / 2, 90, lats_interval.min(), dtype=np.float64) lons = np.arange(-180 + lons_interval.min() / 2, 180, lons_interval.min(), dtype=np.float64) @@ -168,7 +168,7 @@ def do_transformation(year): #dataframe = pd.concat([df_np, df_p]) for name, group in dataframe.groupby('GNFR_Sector'): - print 'gnfr', name + print('gnfr', name) pollutant_list = create_pollutant_empty_list(in_file, len(c_lats), len(c_lons)) # Other mobile sources ignoring sea cells (shipping emissions) @@ -178,7 +178,7 @@ def do_transformation(year): group = group.groupby(['row_lat', 'col_lon']).sum().reset_index() - for i in xrange(len(pollutant_list)): + for i in range(len(pollutant_list)): # pollutant_list[i]['data'][group.col_lon, group.row_lat] = group[pollutant_list[i]['TNO_name']] 
pollutant_list[i]['data'][group.row_lat, group.col_lon] += group[pollutant_list[i]['TNO_name']] pollutant_list[i]['data'] = pollutant_list[i]['data'].reshape((1,) + pollutant_list[i]['data'].shape) @@ -392,8 +392,8 @@ def check_vocs(year): [new_voc] = extract_vars(voc_path, [voc]) voc_sum += new_voc['data'].sum() - print '{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( - gnfr, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum) + print('{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + gnfr, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum)) return True def do_pm_transformation(year): @@ -471,8 +471,8 @@ def check_pm(year): [new_pm] = extract_vars(pm_path, [pm]) pm_sum += new_pm['data'].sum() - print '{0} PM2.5 sum: {1}; PM sum: {2}; %diff: {3}'.format( - gnfr, pm25_sum, pm_sum, 100*(pm25_sum - pm_sum) / pm25_sum) + print('{0} PM2.5 sum: {1}; PM sum: {2}; %diff: {3}'.format( + gnfr, pm25_sum, pm_sum, 100*(pm25_sum - pm_sum) / pm25_sum)) return True if __name__ == '__main__': diff --git a/preproc/ceds_preproc.py b/preproc/ceds_preproc.py index 5a5385b578f50b9a1e62ac6649cfe4c86a9eba11..7a3ee0b5afb564bc4c8f5da58c911cf075a70b90 100755 --- a/preproc/ceds_preproc.py +++ b/preproc/ceds_preproc.py @@ -248,7 +248,7 @@ def do_transformation(year): if not os.path.exists(file_path): os.makedirs(file_path) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', @@ -301,10 +301,10 @@ def do_air_transformation(year): elif sector == 'air_crs': data_aux = data[:, 15:24 + 1, :, :].sum(axis=1) else: - print 'ERROR' + print('ERROR') sys.exit(1) - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): emission = { 'name': pollutant_name, 'units': 'kg.m-2.s-1', diff --git a/preproc/eclipsev5a_preproc.py b/preproc/eclipsev5a_preproc.py index 9dd76589f629e4f728c1fecdcc94d4e1b6429e1c..55c3b8b7c37ee46f9c27626e4dc90163e212231d 100755 --- a/preproc/eclipsev5a_preproc.py 
+++ b/preproc/eclipsev5a_preproc.py @@ -99,7 +99,7 @@ def create_bounds(coordinates, number_vertices=2): def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cell_area, date): # TODO Documentation - print output_name_path + print(output_name_path) # Creating NetCDF & Dimensions nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) @@ -236,9 +236,9 @@ def get_output_name(pollutant, sector, year, month): def do_single_transformation(pollutant, sector, data, c_lats, c_lons, cell_area): # TODO Docuemtnation - for i in xrange(len(LIST_YEARS)): + for i in range(len(LIST_YEARS)): - for month in xrange(12): + for month in range(12): # print i, list_years[i], month + 1 if pollutant == 'NOx': pollutant_name = 'nox_no2' @@ -270,7 +270,7 @@ def do_transformation(): # TODO Documentation for pollutant in LIST_POLLUTANTS: file_name = os.path.join(INPUT_PATH, INPUT_NAME.replace('', pollutant)) - print file_name + print(file_name) nc = Dataset(file_name, mode='r') c_lats = nc.variables['lat'][:] c_lons = nc.variables['lon'][:] @@ -325,7 +325,7 @@ def do_flaring_transformation(): if var_name is not None: data = nc_in.variables[var][:] data = np.nan_to_num(data) - for i in xrange(len(LIST_YEARS)): + for i in range(len(LIST_YEARS)): output_name = get_flaring_output_name(var_name, 'flaring', LIST_YEARS[i]) data_aux = data[i, :, :] data_aux = (data_aux * YEAR_FACTOR) / cell_area diff --git a/preproc/edgarv432_ap_preproc.py b/preproc/edgarv432_ap_preproc.py index 67ee72b251ef1069a26b152265113927716bf0de..6da9f0aab7ecd462d9f474850c0221502094bf0d 100755 --- a/preproc/edgarv432_ap_preproc.py +++ b/preproc/edgarv432_ap_preproc.py @@ -146,7 +146,7 @@ def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, gr month=None): # TODO Documentation # Creating NetCDF & Dimensions - print output_name_path + print(output_name_path) nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") 
nc_output.createDimension('nv', 2) nc_output.createDimension('lon', center_lons.shape[0]) @@ -233,7 +233,7 @@ def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, gr def do_yearly_transformation(year): # TODO Documentation for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): + for ipcc in list(ipcc_to_sector_dict().keys()): file_path = os.path.join( INPUT_PATH, YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', @@ -241,7 +241,7 @@ def do_yearly_transformation(year): if os.path.exists(file_path): grid_area = get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') if pollutant in ['PM2.5_bio', 'PM2.5_fossil']: @@ -285,7 +285,7 @@ def do_yearly_transformation(year): def do_monthly_transformation(year): # TODO Documentation for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): + for ipcc in list(ipcc_to_sector_dict().keys()): file_path = os.path.join( INPUT_PATH, YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', @@ -293,7 +293,7 @@ def do_monthly_transformation(year): if os.path.exists(file_path): grid_area = get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') if pollutant in ['PM2.5_bio', 'PM2.5_fossil']: @@ -328,7 +328,7 @@ def do_monthly_transformation(year): nc_month_factors = Dataset(os.path.join(INPUT_PATH, MONTHLY_PATTERN_FILE.replace('', sector))) month_factors = nc_month_factors.variables[sector][:] - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): data_aux = data * month_factors[month - 1, :, :] write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(pollutant.lower(), year, str(month).zfill(2))), @@ -344,8 +344,8 @@ def do_monthly_transformation(year): def do_2010_monthly_transformation(): # TODO Documentation for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): - for month in xrange(1, 12 + 
1, 1): + for ipcc in list(ipcc_to_sector_dict().keys()): + for month in range(1, 12 + 1, 1): file_path = os.path.join( INPUT_PATH, MONTHLY_INPUT_NAME.replace('', pollutant).replace('', @@ -353,7 +353,7 @@ def do_2010_monthly_transformation(): if os.path.exists(file_path): grid_area = get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') # print pollutant # print pollutant in ['PM2.5_bio', 'PM2.5_fossil'] diff --git a/preproc/edgarv432_voc_preproc.py b/preproc/edgarv432_voc_preproc.py index 091ce93c54cfc2ab6ab47c4fdf93c9db9f94f16d..a300c80632ab94bc272484e2c82cf7b4cce6e755 100755 --- a/preproc/edgarv432_voc_preproc.py +++ b/preproc/edgarv432_voc_preproc.py @@ -138,7 +138,7 @@ def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, gr month=None): # TODO Documentation # Creating NetCDF & Dimensions - print output_name_path + print(output_name_path) nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) nc_output.createDimension('lon', center_lons.shape[0]) @@ -227,9 +227,9 @@ def write_netcdf(output_name_path, data, data_atts, center_lats, center_lons, gr def do_yearly_transformation(year): # TODO Documentation - print year + print(year) for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): + for ipcc in list(ipcc_to_sector_dict().keys()): file_path = os.path.join( INPUT_PATH, YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', @@ -237,7 +237,7 @@ def do_yearly_transformation(year): if os.path.exists(file_path): grid_area = get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:] @@ -250,7 +250,7 @@ def do_yearly_transformation(year): nc_in.close() sector = ipcc_to_sector_dict()[ipcc] - if pollutant in ['voc{0}'.format(x) for x in xrange(1, 9 + 1, 1)]: + if pollutant in ['voc{0}'.format(x) for x in range(1, 
9 + 1, 1)]: pollutant_aux = pollutant.replace('voc', 'voc0') else: pollutant_aux = pollutant @@ -274,9 +274,9 @@ def do_yearly_transformation(year): def do_monthly_transformation(year): # TODO Documentation - print year + print(year) for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): + for ipcc in list(ipcc_to_sector_dict().keys()): file_path = os.path.join( INPUT_PATH, YEARLY_INPUT_NAME.replace('', pollutant).replace('', str(year)).replace('', @@ -284,7 +284,7 @@ def do_monthly_transformation(year): if os.path.exists(file_path): grid_area = get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:] @@ -298,7 +298,7 @@ def do_monthly_transformation(year): sector = ipcc_to_sector_dict()[ipcc] - if pollutant in ['voc{0}'.format(x) for x in xrange(1, 9 + 1, 1)]: + if pollutant in ['voc{0}'.format(x) for x in range(1, 9 + 1, 1)]: pollutant_aux = pollutant.replace('voc', 'voc0') else: pollutant_aux = pollutant @@ -314,7 +314,7 @@ def do_monthly_transformation(year): nc_month_factors = Dataset(os.path.join(INPUT_PATH, MONTHLY_PATTERN_FILE.replace('', sector))) month_factors = nc_month_factors.variables[sector][:] - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): data_aux = data * month_factors[month - 1, :, :] write_netcdf(os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format( pollutant_aux.lower(), year, str(month).zfill(2))), @@ -330,8 +330,8 @@ def do_monthly_transformation(year): def do_2010_monthly_transformation(): # TODO Documentation for pollutant in LIST_POLLUTANTS: - for ipcc in ipcc_to_sector_dict().keys(): - for month in xrange(1, 12 + 1, 1): + for ipcc in list(ipcc_to_sector_dict().keys()): + for month in range(1, 12 + 1, 1): file_path = os.path.join( INPUT_PATH, MONTHLY_INPUT_NAME.replace('', pollutant).replace('', @@ -339,7 +339,7 @@ def do_2010_monthly_transformation(): if os.path.exists(file_path): grid_area 
= get_grid_area(file_path) - print file_path + print(file_path) nc_in = Dataset(file_path, mode='r') data = nc_in.variables['emi_{0}'.format(pollutant.lower())][:] @@ -353,7 +353,7 @@ def do_2010_monthly_transformation(): sector = ipcc_to_sector_dict()[ipcc] - if pollutant in ['voc{0}'.format(x) for x in xrange(1, 9 + 1, 1)]: + if pollutant in ['voc{0}'.format(x) for x in range(1, 9 + 1, 1)]: pollutant_aux = pollutant.replace('voc', 'voc0') else: pollutant_aux = pollutant diff --git a/preproc/emep_preproc.py b/preproc/emep_preproc.py index 15f44b0a9684f6ba1313477b9b552846be350a69..72c2ff3647e0bcc76386892ee5635e00b2012218 100755 --- a/preproc/emep_preproc.py +++ b/preproc/emep_preproc.py @@ -65,14 +65,14 @@ def calculate_grid_definition(in_path): # Longitudes lons = np.sort(np.unique(df.LONGITUDE)) lons_interval = lons[1:] - lons[:-1] - print 'Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( - df.LONGITUDE.min(), df.LONGITUDE.max(), lons_interval.min(), len(lons)) + print('Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( + df.LONGITUDE.min(), df.LONGITUDE.max(), lons_interval.min(), len(lons))) # Latitudes lats = np.sort(np.unique(df.LATITUDE)) lats_interval = lats[1:] - lats[:-1] - print 'Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( - df.LATITUDE.min(), df.LATITUDE.max(), lats_interval.min(), len(lats)) + print('Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( + df.LATITUDE.min(), df.LATITUDE.max(), lats_interval.min(), len(lats))) lats = np.arange(-90 + lats_interval.min()/2, 90, lats_interval.min(), dtype=np.float64) lons = np.arange(-180 + lons_interval.min()/2, 180, lons_interval.min(), dtype=np.float64) @@ -96,7 +96,7 @@ def do_transformation(year): INPUT_NAME.replace('', str(year)).replace('', sector).replace('', pollutant)) if os.path.exists(in_file): - print in_file + print(in_file) c_lats, c_lons, lat_interval, lon_interval = calculate_grid_definition(in_file) b_lats = create_bounds(c_lats, 
number_vertices=2) b_lons = create_bounds(c_lons, number_vertices=2) diff --git a/preproc/gfas12_h_preproc.py b/preproc/gfas12_h_preproc.py index 9cc93fef153e21e8666355ab350c815145f1cf56..e38d1d3b6f86ec88109841b3a992e39e181a6343 100755 --- a/preproc/gfas12_h_preproc.py +++ b/preproc/gfas12_h_preproc.py @@ -37,8 +37,8 @@ INPUT_PATH = '/esarchive/recon/ecmwf/gfas/original_files/ga_mc_sfc_gfas_ecmf/' INPUT_NAME = 'gfas_hourly_.grb' OUTPUT_PATH = '/esarchive/recon/ecmwf/gfas' -STARTING_DATE = datetime(year=2018, month=11, day=01) -ENDIND_DATE = datetime(year=2018, month=11, day=01) +STARTING_DATE = datetime(year=2018, month=11, day=1) +ENDIND_DATE = datetime(year=2018, month=11, day=1) PARAMETERS_FILE = '/esarchive/recon/ecmwf/gfas/original_files/ga_mc_sfc_gfas_ecmf/GFAS_hourly_Parameters.csv' # ============================================================== @@ -118,7 +118,7 @@ def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cel :type date: datetime.datetime """ - print output_name_path + print(output_name_path) # Creating NetCDF & Dimensions nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) @@ -277,6 +277,6 @@ if __name__ == '__main__': if os.path.isfile(f): do_transformation(f, date_aux, OUTPUT_PATH, var_list) else: - print 'ERROR: file {0} not found'.format(f) + print('ERROR: file {0} not found'.format(f)) date_aux = date_aux + timedelta(days=1) diff --git a/preproc/gfas12_preproc.py b/preproc/gfas12_preproc.py index 328cbc04fe86e2693536b63a7a62af2109f644c2..86ffcf9814641b344a4ef5744a6912cacb11e365 100755 --- a/preproc/gfas12_preproc.py +++ b/preproc/gfas12_preproc.py @@ -118,7 +118,7 @@ def write_netcdf(output_name_path, data_list, center_lats, center_lons, grid_cel :type date: datetime.datetime """ - print output_name_path + print(output_name_path) # Creating NetCDF & Dimensions nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) @@
-276,6 +276,6 @@ if __name__ == '__main__': if os.path.isfile(f): do_transformation(f, date_aux, OUTPUT_PATH, var_list) else: - print 'ERROR: file {0} not found'.format(f) + print('ERROR: file {0} not found'.format(f)) date_aux = date_aux + timedelta(days=1) diff --git a/preproc/htapv2_preproc.py b/preproc/htapv2_preproc.py index a81aebe5a968ff8e068bff24918a984008d13fec..284fd6dec94ae87003e43f87db0e9d73a802022d 100755 --- a/preproc/htapv2_preproc.py +++ b/preproc/htapv2_preproc.py @@ -73,7 +73,7 @@ def do_transformation_annual(filename, out_path, pollutant, sector, year): """ from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf, get_grid_area from hermesv3_gr.tools.coordinates_tools import create_bounds - print filename + print(filename) [c_lats, c_lons] = extract_vars(filename, ['lat', 'lon']) if pollutant == 'pm25': @@ -108,7 +108,7 @@ def do_transformation_annual(filename, out_path, pollutant, sector, year): os.makedirs(out_path) out_path = os.path.join(out_path, '{0}_{1}.nc'.format(pollutant, year)) - print out_path + print(out_path) write_netcdf(out_path, c_lats['data'], c_lons['data'], [data], boundary_latitudes=create_bounds(c_lats['data']), boundary_longitudes=create_bounds(c_lons['data']), cell_area=get_grid_area(filename), global_attributes=global_attributes,) @@ -139,8 +139,8 @@ def do_transformation(filename_list, out_path, pollutant, sector, year): from hermesv3_gr.tools.netcdf_tools import extract_vars, write_netcdf, get_grid_area from hermesv3_gr.tools.coordinates_tools import create_bounds - for month in xrange(1, 13): - print filename_list[month - 1] + for month in range(1, 13): + print(filename_list[month - 1]) [c_lats, c_lons] = extract_vars(filename_list[month - 1], ['lat', 'lon']) if pollutant == 'pm25': @@ -239,7 +239,7 @@ def do_nmvoc_month_transformation(filename_list, out_path, sector, year): nmvoc_ratio_list = do_ratio_list() - print sector + print(sector) if sector == 'ENERGY': ratio_var = 'pow' @@ -263,16 +263,16 @@ def 
do_nmvoc_month_transformation(filename_list, out_path, sector, year): nmvoc_ratio_list.pop('voc24', None) nmvoc_ratio_list.pop('voc25', None) - print type(nmvoc_ratio_list), nmvoc_ratio_list + print(type(nmvoc_ratio_list), nmvoc_ratio_list) - for month in xrange(1, 13): - print filename_list[month - 1] + for month in range(1, 13): + print(filename_list[month - 1]) c_lats, c_lons = extract_vars(filename_list[month - 1], ['lat', 'lon']) [data] = extract_vars(filename_list[month - 1], ['emi_nmvoc']) - for voc, ratio_file in nmvoc_ratio_list.iteritems(): - print voc, ratio_file + for voc, ratio_file in nmvoc_ratio_list.items(): + print(voc, ratio_file) pollutant = voc [ratio] = extract_vars(ratio_file, [ratio_var]) @@ -303,7 +303,7 @@ def do_nmvoc_month_transformation(filename_list, out_path, sector, year): os.makedirs(out_path_aux) out_path_aux = os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(pollutant, year, str(month).zfill(2))) - print out_path_aux + print(out_path_aux) write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data_aux], boundary_latitudes=create_bounds(c_lats['data']), boundary_longitudes=create_bounds(c_lons['data']), global_attributes=global_attributes,) @@ -325,18 +325,18 @@ def do_nmvoc_industry_month_transformation(filename_list, out_path, sector, year nmvoc_ratio_list = do_ratio_list() - print sector + print(sector) - print type(nmvoc_ratio_list), nmvoc_ratio_list + print(type(nmvoc_ratio_list), nmvoc_ratio_list) - for month in xrange(1, 13): - print filename_list[month - 1] + for month in range(1, 13): + print(filename_list[month - 1]) c_lats, c_lons = extract_vars(filename_list[month - 1], ['lat', 'lon']) [ind, exf, sol] = extract_vars(filename_list[month - 1], ['emiss_ind', 'emiss_exf', 'emiss_sol']) - for voc, ratio_file in nmvoc_ratio_list.iteritems(): - print voc, ratio_file + for voc, ratio_file in nmvoc_ratio_list.items(): + print(voc, ratio_file) data = { 'name': voc, 'units': 'kg m-2 s-1', @@ -382,7 +382,7 @@ def 
do_nmvoc_industry_month_transformation(filename_list, out_path, sector, year os.makedirs(out_path_aux) out_path_aux = os.path.join(out_path_aux, '{0}_{1}{2}.nc'.format(voc, year, str(month).zfill(2))) - print out_path_aux + print(out_path_aux) write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data], boundary_latitudes=create_bounds(c_lats['data']), boundary_longitudes=create_bounds(c_lons['data']), global_attributes=global_attributes,) @@ -439,7 +439,7 @@ def do_nmvoc_year_transformation(filename, out_path, sector, year): os.makedirs(out_path_aux) out_path_aux = os.path.join(out_path_aux, '{0}_{1}.nc'.format(pollutant, year)) - print out_path_aux + print(out_path_aux) write_netcdf(out_path_aux, c_lats['data'], c_lons['data'], [data_aux], boundary_latitudes=create_bounds(c_lats['data']), boundary_longitudes=create_bounds(c_lons['data']), @@ -510,7 +510,7 @@ def check_vocs(year): :return: """ from hermesv3_gr.tools.netcdf_tools import extract_vars - for month in xrange(1, 12 + 1, 1): + for month in range(1, 12 + 1, 1): for snap in ['ENERGY', 'INDUSTRY', 'RESIDENTIAL', 'TRANSPORT']: nmvoc_path = os.path.join(OUTPUT_PATH, 'monthly_mean', 'nmvoc_{0}'.format(snap.lower()), 'nmvoc_{0}{1}.nc'.format(year, str(month).zfill(2))) @@ -518,26 +518,26 @@ def check_vocs(year): nmvoc_sum = new_voc['data'].sum() voc_sum = 0 - for voc in ['voc{0}'.format(str(x).zfill(2)) for x in xrange(1, 25 + 1, 1)]: + for voc in ['voc{0}'.format(str(x).zfill(2)) for x in range(1, 25 + 1, 1)]: voc_path = os.path.join(OUTPUT_PATH, 'monthly_mean', '{0}_{1}'.format(voc, snap.lower()), '{0}_{1}{2}.nc'.format(voc, year, str(month).zfill(2))) if os.path.exists(voc_path): [new_voc] = extract_vars(voc_path, [voc]) voc_sum += new_voc['data'].sum() - print '{0} month: {4}; NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( - snap, nmvoc_sum, voc_sum, 100 * (nmvoc_sum - voc_sum) / nmvoc_sum, month) + print('{0} month: {4}; NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + snap, nmvoc_sum, 
voc_sum, 100 * (nmvoc_sum - voc_sum) / nmvoc_sum, month)) if __name__ == '__main__': for y in LIST_YEARS: - for pollutant_dict in get_pollutant_dict().iteritems(): + for pollutant_dict in get_pollutant_dict().items(): for current_sector in get_sector_dict()[pollutant_dict[0]]['month']: input_name_aux = INPUT_NAME.replace('', current_sector) input_name_aux = input_name_aux.replace('', str(y)) input_name_aux = input_name_aux.replace('', pollutant_dict[1]) file_list = [os.path.join(INPUT_PATH, input_name_aux.replace('', str(aux_month))) - for aux_month in xrange(1, 13)] + for aux_month in range(1, 13)] do_transformation(file_list, os.path.join(OUTPUT_PATH, 'monthly_mean'), pollutant_dict[0], current_sector, y) @@ -564,7 +564,7 @@ if __name__ == '__main__': input_name_aux = input_name_aux.replace('', current_sector) input_name_aux = input_name_aux.replace('', str(y)) file_list = [os.path.join(INPUT_PATH, input_name_aux.replace('', str(aux_month))) - for aux_month in xrange(1, 13)] + for aux_month in range(1, 13)] if current_sector == 'INDUSTRY_3subsectors': do_nmvoc_industry_month_transformation(file_list, os.path.join(OUTPUT_PATH, 'monthly_mean'), @@ -580,5 +580,5 @@ if __name__ == '__main__': input_name_aux = input_name_aux.replace('', current_sector) input_name_aux = input_name_aux.replace('', str(y)) input_name_aux = os.path.join(INPUT_PATH, input_name_aux) - print input_name_aux + print(input_name_aux) do_nmvoc_year_transformation(input_name_aux, os.path.join(OUTPUT_PATH, 'yearly_mean'), current_sector, y) diff --git a/preproc/tno_mac_iii_preproc.py b/preproc/tno_mac_iii_preproc.py index e96da9f64c991bc4c11a36852246f40e09a6931f..6c06388396d13f1d73b1a2b9d57a410dfd662b9b 100755 --- a/preproc/tno_mac_iii_preproc.py +++ b/preproc/tno_mac_iii_preproc.py @@ -73,14 +73,14 @@ def calculate_grid_definition(in_path): # Longitudes lons = np.sort(np.unique(dataframe.Lon)) lons_interval = lons[1:] - lons[:-1] - print 'Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: 
{3}'.format( - dataframe.Lon.min(), dataframe.Lon.max(), lons_interval.min(), len(lons)) + print('Lon min: {0}; Lon max: {1}; Lon inc: {2}; Lon num: {3}'.format( + dataframe.Lon.min(), dataframe.Lon.max(), lons_interval.min(), len(lons))) # Latitudes lats = np.sort(np.unique(dataframe.Lat)) lats_interval = lats[1:] - lats[:-1] - print 'Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( - dataframe.Lat.min(), dataframe.Lat.max(), lats_interval.min(), len(lats)) + print('Lat min: {0}; Lat max: {1}; Lat inc: {2}; Lat num: {3}'.format( + dataframe.Lat.min(), dataframe.Lat.max(), lats_interval.min(), len(lats))) lats = np.arange(-90 + lats_interval.min() / 2, 90, lats_interval.min(), dtype=np.float64) lons = np.arange(-180 + lons_interval.min() / 2, 180, lons_interval.min(), dtype=np.float64) @@ -164,7 +164,7 @@ def do_transformation(year): dataframe = pd.concat([df_np, df_p]) for name, group in dataframe.groupby('SNAP'): - print 'snap', name + print('snap', name) pollutant_list = create_pollutant_empty_list(in_file, len(c_lats), len(c_lons)) # Other mobile sources ignoring sea cells (shipping emissions) @@ -174,7 +174,7 @@ def do_transformation(year): group = group.groupby(['row_lat', 'col_lon']).sum().reset_index() - for i in xrange(len(pollutant_list)): + for i in range(len(pollutant_list)): # pollutant_list[i]['data'][group.col_lon, group.row_lat] = group[pollutant_list[i]['TNO_name']] pollutant_list[i]['data'][group.row_lat, group.col_lon] += group[pollutant_list[i]['TNO_name']] pollutant_list[i]['data'] = pollutant_list[i]['data'].reshape((1,) + pollutant_list[i]['data'].shape) @@ -362,8 +362,8 @@ def check_vocs(year): [new_voc] = extract_vars(voc_path, [voc]) voc_sum += new_voc['data'].sum() - print '{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( - snap, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum) + print('{0} NMVOC sum: {1}; VOCs sum: {2}; %diff: {3}'.format( + snap, nmvoc_sum, voc_sum, 100*(nmvoc_sum - voc_sum) / nmvoc_sum)) 
return True diff --git a/preproc/tno_mac_iii_preproc_voc_ratios.py b/preproc/tno_mac_iii_preproc_voc_ratios.py index 9aea92690893c217c28bd069905ab604ff598147..80b41feeb77e95e89e41bf9aaa1c8c7582f36c62 100755 --- a/preproc/tno_mac_iii_preproc_voc_ratios.py +++ b/preproc/tno_mac_iii_preproc_voc_ratios.py @@ -101,7 +101,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, if not (regular_latlon or lcc or rotated): regular_latlon = True - print netcdf_path + print(netcdf_path) netcdf = Dataset(netcdf_path, mode='w', format="NETCDF4") # ===== Dimensions ===== @@ -116,7 +116,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lat', center_latitudes.shape[0]) lat_dim = ('lon', 'lat',) else: - print 'ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape)) + print('ERROR: Latitudes must be on a 1D or 2D array instead of {0}'.format(len(center_latitudes.shape))) sys.exit(1) # Longitude @@ -127,21 +127,21 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, netcdf.createDimension('lon', center_longitudes.shape[1]) lon_dim = ('lon', 'lat',) else: - print 'ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape)) + print('ERROR: Longitudes must be on a 1D or 2D array instead of {0}'.format(len(center_longitudes.shape))) sys.exit(1) elif rotated: var_dim = ('rlat', 'rlon',) # Rotated Latitude if rotated_lats is None: - print 'ERROR: For rotated grids is needed the rotated latitudes.' + print('ERROR: For rotated grids is needed the rotated latitudes.') sys.exit(1) netcdf.createDimension('rlat', len(rotated_lats)) lat_dim = ('rlat', 'rlon',) # Rotated Longitude if rotated_lons is None: - print 'ERROR: For rotated grids is needed the rotated longitudes.' 
+ print('ERROR: For rotated grids is needed the rotated longitudes.') sys.exit(1) netcdf.createDimension('rlon', len(rotated_lons)) lon_dim = ('rlat', 'rlon',) @@ -215,7 +215,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, lons.axis = "X" lons.long_name = "longitude coordinate" lons.standard_name = "longitude" - print 'lons:', lons[:].shape, center_longitudes.shape + print('lons:', lons[:].shape, center_longitudes.shape) lons[:] = center_longitudes if boundary_longitudes is not None: lons.bounds = "lon_bnds" @@ -287,7 +287,7 @@ def write_netcdf(netcdf_path, center_latitudes, center_longitudes, data_list, try: var[:] = variable['data'] except Exception: - print 'VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape) + print('VAR ERROR, netcdf shape: {0}, variable shape: {1}'.format(var[:].shape, variable['data'].shape)) # Grid mapping if regular_latlon: @@ -388,19 +388,19 @@ def create_voc_ratio(voc): import numpy as np [country_values, lat, lon] = extract_vars(TNO_WORLD_MASK, ['timezone_id', 'lat', 'lon']) country_values = country_values['data'].reshape((country_values['data'].shape[1], country_values['data'].shape[1])) - print OUTPUT_PATH + print(OUTPUT_PATH) if not os.path.exists(OUTPUT_PATH): os.makedirs(OUTPUT_PATH) complete_output_path = os.path.join(OUTPUT_PATH, 'ratio_{0}.nc'.format(voc)) if not os.path.exists(complete_output_path): - print 'Creating ratio file for {0}\npath: {1}'.format(voc, complete_output_path) + print('Creating ratio file for {0}\npath: {1}'.format(voc, complete_output_path)) data_list = [] for snap in get_sector_list(voc): - print snap + print(snap) mask_factor = np.zeros(country_values.shape) iso_codes = get_iso_codes() - for country_code, factor in get_country_code_and_factor(voc, snap).iteritems(): + for country_code, factor in get_country_code_and_factor(voc, snap).items(): try: mask_factor[country_values == iso_codes[country_code]] = factor except 
Exception: @@ -415,7 +415,7 @@ def create_voc_ratio(voc): }) write_netcdf(complete_output_path, lat['data'], lon['data'], data_list) else: - print 'Ratio file for {0} already created\npath: {1}'.format(voc, complete_output_path) + print('Ratio file for {0} already created\npath: {1}'.format(voc, complete_output_path)) return True @@ -468,7 +468,7 @@ def get_voc_list(): del df['ISO3'], df['snap'], df['output substance name'], df['fr'] df = df.drop_duplicates().dropna() voc_list = df.vcode.values - for i in xrange(len(voc_list)): + for i in range(len(voc_list)): voc_list[i] = voc_list[i].replace('v', 'voc') return df.vcode.values diff --git a/preproc/wiedinmyer_preproc.py b/preproc/wiedinmyer_preproc.py index 7ccdab724b60607e79235aa0d42c0ffe22bf5f6e..fd0344b72437bba0eea624aa2b038248729001e9 100755 --- a/preproc/wiedinmyer_preproc.py +++ b/preproc/wiedinmyer_preproc.py @@ -81,7 +81,7 @@ def do_transformation(filename): :type filename: str """ import numpy as np - print filename + print(filename) from hermesv3_gr.tools.netcdf_tools import get_grid_area from cf_units import Unit @@ -101,10 +101,10 @@ def do_transformation(filename): for output_pollutant in LIST_POLLUTANTS: input_pollutant = out_pollutant_to_in_pollutant(output_pollutant) try: - print input_pollutant + print(input_pollutant) data = nc_in.variables[input_pollutant][:] except RuntimeWarning: - print 'ERROR reading {0}'.format(input_pollutant) + print('ERROR reading {0}'.format(input_pollutant)) data = np.nan_to_num(data) data = data/grid_area # To pass from Gg/year to Gg/m2.year data = data*factor @@ -119,7 +119,7 @@ def do_transformation(filename): if not os.path.exists(out_path_aux): os.makedirs(out_path_aux) write_netcdf(os.path.join(out_path_aux, '{0}_{1}.nc'.format(output_pollutant, YEAR)), - data, data_attributes, lats, lons, grid_area, YEAR, 01) + data, data_attributes, lats, lons, grid_area, YEAR, 1) nc_in.close() @@ -157,7 +157,7 @@ def write_netcdf(output_name_path, data, data_atts,
center_lats, center_lons, gr """ from hermesv3_gr.tools.coordinates_tools import create_bounds - print output_name_path + print(output_name_path) # Creating NetCDF & Dimensions nc_output = Dataset(output_name_path, mode='w', format="NETCDF4") nc_output.createDimension('nv', 2) @@ -243,4 +243,4 @@ if __name__ == '__main__': do_transformation(os.path.join(INPUT_PATH, INPUT_NAME)) - print 'Time(s):', timeit.default_timer() - starting_time + print('Time(s):', timeit.default_timer() - starting_time) diff --git a/setup.py b/setup.py index 34e4e16dd1182e6836bbe496e33c5f8d1e29ac8a..2050aedae17edbe8de56b1fb020fe03485034865 100755 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ setup( 'ESMPy>=7.1.0.dev0', 'holidays', 'pytz', - 'timezonefinder<4.0.0', + 'timezonefinder>=4.0.0', 'mpi4py', 'pytest', ],