# Source code for nes.methods.horizontal_interpolation

#!/usr/bin/env python

import sys
import warnings
import numpy as np
import pandas as pd
from geopandas import GeoSeries
import os
import nes
from mpi4py import MPI
from scipy import spatial
from filelock import FileLock
from datetime import datetime
from warnings import warn
import copy
import pyproj
import gc
import psutil

# CONSTANTS
NEAREST_OPTS = ['NearestNeighbour', 'NearestNeighbours', 'nn', 'NN']
CONSERVATIVE_OPTS = ['Conservative', 'Area_Conservative', 'cons', 'conservative', 'area']


def interpolate_horizontal(self, dst_grid, weight_matrix_path=None, kind='NearestNeighbour', n_neighbours=4,
                           info=False, to_providentia=False, only_create_wm=False, wm=None, flux=False):
    """
    Horizontal methods from one grid to another one.

    Parameters
    ----------
    self : nes.Nes
        Source projection Nes Object.
    dst_grid : nes.Nes
        Final projection Nes object.
    weight_matrix_path : str, None
        Path to the weight matrix to read/create.
    kind : str
        Kind of horizontal interpolation. Accepted values: ['NearestNeighbour', 'Conservative'].
    n_neighbours : int
        Used if kind == NearestNeighbour. Number of nearest neighbours to interpolate. Default: 4.
    info : bool
        Indicates if you want to print extra info during the methods process.
    to_providentia : bool
        Indicates if we want the interpolated grid in Providentia format.
    only_create_wm : bool
        Indicates if you want to only create the Weight Matrix.
    wm : Nes
        Weight matrix Nes File.
    flux : bool
        Indicates if you want to calculate the weight matrix for flux variables.

    Returns
    -------
    nes.Nes
        The interpolated Nes object (or the weight matrix when only_create_wm is True).
    """
    if info and self.master:
        print("Creating Weight Matrix")

    # Obtain weight matrix; the strategy depends on how the source data is distributed.
    if self.parallel_method == 'T':
        weights, idx = get_weights_idx_t_axis(self, dst_grid, weight_matrix_path, kind, n_neighbours,
                                              only_create_wm, wm, flux)
    elif self.parallel_method in ['Y', 'X']:
        weights, idx = get_weights_idx_xy_axis(self, dst_grid, weight_matrix_path, kind, n_neighbours,
                                               only_create_wm, wm, flux)
    else:
        # BUG FIX: the original did 'raise NotImplemented(...)'. NotImplemented is a
        # singleton, not an exception class, so that line raised a confusing TypeError
        # instead of the intended exception.
        raise NotImplementedError("Parallel method {0} is not implemented yet for horizontal interpolations. "
                                  "Use 'T'".format(self.parallel_method))

    if info and self.master:
        print("Weight Matrix done!")
    if only_create_wm:
        # weights for only_create is the WM NES object
        return weights

    # -999 is the sentinel used for "no neighbour / no intersection"; mask those entries
    # so they drop out of the weighted sums below.
    idx = np.ma.masked_array(idx, mask=idx == -999)
    weights = np.ma.masked_array(weights, mask=weights == -999)

    # Copy NES
    final_dst = dst_grid.copy()
    final_dst.set_communicator(dst_grid.comm)

    # Remove original file information
    final_dst.__ini_path = None
    final_dst.netcdf = None
    final_dst.dataset = None

    # The result keeps the source's vertical and temporal description.
    final_dst.lev = self.lev
    final_dst._lev = self._lev
    final_dst.time = self.time
    final_dst._time = self._time
    final_dst.hours_start = self.hours_start
    final_dst.hours_end = self.hours_end

    if info and self.master:
        print("Applying weights")
    # Apply weights
    for var_name, var_info in self.variables.items():
        if info and self.master:
            print("\t{var} horizontal interpolation".format(var=var_name))
            sys.stdout.flush()
        src_shape = var_info['data'].shape
        if isinstance(dst_grid, nes.PointsNes):
            dst_shape = (src_shape[0], src_shape[1], idx.shape[-1])
        else:
            dst_shape = (src_shape[0], src_shape[1], idx.shape[-2], idx.shape[-1])
        # Creating new variable without data
        final_dst.variables[var_name] = {attr_name: attr_value for attr_name, attr_value in var_info.items()
                                         if attr_name != 'data'}
        # Creating empty data
        final_dst.variables[var_name]['data'] = np.empty(dst_shape)

        # Interpolate one (time, lev) slab at a time to bound memory usage.
        for time in range(dst_shape[0]):
            for lev in range(dst_shape[1]):
                src_aux = get_src_data(self.comm, var_info['data'][time, lev], idx, self.parallel_method)
                final_dst.variables[var_name]['data'][time, lev] = np.sum(weights * src_aux, axis=1)

        if isinstance(dst_grid, nes.PointsNes):
            # Removing level axis
            if src_shape[1] != 1:
                raise IndexError("Data with vertical levels cannot be interpolated to points")
            final_dst.variables[var_name]['data'] = final_dst.variables[var_name]['data'].reshape(
                (src_shape[0], idx.shape[-1]))

    if isinstance(dst_grid, nes.PointsNesGHOST) and not to_providentia:
        final_dst = final_dst.to_points()

    final_dst.global_attrs = self.global_attrs

    if info and self.master:
        print("Formatting")

    if to_providentia:
        # self = experiment to interpolate (regular, rotated, etc.)
        # final_dst = interpolated experiment (points)
        if isinstance(final_dst, nes.PointsNes):
            model_centre_lat, model_centre_lon = self.create_providentia_exp_centre_coordinates()
            grid_edge_lat, grid_edge_lon = self.create_providentia_exp_grid_edge_coordinates()
            final_dst = final_dst.to_providentia(model_centre_lon=model_centre_lon,
                                                 model_centre_lat=model_centre_lat,
                                                 grid_edge_lon=grid_edge_lon,
                                                 grid_edge_lat=grid_edge_lat)
        else:
            msg = "The final projection must be points to interpolate an experiment and get it in Providentia format."
            warnings.warn(msg)
            sys.stderr.flush()
    else:
        # Convert dimensions (time, lev, lat, lon) or (time, lat, lon) to (time, station) for interpolated variables
        # and reshape data
        if isinstance(final_dst, nes.PointsNes):
            for var_name, var_info in final_dst.variables.items():
                if len(var_info['dimensions']) != len(var_info['data'].shape):
                    final_dst.variables[var_name]['dimensions'] = ('time', 'station')

    return final_dst
def get_src_data(comm, var_data, idx, parallel_method):
    """
    Obtain the source data needed to interpolate, flattened to 1D.

    Parameters
    ----------
    comm : MPI.Communicator
        Communicator of the source data (unused when parallel_method == 'T').
    var_data : np.array
        Rank-local source data.
    idx : np.array
        Index of the needed data in a 2D flatten way.
    parallel_method : str
        Source parallel method ('T', 'Y' or 'X').

    Returns
    -------
    np.array
        Flatten source needed data, gathered from the selected indices.
    """
    if parallel_method == 'T':
        # Each rank already holds the full horizontal field.
        flat = var_data.flatten()
    else:
        # Reassemble the full field on rank 0, then broadcast it to everyone.
        gathered = comm.gather(var_data, root=0)
        if comm.Get_rank() == 0:
            axis_by_method = {'Y': 0, 'X': 1}
            if parallel_method not in axis_by_method:
                raise NotImplementedError(parallel_method)
            flat = np.concatenate(gathered, axis=axis_by_method[parallel_method]).flatten()
        else:
            flat = gathered
        flat = comm.bcast(flat)

    # Pad with NaN on both ends and shift indices by one so that masked/out-of-range
    # indices (clipped by mode='clip') land on a NaN rather than on real data.
    padded = np.pad(flat, [1, 1], 'constant', constant_values=np.nan)
    return padded.take(idx + 1, mode='clip')
# noinspection DuplicatedCode
def get_weights_idx_t_axis(self, dst_grid, weight_matrix_path, kind, n_neighbours, only_create, wm, flux):
    """
    To obtain the weights and source data index through the T axis.

    Parameters
    ----------
    self : nes.Nes
        Source projection Nes Object.
    dst_grid : nes.Nes
        Final projection Nes object.
    weight_matrix_path : str, None
        Path to the weight matrix to read/create.
    kind : str
        Kind of horizontal interpolation. Accepted values: ['NearestNeighbour', 'Conservative'].
    n_neighbours : int
        Used if kind == NearestNeighbour. Number of nearest neighbours to interpolate. Default: 4.
    only_create : bool
        Indicates if you want to only create the Weight Matrix.
    wm : Nes
        Weight matrix Nes File.
    flux : bool
        Indicates if you want to calculate the weight matrix for flux variables.

    Returns
    -------
    tuple
        Weights and source data index.
    """
    # Priority order: explicit in-memory WM > WM on disk (or creatable at that path) > compute on the fly.
    if wm is not None:
        weight_matrix = wm
    elif weight_matrix_path is not None:
        # Per-rank lock file so concurrent ranks don't create/read the WM file at the same time.
        with FileLock(weight_matrix_path + "{0:03d}.lock".format(self.rank)):
            if os.path.isfile(weight_matrix_path):
                # Only the master actually reads the file; other ranks keep a placeholder (True)
                # and receive the arrays via the bcast at the bottom of this function.
                if self.master:
                    weight_matrix = read_weight_matrix(weight_matrix_path, comm=MPI.COMM_SELF)
                else:
                    weight_matrix = True
                if kind in NEAREST_OPTS:
                    if self.master:
                        # The lev axis of a NN weight matrix stores one entry per neighbour;
                        # a mismatch means the stored WM was built with a different n_neighbours.
                        if len(weight_matrix.lev['data']) != n_neighbours:
                            warn("The selected weight matrix does not have the same number of nearest neighbours." +
                                 "Re-calculating again but not saving it.")
                            sys.stderr.flush()
                            weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours)
                    else:
                        weight_matrix = True
            else:
                # No WM file yet: the master creates it (and saves it at weight_matrix_path).
                if self.master:
                    if kind in NEAREST_OPTS:
                        weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours,
                                                                wm_path=weight_matrix_path)
                    elif kind in CONSERVATIVE_OPTS:
                        weight_matrix = create_area_conservative_weight_matrix(
                            self, dst_grid, wm_path=weight_matrix_path, flux=flux)
                    else:
                        raise NotImplementedError(kind)
                else:
                    weight_matrix = True
        # Clean up this rank's lock file once the critical section is done.
        if os.path.exists(weight_matrix_path + "{0:03d}.lock".format(self.rank)):
            os.remove(weight_matrix_path + "{0:03d}.lock".format(self.rank))
    else:
        # No path given: compute the WM in memory without saving it.
        if self.master:
            if kind in NEAREST_OPTS:
                weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours)
            elif kind in CONSERVATIVE_OPTS:
                weight_matrix = create_area_conservative_weight_matrix(self, dst_grid, flux=flux)
            else:
                raise NotImplementedError(kind)
        else:
            weight_matrix = True

    if only_create:
        # Caller only wanted the WM NES object; idx is not needed.
        return weight_matrix, None

    if self.master:
        if kind in NEAREST_OPTS:
            # Normalize to 1: each destination cell's neighbour weights must sum to one.
            weights = np.array(np.array(weight_matrix.variables['weight']['data'], dtype=np.float64) /
                               np.array(weight_matrix.variables['weight']['data'], dtype=np.float64).sum(axis=1),
                               dtype=np.float64)
        else:
            weights = np.array(weight_matrix.variables['weight']['data'], dtype=np.float64)
        idx = np.array(weight_matrix.variables['idx']['data'][0], dtype=int)
    else:
        weights = None
        idx = None
    # Distribute the final arrays to every rank.
    weights = self.comm.bcast(weights, root=0)
    idx = self.comm.bcast(idx, root=0)

    return weights, idx
# noinspection DuplicatedCode
def get_weights_idx_xy_axis(self, dst_grid, weight_matrix_path, kind, n_neighbours, only_create, wm, flux):
    """
    To obtain the weights and source data index through the X or Y axis.

    Parameters
    ----------
    self : nes.Nes
        Source projection Nes Object.
    dst_grid : nes.Nes
        Final projection Nes object.
    weight_matrix_path : str, None
        Path to the weight matrix to read/create.
    kind : str
        Kind of horizontal interpolation. Accepted values: ['NearestNeighbour', 'Conservative'].
    n_neighbours : int
        Used if kind == NearestNeighbour. Number of nearest neighbours to interpolate. Default: 4.
    only_create : bool
        Indicates if you want to only create the Weight Matrix.
    wm : Nes
        Weight matrix Nes File.
    flux : bool
        Indicates if you want to calculate the weight matrix for flux variables.

    Returns
    -------
    tuple
        Weights and source data index, sliced to this rank's write region.
    """
    # Point-destination weight matrices are not writable to disk; fall back to in-memory only.
    if isinstance(dst_grid, nes.PointsNes) and weight_matrix_path is not None:
        if self.master:
            warn("To point weight matrix cannot be saved.")
            sys.stderr.flush()
        weight_matrix_path = None

    if wm is not None:
        weight_matrix = wm
    elif weight_matrix_path is not None:
        # Per-rank lock file so concurrent ranks don't create/read the WM file at the same time.
        with FileLock(weight_matrix_path + "{0:03d}.lock".format(self.rank)):
            if os.path.isfile(weight_matrix_path):
                # Only the master reads the file; the bcast below distributes the arrays.
                if self.master:
                    weight_matrix = read_weight_matrix(weight_matrix_path, comm=MPI.COMM_SELF)
                else:
                    weight_matrix = True
                if kind in NEAREST_OPTS:
                    if self.master:
                        # lev axis length encodes the number of neighbours the stored WM was built with.
                        if len(weight_matrix.lev['data']) != n_neighbours:
                            warn("The selected weight matrix does not have the same number of nearest neighbours." +
                                 "Re-calculating again but not saving it.")
                            sys.stderr.flush()
                            weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours)
                    else:
                        weight_matrix = True
            else:
                if kind in NEAREST_OPTS:
                    if self.master:
                        weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours,
                                                                wm_path=weight_matrix_path)
                    else:
                        weight_matrix = True
                elif kind in CONSERVATIVE_OPTS:
                    # NOTE: unlike the NN branch, this is called on every rank — the conservative
                    # builder gathers/scatters internally across the communicator.
                    weight_matrix = create_area_conservative_weight_matrix(
                        self, dst_grid, wm_path=weight_matrix_path, flux=flux)
                else:
                    raise NotImplementedError(kind)
        # Clean up this rank's lock file once the critical section is done.
        if os.path.exists(weight_matrix_path + "{0:03d}.lock".format(self.rank)):
            os.remove(weight_matrix_path + "{0:03d}.lock".format(self.rank))
    else:
        if kind in NEAREST_OPTS:
            weight_matrix = create_nn_weight_matrix(self, dst_grid, n_neighbours=n_neighbours)
        elif kind in CONSERVATIVE_OPTS:
            weight_matrix = create_area_conservative_weight_matrix(self, dst_grid, flux=flux)
        else:
            raise NotImplementedError(kind)

    if only_create:
        # Caller only wanted the WM NES object; idx is not needed.
        return weight_matrix, None

    # Normalize to 1
    if self.master:
        if kind in NEAREST_OPTS:
            # Each destination cell's neighbour weights must sum to one.
            weights = np.array(np.array(weight_matrix.variables['weight']['data'], dtype=np.float64) /
                               np.array(weight_matrix.variables['weight']['data'], dtype=np.float64).sum(axis=1),
                               dtype=np.float64)
        else:
            weights = np.array(weight_matrix.variables['weight']['data'], dtype=np.float64)
        idx = np.array(weight_matrix.variables['idx']['data'][0], dtype=np.int64)
    else:
        weights = None
        idx = None
    weights = self.comm.bcast(weights, root=0)
    idx = self.comm.bcast(idx, root=0)

    # if isinstance(dst_grid, nes.PointsNes):
    #     print("weights 1 ->", weights.shape)
    #     print("idx 1 ->", idx.shape)
    #     weights = weights[:, dst_grid.write_axis_limits['x_min']:dst_grid.write_axis_limits['x_max']]
    #     idx = idx[dst_grid.write_axis_limits['x_min']:dst_grid.write_axis_limits['x_max']]
    # else:
    # Slice the full arrays down to the region this rank is responsible for writing.
    weights = weights[:, :, dst_grid.write_axis_limits['y_min']:dst_grid.write_axis_limits['y_max'],
                      dst_grid.write_axis_limits['x_min']:dst_grid.write_axis_limits['x_max']]
    idx = idx[:, dst_grid.write_axis_limits['y_min']:dst_grid.write_axis_limits['y_max'],
              dst_grid.write_axis_limits['x_min']:dst_grid.write_axis_limits['x_max']]
    # print("weights 2 ->", weights.shape)
    # print("idx 2 ->", idx.shape)

    return weights, idx
def read_weight_matrix(weight_matrix_path, comm=None, parallel_method='T'):
    """
    Read a weight matrix from a NetCDF file.

    Parameters
    ----------
    weight_matrix_path : str
        Path of the weight matrix.
    comm : MPI.Communicator
        Communicator to read the weight matrix.
    parallel_method : str
        Nes parallel method to read the weight matrix.

    Returns
    -------
    nes.Nes
        Weight matrix.
    """
    wm = nes.open_netcdf(path=weight_matrix_path, comm=comm,
                         parallel_method=parallel_method, balanced=True)
    wm.load()

    # In previous versions of NES weight was called inverse_dists; alias the legacy
    # name to 'weight' so the rest of the module can use a single key.
    if 'inverse_dists' in wm.variables:
        wm.variables['weight'] = wm.variables['inverse_dists']

    return wm
def create_nn_weight_matrix(self, dst_grid, n_neighbours=4, wm_path=None, info=False):
    """
    To create the weight matrix with the nearest neighbours method.

    Parameters
    ----------
    self : nes.Nes
        Source projection Nes Object.
    dst_grid : nes.Nes
        Final projection Nes object.
    n_neighbours : int
        Used if kind == NearestNeighbour. Number of nearest neighbours to interpolate. Default: 4.
    wm_path : str
        Path where write the weight matrix.
    info : bool
        Indicates if you want to print extra info during the methods process.

    Returns
    -------
    nes.Nes
        Weight matrix.
    """
    if info and self.master:
        print("\tCreating Nearest Neighbour Weight Matrix with {0} neighbours".format(n_neighbours))
        sys.stdout.flush()
    # Source
    src_lat = np.array(self._lat['data'], dtype=np.float32)
    src_lon = np.array(self._lon['data'], dtype=np.float32)

    # 1D to 2D coordinates
    if len(src_lon.shape) == 1:
        src_lon, src_lat = np.meshgrid(src_lon, src_lat)

    # Destination
    dst_lat = np.array(dst_grid._lat['data'], dtype=np.float32)
    dst_lon = np.array(dst_grid._lon['data'], dtype=np.float32)

    if isinstance(dst_grid, nes.PointsNes):
        # Points destinations become a (1, n_points) pseudo-grid so the same
        # reshape logic below works for both grids and station lists.
        dst_lat = np.expand_dims(dst_grid._lat['data'], axis=0)
        dst_lon = np.expand_dims(dst_grid._lon['data'], axis=0)
    else:
        # 1D to 2D coordinates
        if len(dst_lon.shape) == 1:
            dst_lon, dst_lat = np.meshgrid(dst_lon, dst_lat)

    # calculate N nearest neighbour inverse distance weights (and indices)
    # from gridcells centres of model 1 to each gridcell centre of model 2
    # model geographic longitude/latitude coordinates are first converted
    # to cartesian ECEF (Earth Centred, Earth Fixed) coordinates, before
    # calculating distances.
    # src_mod_xy = lon_lat_to_cartesian(src_lon.flatten(), src_lat.flatten())
    # dst_mod_xy = lon_lat_to_cartesian(dst_lon.flatten(), dst_lat.flatten())
    src_mod_xy = lon_lat_to_cartesian_ecef(src_lon.flatten(), src_lat.flatten())
    dst_mod_xy = lon_lat_to_cartesian_ecef(dst_lon.flatten(), dst_lat.flatten())

    # generate KDtree using model 1 coordinates (i.e. the model grid you are interpolating from)
    src_tree = spatial.cKDTree(src_mod_xy)

    # get n-neighbour nearest distances/indices (ravel form) of model 1 gridcell
    # centres from each model 2 gridcell centre
    dists, idx = src_tree.query(dst_mod_xy, k=n_neighbours)
    # self.nearest_neighbour_inds = \
    #     np.column_stack(np.unravel_index(idx, lon.shape))

    # Build the WM NES object on top of a copy of the destination grid; time metadata
    # is a fixed dummy value (2000-01-01) since the WM is time-independent.
    weight_matrix = dst_grid.copy()
    weight_matrix.time = [datetime(year=2000, month=1, day=1, hour=0, second=0, microsecond=0)]
    weight_matrix._time = [datetime(year=2000, month=1, day=1, hour=0, second=0, microsecond=0)]
    weight_matrix._time_bnds = None
    weight_matrix.time_bnds = None
    weight_matrix.last_level = None
    weight_matrix.first_level = 0
    weight_matrix.hours_start = 0
    weight_matrix.hours_end = 0

    weight_matrix.set_communicator(MPI.COMM_SELF)
    # take the reciprocals of the nearest neighbours distances
    # (distances below 1 m are clamped to 1 to avoid huge/infinite weights)
    dists[dists < 1] = 1
    inverse_dists = np.reciprocal(dists)
    # Store weights/indices as (time=1, neighbour, y, x); the neighbour axis is
    # carried in the 'lev' dimension of the WM file.
    inverse_dists_transf = inverse_dists.T.reshape((1, n_neighbours, dst_lon.shape[0], dst_lon.shape[1]))
    weight_matrix.variables['weight'] = {'data': inverse_dists_transf, 'units': 'm'}
    idx_transf = idx.T.reshape((1, n_neighbours, dst_lon.shape[0], dst_lon.shape[1]))
    weight_matrix.variables['idx'] = {'data': idx_transf, 'units': ''}
    weight_matrix.lev = {'data': np.arange(inverse_dists_transf.shape[1]), 'units': ''}
    weight_matrix._lev = {'data': np.arange(inverse_dists_transf.shape[1]), 'units': ''}
    if wm_path is not None:
        weight_matrix.to_netcdf(wm_path)
    return weight_matrix
def create_area_conservative_weight_matrix(self, dst_nes, wm_path=None, flux=False, info=False):
    """
    To create the weight matrix with the area conservative method.

    Parameters
    ----------
    self : nes.Nes
        Source projection Nes Object.
    dst_nes : nes.Nes
        Final projection Nes object.
    wm_path : str
        Path where write the weight matrix.
    flux : bool
        Indicates if you want to calculate the weight matrix for flux variables.
    info : bool
        Indicates if you want to print extra info during the methods process.

    Returns
    -------
    nes.Nes
        Weight matrix (on the master rank; other ranks return True).
    """
    if info and self.master:
        print("\tCreating area conservative Weight Matrix")
        sys.stdout.flush()

    my_crs = pyproj.CRS.from_proj4("+proj=latlon")  # Common projection for both shapefiles

    # Get a portion of the destiny grid
    if dst_nes.shapefile is None:
        dst_nes.create_shapefile()
    dst_grid = copy.deepcopy(dst_nes.shapefile)

    # Formatting Destination grid
    dst_grid.to_crs(crs=my_crs, inplace=True)
    dst_grid['FID_dst'] = dst_grid.index

    # Preparing Source grid
    if self.shapefile is None:
        self.create_shapefile()
    src_grid = copy.deepcopy(self.shapefile)

    # Formatting Source grid
    src_grid.to_crs(crs=my_crs, inplace=True)

    # Serialize index intersection function to avoid memory problems
    # (gather the distributed shapefile pieces on the master before intersecting).
    if self.size > 1 and self.parallel_method != 'T':
        src_grid = self.comm.gather(src_grid, root=0)
        dst_grid = self.comm.gather(dst_grid, root=0)
        if self.master:
            src_grid = pd.concat(src_grid)
            dst_grid = pd.concat(dst_grid)
    if self.master:
        src_grid['FID_src'] = src_grid.index
        src_grid = src_grid.reset_index()
        dst_grid = dst_grid.reset_index()
        # Spatial-index query: all (src, dst) cell pairs whose polygons intersect.
        fid_src, fid_dst = dst_grid.sindex.query_bulk(src_grid.geometry, predicate='intersects')

        # Calculate intersected areas and fractions
        intersection_df = pd.DataFrame(columns=["FID_src", "FID_dst"])
        intersection_df['FID_src'] = np.array(src_grid.loc[fid_src, 'FID_src'], dtype=np.uint32)
        intersection_df['FID_dst'] = np.array(dst_grid.loc[fid_dst, 'FID_dst'], dtype=np.uint32)
        intersection_df['geometry_src'] = src_grid.loc[fid_src, 'geometry'].values
        intersection_df['geometry_dst'] = dst_grid.loc[fid_dst, 'geometry'].values
        # Free the (potentially very large) full grids before scattering.
        del src_grid, dst_grid, fid_src, fid_dst

        # Split the array into smaller arrays in order to scatter the data among the processes
        intersection_df = np.array_split(intersection_df, self.size)
    else:
        intersection_df = None
    intersection_df = self.comm.scatter(intersection_df, root=0)

    if info and self.master:
        print("\t\tGrids created and ready to interpolate")
        sys.stdout.flush()
    if True:  # No Warnings Zone
        warnings.filterwarnings('ignore')
        # Weight of each (src, dst) pair = fraction of the source cell covered by the
        # intersection. buffer(0) repairs possibly-invalid intersection polygons.
        # intersection_df['weight'] = np.array(intersection_df.apply(
        #     lambda x: x['geometry_src'].intersection(x['geometry_dst']).buffer(0).area / x['geometry_src'].area,
        #     axis=1), dtype=np.float64)
        if flux:
            # Flux variables additionally scale by the ratio of true geodesic cell areas.
            intersection_df['weight'] = np.array(intersection_df.apply(
                lambda x: (x['geometry_src'].intersection(x['geometry_dst']).buffer(0).area / x['geometry_src'].area) *
                          (nes.Nes.calculate_geometry_area([x['geometry_src']])[0] /
                           nes.Nes.calculate_geometry_area([x['geometry_dst']])[0]),
                axis=1), dtype=np.float64)
        else:
            intersection_df['weight'] = np.array(intersection_df.apply(
                lambda x: x['geometry_src'].intersection(x['geometry_dst']).buffer(0).area / x['geometry_src'].area,
                axis=1), dtype=np.float64)

        intersection_df.drop(columns=["geometry_src", "geometry_dst"], inplace=True)
        gc.collect()
        warnings.filterwarnings('default')

    # Format & Clean
    if info and self.master:
        print("\t\tWeights calculated. Formatting weight matrix.")
        sys.stdout.flush()

    # Initialising weight matrix
    if self.parallel_method != 'T':
        intersection_df = self.comm.gather(intersection_df, root=0)
    if self.master:
        if self.parallel_method != 'T':
            intersection_df = pd.concat(intersection_df)
        # MultiIndex (FID, level): 'level' enumerates the contributing source cells
        # per destination cell; it becomes the WM's lev dimension.
        intersection_df = intersection_df.set_index(
            ['FID_dst', intersection_df.groupby('FID_dst').cumcount()]).rename_axis(('FID', 'level')).sort_index()
        intersection_df.rename(columns={"FID_src": "idx"}, inplace=True)
        weight_matrix = dst_nes.copy()
        # Dummy time metadata (2000-01-01): the WM is time-independent.
        weight_matrix.time = [datetime(year=2000, month=1, day=1, hour=0, second=0, microsecond=0)]
        weight_matrix._time = [datetime(year=2000, month=1, day=1, hour=0, second=0, microsecond=0)]
        weight_matrix._time_bnds = None
        weight_matrix.time_bnds = None
        weight_matrix.last_level = None
        weight_matrix.first_level = 0
        weight_matrix.hours_start = 0
        weight_matrix.hours_end = 0

        weight_matrix.set_communicator(MPI.COMM_SELF)

        weight_matrix.set_levels({'data': np.arange(intersection_df.index.get_level_values('level').max() + 1),
                                  'dimensions': ('lev',), 'units': '', 'positive': 'up'})

        # Creating Weight matrix empty variables
        # (shape depends on whether the destination grid uses 1D or 2D lat/lon arrays)
        if len(weight_matrix._lat['data'].shape) == 1:
            shape = (1, len(weight_matrix.lev['data']),
                     weight_matrix._lat['data'].shape[0], weight_matrix._lon['data'].shape[0],)
            shape_flat = (1, len(weight_matrix.lev['data']),
                          weight_matrix._lat['data'].shape[0] * weight_matrix._lon['data'].shape[0],)
        else:
            shape = (1, len(weight_matrix.lev['data']),
                     weight_matrix._lat['data'].shape[0], weight_matrix._lat['data'].shape[1],)
            shape_flat = (1, len(weight_matrix.lev['data']),
                          weight_matrix._lat['data'].shape[0] * weight_matrix._lat['data'].shape[1],)

        # -999 marks destination cells with fewer contributors than the max 'level'.
        weight_matrix.variables['weight'] = {'data': np.empty(shape_flat), 'units': '-'}
        weight_matrix.variables['weight']['data'][:] = -999
        weight_matrix.variables['idx'] = {'data': np.empty(shape_flat), 'units': '-'}
        weight_matrix.variables['idx']['data'][:] = -999

        # Filling Weight matrix variables
        for aux_lev in weight_matrix.lev['data']:
            aux_data = intersection_df.xs(level='level', key=aux_lev)
            weight_matrix.variables['weight']['data'][0, aux_lev, aux_data.index] = aux_data.loc[:, 'weight'].values
            weight_matrix.variables['idx']['data'][0, aux_lev, aux_data.index] = aux_data.loc[:, 'idx'].values
        # Re-shaping
        weight_matrix.variables['weight']['data'] = weight_matrix.variables['weight']['data'].reshape(shape)
        weight_matrix.variables['idx']['data'] = weight_matrix.variables['idx']['data'].reshape(shape)
        if wm_path is not None:
            if info and self.master:
                print("\t\tWeight matrix saved at {0}".format(wm_path))
                sys.stdout.flush()
            weight_matrix.to_netcdf(wm_path)
    else:
        # Non-master ranks return a placeholder; callers bcast the real arrays from rank 0.
        weight_matrix = True
    return weight_matrix
def lon_lat_to_cartesian(lon, lat, radius=6378137.0):
    """
    Calculate lon, lat coordinates of a point on a sphere.

    DEPRECATED!!!!

    Parameters
    ----------
    lon : np.array
        Longitude values.
    lat : np.array
        Latitude values.
    radius : float
        Radius of the sphere to get the distances.
    """
    lam = np.radians(lon)
    phi = np.radians(lat)
    cos_phi = np.cos(phi)

    # Standard spherical-to-cartesian conversion on a sphere of the given radius.
    return np.column_stack([radius * cos_phi * np.cos(lam),
                            radius * cos_phi * np.sin(lam),
                            radius * np.sin(phi)])
def lon_lat_to_cartesian_ecef(lon, lat):
    """
    Convert observational/model geographic longitude/latitude coordinates to cartesian ECEF
    (Earth Centred, Earth Fixed) coordinates, assuming WGS84 datum and ellipsoid, and that
    all heights = 0.

    ECEF coordinates represent positions (in meters) as X, Y, Z coordinates, approximating
    the earth surface as an ellipsoid of revolution.
    This conversion is for the subsequent calculation of euclidean distances of the model
    gridcell centres from each observational station. Defining the distance between two
    points on the earth's surface as simply the euclidean distance between the two lat/lon
    pairs could lead to inaccurate results depending on the distance between two points
    (i.e. 1 deg. of longitude varies with latitude).

    Parameters
    ----------
    lon : np.array
        Longitude values.
    lat : np.array
        Latitude values.

    Returns
    -------
    np.array
        Array of shape (n, 3) with the X, Y, Z ECEF coordinates in meters.
    """
    lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
    # FIX: pyproj.transform() was deprecated in pyproj 2 and removed in pyproj 3;
    # Transformer (available since pyproj 2.1) is the supported replacement.
    # always_xy=True keeps the original (lon, lat) argument order.
    transformer = pyproj.Transformer.from_proj(lla, ecef, always_xy=True)
    x, y, z = transformer.transform(lon, lat, np.zeros(lon.shape), radians=False)

    return np.column_stack([x, y, z])