import numpy as np

# NOTE: `read` and the `configs` helper module are assumed to be defined or
# imported elsewhere in this module.


def read_region(config, *args, **kwargs):
    """Snip out target regions from an nc4 file.

    Quick-and-dirty hack to reduce the size of the data read in from netCDF
    files, and to keep a memory leak in the module from blowing up the
    script. Not the best way to handle this.

    Parameters
    ----------
    config : dict
        Run configuration dictionary. Used to parse out target regions.
    *args :
        Passed on to read().
    **kwargs :
        Passed on to read().

    Returns
    -------
    years : array-like
    regions : array-like
    data : array-like
    """
    years, regions, data = read(*args, **kwargs)

    if configs.is_allregions(config):
        regions_msk = np.ones(regions.shape, dtype='bool')
    else:
        target_regions = configs.get_regions(config, regions)
        regions_msk = np.isin(regions, target_regions)

    return years, regions[regions_msk], data[..., regions_msk]
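# A minimal, self-contained sketch of the masking step in read_region(),
# using invented region names and toy data; a plain list stands in for the
# result of configs.get_regions().
def _demo_region_mask():
    regions = np.array(['CAN', 'USA', 'MEX', 'BRA'])
    data = np.arange(12).reshape(3, 4)       # (years x regions), toy values

    target_regions = ['USA', 'BRA']          # stand-in for configs.get_regions()
    regions_msk = np.isin(regions, target_regions)

    assert list(regions[regions_msk]) == ['USA', 'BRA']
    assert data[..., regions_msk].shape == (3, 2)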
def iterate_regions(filepath, config=None):
    """Yield (region, years, data) for each region requested in `config`.

    Config options: column -- name of the column to read (default 'rebased').
    """
    config = {} if config is None else config  # avoid a shared mutable default

    years, regions, data = read(filepath, config.get('column', 'rebased'))
    config['regionorder'] = list(regions)

    if configs.is_allregions(config):
        yield 'all', years, data
        return

    regions = list(regions)
    for region in configs.get_regions(config, regions):
        if region == 'global':
            region = ''  # the global aggregate is stored under the empty key
        ii = regions.index(region)
        yield regions[ii], years, data[:, ii]
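# Sketch of the 'global' lookup above with toy values: the global aggregate
# appears to be stored under the empty-string region key (inferred from the
# code; not documented).
def _demo_global_lookup():
    regions = ['', 'USA', 'BRA']   # hypothetical region list; '' = global
    region = 'global'
    if region == 'global':
        region = ''                # map the requested name to the stored key
    assert regions.index(region) == 0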
def iterate_regions(filepath, column, config=None):
    """Yield (region, years, data) for `column` of the file at `filepath`."""
    config = {} if config is None else config  # avoid a shared mutable default

    if column is not None or 'costs' not in filepath:
        years, regions, data = read(
            filepath, column if column is not None else 'rebased')
    else:
        # Costs files have no single column: take the midpoint of the lower
        # and upper bounds, then rescale.
        years, regions, data1 = read(filepath, 'costs_lb')
        years, regions, data2 = read(filepath, 'costs_ub')
        data = ((data1 + data2) / 2) / 1e5

    config['regionorder'] = list(regions)

    if configs.is_allregions(config):
        yield 'all', years, data
        return

    regions = list(regions)
    for region in configs.get_regions(config, regions):
        if region == 'global':
            region = ''
        ii = regions.index(region)
        yield regions[ii], years, data[:, ii]
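# Sketch of the costs branch above with made-up numbers. The 1e5 divisor
# looks like a unit rescaling; the target units are not documented here.
def _demo_costs_midpoint():
    data1 = np.array([1.0e5, 2.0e5])   # costs_lb (hypothetical)
    data2 = np.array([3.0e5, 4.0e5])   # costs_ub (hypothetical)
    data = ((data1 + data2) / 2) / 1e5
    assert np.allclose(data, [2.0, 3.0])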
def iterate_regions(filepath, column, config=None):
    """Yield (region, years, data), aligning delta-method files to the master VCV."""
    # `deltamethod_vcv` is module-level state, presumably set as a side
    # effect of read_region().
    global deltamethod_vcv

    config = {} if config is None else config  # avoid a shared mutable default

    do_deltamethod = False if configs.is_parallel_deltamethod(config) \
        else config.get('deltamethod', None)

    if column is not None or 'costs' not in filepath:
        years, regions, data = read_region(
            config, filepath, column if column is not None else 'rebased',
            do_deltamethod)
    else:
        years, regions, data1 = read_region(config, filepath, 'costs_lb',
                                            do_deltamethod)
        years, regions, data2 = read_region(config, filepath, 'costs_ub',
                                            do_deltamethod)
        # NOTE: only the upper bound is used here; data1 is read but unused.
        data = data2 / 1e5

    if deltamethod_vcv is not None and not config.get('deltamethod', False):
        # Infer that these were delta-method files.
        config['deltamethod'] = True

    if config.get('multiimpact_vcv', None) is not None and deltamethod_vcv is not None:
        assert isinstance(config['multiimpact_vcv'], np.ndarray)
        # Extend the data to conform to multiimpact_vcv: locate this file's
        # VCV as a diagonal block of the master VCV.
        foundindex = None
        for ii in range(config['multiimpact_vcv'].shape[0] - deltamethod_vcv.shape[0] + 1):
            if np.allclose(deltamethod_vcv,
                           config['multiimpact_vcv'][ii:(ii + deltamethod_vcv.shape[0]),
                                                     ii:(ii + deltamethod_vcv.shape[1])]):
                foundindex = ii
                break
        if foundindex is None:
            # Report the mismatch against the two corner blocks before failing.
            print(np.sum(np.abs(deltamethod_vcv
                                - config['multiimpact_vcv'][:deltamethod_vcv.shape[0],
                                                            :deltamethod_vcv.shape[1]])))
            print(np.sum(np.abs(deltamethod_vcv
                                - config['multiimpact_vcv'][deltamethod_vcv.shape[0]:,
                                                            deltamethod_vcv.shape[1]:])))
        assert foundindex is not None, \
            "Cannot find the VCV for " + filepath + " within the master VCV."

        # Zero-pad the data so its first axis lines up with the master VCV.
        newdata = np.zeros(
            tuple([config['multiimpact_vcv'].shape[0]] + list(data.shape[1:])))
        if len(data.shape) == 2:
            newdata[foundindex:(foundindex + deltamethod_vcv.shape[0]), :] = data
        else:
            newdata[foundindex:(foundindex + deltamethod_vcv.shape[0]), :, :] = data
        data = newdata

        deltamethod_vcv = None  # reset for next file

    config['regionorder'] = list(regions)

    if configs.is_allregions(config):
        yield 'all', years, data
        return

    regions = list(regions)
    for region in configs.get_regions(config, regions):
        ii = regions.index(region)
        if config.get('deltamethod', False) and not configs.is_parallel_deltamethod(config):
            # Delta-method data carries an extra leading VCV axis.
            yield regions[ii], years, data[:, :, ii]
        else:
            yield regions[ii], years, data[:, ii]
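# Toy, self-contained sketch of the alignment step above: find a file's small
# VCV as a diagonal block of the master multiimpact_vcv, then zero-pad the
# data's first axis to the master's size. All values are invented.
def _demo_vcv_alignment():
    small = np.array([[2.0, 0.5],
                      [0.5, 2.0]])
    master = np.zeros((4, 4))
    master[1:3, 1:3] = small       # embed the small VCV at offset 1

    foundindex = None
    for ii in range(master.shape[0] - small.shape[0] + 1):
        if np.allclose(small, master[ii:ii + small.shape[0],
                                     ii:ii + small.shape[1]]):
            foundindex = ii
            break
    assert foundindex == 1

    data = np.ones((2, 3))         # (VCV rows) x (e.g., years), toy values
    newdata = np.zeros((master.shape[0],) + data.shape[1:])
    newdata[foundindex:foundindex + small.shape[0], :] = data
    assert newdata.shape == (4, 3)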