def test_calc_grouping():
    """calc_grouping maps a time-aggregation keyword to an ocgis calc_grouping list."""
    assert utils.calc_grouping("year") == ["year"]
    assert utils.calc_grouping("month") == ["month"]
    assert utils.calc_grouping("sem") == [[12, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], "unique"]
    # check invalid value: should raise an exception.
    # BUGFIX: the call used `indices.calc_grouping` while every other assertion
    # uses `utils.calc_grouping`, and the `== ["year"]` comparison result was
    # silently discarded.
    with pytest.raises(Exception):
        utils.calc_grouping("unknown")
def test_calc_grouping():
    """calc_grouping maps a time-aggregation keyword to an ocgis calc_grouping list."""
    assert utils.calc_grouping('year') == ['year']
    assert utils.calc_grouping('month') == ['month']
    assert utils.calc_grouping('sem') == [
        [12, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], 'unique']
    # check invalid value: should raise an exception.
    # BUGFIX: the call used `indices.calc_grouping` while every other assertion
    # uses `utils.calc_grouping`, and the `== ['year']` comparison result was
    # silently discarded.
    with pytest.raises(Exception):
        utils.calc_grouping('unknown')
def test_calc_grouping(self):
    """calc_grouping maps a time-aggregation keyword to an ocgis calc_grouping list."""
    nose.tools.ok_(utils.calc_grouping('year') == ['year'])
    nose.tools.ok_(utils.calc_grouping('month') == ['month'])
    nose.tools.ok_(utils.calc_grouping('sem') == [
        [12, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], 'unique'])
    # check invalid value: should raise an exception.
    # BUGFIX: the original wrapped the call in a bare try/except whose handler
    # also swallowed the `assert False` sentinel (an AssertionError), so the
    # negative test could never fail. assert_raises verifies the exception is
    # actually raised.
    nose.tools.assert_raises(Exception, indices.calc_grouping, 'unknown')
def get_indices(resources, indices):
    """Calculate icclim climate indices for every dataset in *resources*.

    :param resources: list of netCDF files with DRS-style filenames
    :param indices: list of '<indicename>_<timeaggregation>' strings (e.g. 'SU_JJA')
    :return: list of netCDF files with the calculated indices
    """
    from flyingpigeon.utils import sort_by_filename, calc_grouping, drs_filename
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.indices import indice_variable

    #names = [drs_filename(nc, skip_timestamp=False, skip_format=False,
    #               variable=None, rename_file=True, add_file_path=True) for nc in resources]

    # group input files into one entry per data experiment
    ncs = sort_by_filename(resources, historical_concatination=True)
    ncs_indices = []
    logger.info('resources sorted found %s datasets' % len(ncs.keys()))
    for key in ncs.keys():
        for indice in indices:
            try:
                # indice identifier encodes the icclim name and the time aggregation
                name, month = indice.split('_')
                # DRS filenames start with the variable name
                variable = key.split('_')[0]
                print name, month, variable
                # only calculate when the dataset's variable matches the
                # variable this indice is defined on
                if variable == indice_variable(name):
                    logger.info('calculating indice %s ' % indice)
                    grouping = calc_grouping(month)
                    calc = [{'func': 'icclim_' + name, 'name': name}]
                    # output name: swap variable for indice name, 'day' for the aggregation
                    prefix = key.replace(variable, name).replace('_day_', '_%s_' % month)
                    nc = call(resource=ncs[key], variable=variable, calc=calc,
                              calc_grouping=grouping, prefix=prefix,
                              memory_limit=500)  # memory_limit=500
                    ncs_indices.append(nc)
                    logger.info('Successful calculated indice %s %s' % (key, indice))
            except Exception as e:
                # best effort: log and continue with the next indice/dataset
                logger.exception('failed to calculate indice %s %s' % (key, indice))
    return ncs_indices
def _option_input_handler(self, request):
    """Collect the option-input values of *request* into a dict.

    The 'grouping' input is translated into an ocgis calc_grouping list and
    stored under the key 'calc_grouping'; all other inputs are passed through
    under their own identifier.
    """
    from flyingpigeon.utils import calc_grouping

    handled = {'calc_grouping': None}
    for option in self.option_inputs:
        identifier = option.identifier
        value = request.inputs[identifier][0].data
        if identifier == 'grouping':
            handled['calc_grouping'] = calc_grouping(value)
        else:
            handled[identifier] = value
    return handled
def aggregatTime(resource=[], variable=None, frequency=None, prefix=None,
                 grouping='mon', calculation='mean', historical_concatination=True):
    """
    Aggregates over the time axis.

    :param resource: input netCDF files
    :param variable: variable to be used from resource
    :param frequency: time frequency in resource
    :param grouping: time aggregation for output
    :param prefix: file name prefix
    :param calculation: calculation methode (default = mean )
    :param historical_concatination: if rcps and appropriate historical runs are present thy are concatinated
    :return: path to netCDF file
    """
    try:
        # one dictionary entry per data experiment; ocgis grouping list for
        # the requested time aggregation
        ncs = sort_by_filename(resource, historical_concatination=historical_concatination)
        group = calc_grouping(grouping=grouping)
    except Exception as e:
        logger.exception('failed to determine ncs or calc_grouping')
        raise

    # exactly one data experiment is expected in resource
    if len(ncs.keys())!= 1:
        logger.exception('None or more than one data experiments found in resource')
        raise Exception('None or more than one data experiments found in resource')

    # NOTE(review): Python-2 style keys()[0:1]; only the single experiment is processed
    for key in ncs.keys()[0:1]:
        try:
            if frequency == None:
                frequency = get_frequency(ncs[key][0])
            if variable == None:
                variable = get_variable(ncs[key][0])
            # record the new output time frequency as a field attribute
            meta_attrs = {'field': {'frequency': grouping}}  # 'variable': {'new_attribute': 5, 'hello': 'attribute'},
            calc = [{'func': calculation, 'name': variable, 'meta_attrs': meta_attrs}]
            logger.info('calculation: %s ' % (calc))
            if prefix == None:
                # output name: swap the input frequency for the new grouping
                prefix = key.replace(frequency, grouping)
            logger.info('prefix: %s ' % (prefix))
            output = call(resource=ncs[key], variable=None, calc=calc,
                          calc_grouping=group, prefix=prefix)
            logger.info('time aggregation done for %s ' % (key))
        except Exception as e:
            logger.exception('time aggregation failed for %s' % key)
            raise
    return output  # key # output
def get_yrmean(resource=[]):
    """
    calculation of annual mean temperature and clipping Europe

    :param resource: list or netCDF tas input files

    :return list: list of output files
    """
    from flyingpigeon.utils import calc_grouping, sort_by_filename
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping

    # one dictionary entry per data experiment
    ncs = sort_by_filename(resource)
    nc_tasmean = []
    try:
        for key in ncs.keys():
            try:
                logger.info('process %s' % (key))
                # annual mean of tas, clipped to the Europe polygon
                calc = [{'func': 'mean', 'name': 'tas'}]
                calc_group = calc_grouping('yr')
                # DRS filename field 7 holds the time frequency; rename it to 'yr'
                prefix = key.replace(key.split('_')[7], 'yr')
                nc_tasmean.append(clipping(resource=ncs[key], variable='tas',
                                           calc=calc, calc_grouping=calc_group,
                                           prefix=prefix, polygons='Europe')[0])
                logger.info('clipping and mean tas calculation done for %s' % (key))
            except Exception as e:
                # best effort: continue with the next experiment
                logger.debug('mean tas calculation failed for %s : %s ' % (key, e))
    except Exception as e:
        logger.debug('clipping failed for %s: %s' % (key, e))
    return nc_tasmean
def calc_indice_percentile(resource=[], variable=None, prefix=None, indices='TG90p',
                           refperiod=None, grouping='yr', polygons=None, percentile=90,
                           mosaic=False, dir_output=None, dimension_map=None):
    """
    Calculates given indices for suitable dataset in the appropriate time grouping and polygon.

    :param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: string of indice (default ='TG90p')
    :param prefix: filename prefix
    :param refperiod: reference period = [datetime,datetime]
    :param grouping: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: reference_file, indice_file
    """
    from os.path import join, dirname, exists
    from os import remove
    import uuid
    from numpy import ma
    from datetime import datetime as dt

    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time

    # TODO: see ticket https://github.com/bird-house/flyingpigeon/issues/200
    # NOTE(review): everything below this raise is dead code, kept for the
    # pending re-implementation tracked in the ticket above.
    raise NotImplementedError('Sorry! Function is under construction.')

    if type(resource) != list:
        resource = list([resource])

    # if type(indices) != list:
    #     indices = list([indices])
    #
    # if type(groupings) != list:
    #     groupings = list([groupings])
    #
    # if type(refperiod) == list:
    #     refperiod = refperiod[0]
    #
    # if refperiod is not None:
    #     start = dt.strptime(refperiod.split('-')[0], '%Y%m%d')
    #     end = dt.strptime(refperiod.split('-')[1], '%Y%m%d')
    #     time_range = [start, end]
    # else:
    #     time_range = None

    ################################################
    # Compute a custom percentile basis using ICCLIM
    ################################################
    from ocgis.contrib import library_icclim as lic
    calc_group = calc_grouping(grouping)

    if variable is None:
        variable = get_variable(resource)

    # reference dataset for the percentile basis (optionally clipped to polygons)
    if polygons is None:
        nc_reference = call(resource=resource,
                            prefix=str(uuid.uuid4()),
                            time_range=refperiod,
                            output_format='nc')
    else:
        nc_reference = clipping(resource=resource,
                                prefix=str(uuid.uuid4()),
                                time_range=refperiod,
                                output_format='nc',
                                polygons=polygons,
                                mosaic=mosaic)

    # arr = get_values(resource=nc_reference)
    # dt_arr = get_time(resource=nc_reference)
    # arr = ma.masked_array(arr)
    # dt_arr = ma.masked_array(dt_arr)
    # percentile = percentile
    # window_width = 5
    #
    # for indice in indices:
    #     name = indice.replace('_', str(percentile))
    #     var = indice.split('_')[0]
    #
    #     operation = None
    #     if 'T' in var:
    #         if percentile >= 50:
    #             operation = 'Icclim%s90p' % var
    #             func = 'icclim_%s90p' % var  # icclim_TG90p
    #         else:
    #             operation = 'Icclim%s10p' % var
    #             func = 'icclim_%s10p' % var
    #
    #     ################################
    #     # load the appropriate operation
    #     ################################
    #
    #     ops = [op for op in dir(lic) if operation in op]
    #     if len(ops) == 0:
    #         raise Exception("operator does not exist %s", operation)
    #
    #     exec "percentile_dict = lic.%s.get_percentile_dict(arr, dt_arr, percentile, window_width)" % ops[0]
    #     calc = [{'func': func, 'name': name, 'kwds': {'percentile_dict': percentile_dict}}]
    #
    #     if polygons is None:
    #         nc_indices.extend(call(resource=resource,
    #                                prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
    #                                calc=calc,
    #                                calc_grouping=calc_group,
    #                                output_format='nc'))
    #     else:
    #         nc_indices.extend(clipping(resource=resource,
    #                                    prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
    #                                    calc=calc,
    #                                    calc_grouping=calc_group,
    #                                    output_format='nc',
    #                                    polygons=polygons,
    #                                    mosaic=mosaic,
    #                                    ))
    # if len(nc_indices) is 0:
    #     LOGGER.debug('No indices are calculated')
    #     return None
    # NOTE(review): nc_indices is never assigned on the live path; unreachable
    # because of the raise above, but would NameError if the raise were removed.
    return nc_indices
def calc_indice_simple(resource=[], variable=None, prefix=None, indice='SU',
                       polygons=None, mosaic=False, grouping='yr',
                       dir_output=None, dimension_map=None, memory_limit=None):
    """
    Calculates given simple indices for suitable files in the appropriate time grouping and polygon.

    :param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indice: indice abbreviation (default ='SU')
    :param polygons: list of polgons (default=None; no subsetting)
    :param mosaic: if True the polygons are merged into one subset
    :param grouping: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)
    :param memory_limit: memory limit handed to the ocgis call

    :return: list of netcdf files with calculated indices, or None if nothing was calculated.
    """
    from os.path import join, dirname, exists
    from flyingpigeon import ocgis_module
    from flyingpigeon.subset import clipping
    import uuid

    if type(resource) != list:
        resource = list([resource])

    # BUGFIX: condition was `polygons is None`, which wrapped None into [None]
    # and thereby broke the no-polygon code path below
    if type(polygons) != list and polygons is not None:
        polygons = list([polygons])

    if dir_output is not None:
        if not exists(dir_output):
            makedirs(dir_output)

    datasets = list(sort_by_filename(resource).keys())
    # BUGFIX: was `len(datasets) is 1` (identity check on an int); additionally
    # `key` was left unbound on the multi-dataset branch, causing a NameError
    # when the output prefix was built — fall back to the first dataset.
    if len(datasets) == 1:
        key = datasets[0]
    else:
        LOGGER.warning('more than one dataset in resource')
        key = datasets[0]

    # from flyingpigeon.subset import select_ugid
    # tile_dim = 25
    output = None
    outputs = []

    if variable is None:
        variable = get_variable(resource)
        LOGGER.debug('Variable detected % s ' % variable)

    try:
        # icclim can't handle 'kg m2 sec'; precipitation needs to be 'mm/day'
        if variable == 'pr':
            calc = 'pr=pr*86400'
            ncs = ocgis_module.call(resource=resource,
                                    variable=variable,
                                    dimension_map=dimension_map,
                                    calc=calc,
                                    memory_limit=memory_limit,
                                    # calc_grouping= calc_group,
                                    prefix=str(uuid.uuid4()),
                                    dir_output=dir_output,
                                    output_format='nc')
        else:
            ncs = resource
        try:
            calc = [{'func': 'icclim_' + indice, 'name': indice}]
            LOGGER.info('calc: %s' % calc)
            try:
                calc_group = calc_grouping(grouping)
                LOGGER.info('calc_group: %s' % calc_group)
                if polygons is None:
                    try:
                        # output name: swap variable for indice name, 'day' for the grouping
                        prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                        LOGGER.debug(' **** dir_output = %s ' % dir_output)
                        tmp = ocgis_module.call(resource=ncs,
                                                variable=variable,
                                                dimension_map=dimension_map,
                                                calc=calc,
                                                calc_grouping=calc_group,
                                                prefix=prefix,
                                                dir_output=dir_output,
                                                output_format='nc')
                        if len(tmp) != 0:  # BUGFIX: was `is not 0`
                            outputs.extend(tmp)
                        else:
                            msg = 'could not calc indice %s for domain ' % (indice)
                            LOGGER.exception(msg)
                    except Exception:
                        # BUGFIX: format string had two placeholders but only
                        # one argument, raising TypeError inside the handler
                        msg = 'could not calc indice %s for domain in %s' % (indice, key)
                        LOGGER.exception(msg)
                else:
                    try:
                        prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                        tmp = clipping(resource=ncs,
                                       variable=variable,
                                       dimension_map=dimension_map,
                                       calc=calc,
                                       calc_grouping=calc_group,
                                       prefix=prefix,
                                       polygons=polygons,
                                       mosaic=mosaic,
                                       dir_output=dir_output,
                                       output_format='nc')
                        if len(tmp) != 0:  # BUGFIX: was `is not 0`
                            outputs.extend(tmp)
                        else:
                            msg = 'could not calc clipped indice %s ' % (indice)
                            LOGGER.exception(msg)
                    except Exception:
                        msg = 'could not calc indice %s for domain' % (indice)  # BUGFIX: typo 'domai'
                        LOGGER.debug(msg)
                        # raise Exception(msg)
                LOGGER.info('indice file calculated: %s' % tmp)
            except Exception:
                # BUGFIX: message had three placeholders but only two arguments
                msg = 'could not calc indice %s for key %s and grouping %s' % (indice, key, grouping)
                LOGGER.exception(msg)
                # raise Exception(msg)
        except Exception:
            msg = 'could not calc indice %s ' % (indice)
            LOGGER.exception(msg)
            # raise Exception(msg)
    except Exception:
        msg = 'could not calculate indices'
        LOGGER.exception(msg)
        # raise Exception(msg)

    LOGGER.info('indice outputs %s ' % outputs)

    if len(outputs) == 0:  # BUGFIX: was `is 0`
        LOGGER.debug('No indices are calculated')
        return None
    return outputs
def calc_indice_unconventional(resource=[], variable=None, prefix=None,
                               indices=None, polygons=None, groupings=None,
                               dir_output=None, dimension_map=None):
    """
    Calculates given indices for suitable files in the appopriate time grouping and polygon.

    :param resource: list of filenames in drs convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='TGx')
    :param polygons: list of polgons (default =None)
    :param grouping: indices time aggregation (default='yr')
    :param out_dir: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into dir_output
    """
    from os.path import join, dirname, exists
    from os import remove
    import uuid
    from flyingpigeon import ocgis_module
    from flyingpigeon.subset import get_ugid, get_geom

    # normalize scalar arguments to lists
    if type(resource) != list:
        resource = list([resource])
    if type(indices) != list:
        indices = list([indices])

    if type(polygons) != list and polygons != None:
        polygons = list([polygons])
    elif polygons == None:
        # [None] makes the polygon loop below run once without subsetting
        polygons = [None]
    else:
        logger.error('Polygons not found')

    if type(groupings) != list:
        groupings = list([groupings])

    if dir_output != None:
        if not exists(dir_output):
            makedirs(dir_output)

    experiments = sort_by_filename(resource)
    outputs = []

    # print('environment for calc_indice_unconventional set')
    logger.info('environment for calc_indice_unconventional set')

    for key in experiments:
        if variable == None:
            variable = get_variable(experiments[key][0])
        try:
            ncs = experiments[key]
            for indice in indices:
                logger.info('indice: %s' % indice)
                try:
                    for grouping in groupings:
                        logger.info('grouping: %s' % grouping)
                        try:
                            calc_group = calc_grouping(grouping)
                            logger.info('calc_group: %s' % calc_group)
                            for polygon in polygons:
                                try:
                                    # DRS filename field 1 encodes '<domain>-<resolution>'
                                    domain = key.split('_')[1].split('-')[0]
                                    if polygon == None:
                                        if prefix == None:
                                            prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                                        geom = None
                                        ugid = None
                                    else:
                                        if prefix == None:
                                            prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping).replace(domain, polygon)
                                        geom = get_geom(polygon=polygon)
                                        ugid = get_ugid(polygons=polygon, geom=geom)
                                    if indice == 'TGx':
                                        # grouped maximum of the daily mean temperature
                                        calc = [{'func': 'max', 'name': 'TGx'}]
                                        tmp = ocgis_module.call(resource=ncs,  # conform_units_to='celcius',
                                                                variable=variable,
                                                                dimension_map=dimension_map,
                                                                calc=calc,
                                                                calc_grouping=calc_group,
                                                                prefix=prefix,
                                                                dir_output=dir_output,
                                                                geom=geom,
                                                                select_ugid=ugid)
                                    elif indice == 'TGn':
                                        # grouped minimum of the daily mean temperature
                                        calc = [{'func': 'min', 'name': 'TGn'}]
                                        tmp = ocgis_module.call(resource=ncs,  # conform_units_to='celcius',
                                                                variable=variable,
                                                                dimension_map=dimension_map,
                                                                calc=calc,
                                                                calc_grouping=calc_group,
                                                                prefix=prefix,
                                                                dir_output=dir_output,
                                                                geom=geom,
                                                                select_ugid=ugid)
                                    elif indice == 'TGx5day':
                                        # two-step calculation: 5-day moving mean first,
                                        # then the grouped maximum of the smoothed series
                                        calc = [{'func': 'moving_window', 'name': 'TGx5day',
                                                 'kwds': {'k': 5, 'operation': 'mean', 'mode': 'same'}}]
                                        tmp2 = ocgis_module.call(resource=ncs,  # conform_units_to='celcius',
                                                                 variable=variable,
                                                                 dimension_map=dimension_map,
                                                                 calc=calc,
                                                                 prefix=str(uuid.uuid4()),
                                                                 geom=geom,
                                                                 select_ugid=ugid)
                                        calc = [{'func': 'max', 'name': 'TGx5day'}]
                                        logger.info('moving window calculated : %s' % tmp2)
                                        tmp = ocgis_module.call(resource=tmp2,
                                                                variable=indice,
                                                                dimension_map=dimension_map,
                                                                calc=calc,
                                                                calc_grouping=calc_group,
                                                                prefix=prefix,
                                                                dir_output=dir_output)
                                        # clean up the intermediate moving-window file
                                        remove(tmp2)
                                    elif indice == 'TGn5day':
                                        # analogous two-step calculation with grouped minimum
                                        calc = [{'func': 'moving_window', 'name': 'TGn5day',
                                                 'kwds': {'k': 5, 'operation': 'mean', 'mode': 'same'}}]
                                        tmp2 = ocgis_module.call(resource=ncs,  # conform_units_to='celcius',
                                                                 variable=variable,
                                                                 dimension_map=dimension_map,
                                                                 calc=calc,
                                                                 prefix=str(uuid.uuid4()),
                                                                 geom=geom,
                                                                 select_ugid=ugid)
                                        calc = [{'func': 'min', 'name': 'TGn5day'}]
                                        logger.info('moving window calculated : %s' % tmp2)
                                        tmp = ocgis_module.call(resource=tmp2,
                                                                variable=indice,
                                                                dimension_map=dimension_map,
                                                                calc=calc,
                                                                calc_grouping=calc_group,
                                                                prefix=prefix,
                                                                dir_output=dir_output)
                                        remove(tmp2)
                                    else:
                                        logger.error('Indice %s is not a known inidce' % (indice))
                                    outputs.append(tmp)
                                    logger.info('indice file calcualted %s ' % (tmp))
                                except Exception as e:
                                    logger.exception('could not calc indice %s for key %s, polygon %s and calc_grouping %s : %s' % (indice, key, polygon, grouping, e))
                        except Exception as e:
                            logger.exception('could not calc indice %s for key %s and calc_grouping %s : %s' % (indice, key, polygon, e))
                except Exception as e:
                    logger.exception('could not calc indice %s for key %s: %s' % (indice, key, e))
        except Exception as e:
            logger.exception('could not calc key %s: %s' % (key, e))
    return outputs
def get_segetalflora(
    resource=[], dir_output=".", culture_type="fallow", climate_type=2, region=None, dimension_map=None
):
    """productive worker for segetalflora jobs

    :param resources: list of tas netCDF files. (Any time aggregation is possible)
    :param culture_type: Type of culture. Possible values are:
                         'fallow', 'intensive', 'extensive' (default:'fallow')
    :param climate_type: Type of climate: number 1 to 7 or 'all' (default: 2)
    :param region: Region for subset. If 'None' (default), the values will be calculated for Europe
    """
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import calc_grouping, sort_by_filename
    import os
    from os import remove
    from tempfile import mkstemp
    from ocgis import RequestDataset, OcgOperations

    from cdo import Cdo

    cdo = Cdo()

    if not os.path.exists(dir_output):
        os.makedirs(dir_output)

    # all relative paths below are resolved against dir_output
    os.chdir(dir_output)
    # outputs = []

    if region == None:
        region = "Europe"

    if not type(culture_type) == list:
        culture_type = list([culture_type])
    if not type(climate_type) == list:
        climate_type = list([climate_type])

    ncs = sort_by_filename(resource)
    print "%s experiments found" % (len(ncs))
    print "keys: %s " % (ncs.keys())

    # generate outfolder structure:
    dir_netCDF = "netCDF"
    dir_ascii = "ascii"
    dir_netCDF_tas = dir_netCDF + "/tas"
    dir_ascii_tas = dir_ascii + "/tas"

    if not os.path.exists(dir_netCDF):
        os.makedirs(dir_netCDF)
    if not os.path.exists(dir_ascii):
        os.makedirs(dir_ascii)
    if not os.path.exists(dir_netCDF_tas):
        os.makedirs(dir_netCDF_tas)
    if not os.path.exists(dir_ascii_tas):
        os.makedirs(dir_ascii_tas)

    tas_files = []

    # step 1: annual mean tas clipped to Europe, one file per experiment
    for key in ncs.keys():
        try:
            print "process %s" % (key)
            calc = [{"func": "mean", "name": "tas"}]
            calc_group = calc_grouping("yr")
            # DRS filename field 7 holds the time frequency; rename it to 'yr'
            prefix = key.replace(key.split("_")[7], "yr")
            if not os.path.exists(os.path.join(dir_netCDF_tas, prefix + ".nc")):
                nc_tas = clipping(
                    resource=ncs[key],
                    variable="tas",
                    calc=calc,
                    dimension_map=dimension_map,
                    calc_grouping=calc_group,
                    prefix=prefix,
                    polygons="Europe",
                    dir_output=dir_netCDF_tas,
                )[0]
                print "clipping done for %s" % (key)
                if os.path.exists(os.path.join(dir_netCDF_tas, prefix + ".nc")):
                    tas_files.append(prefix)
                else:
                    print "clipping failed for %s: No output file exists" % (key)
            else:
                print "netCDF file already exists %s" % (key)
                nc_tas = os.path.join(dir_netCDF_tas, prefix + ".nc")
        except Exception as e:
            print "clipping failed for %s: %s" % (key, e)
        # step 2: translate the clipped tas netCDF into an ascii table via cdo
        try:
            asc_tas = os.path.join(dir_ascii_tas, prefix + ".asc")
            if not os.path.exists(asc_tas):
                f, tmp = mkstemp(dir=os.curdir, suffix=".asc")
                tmp = tmp.replace(os.path.abspath(os.curdir), ".")
                # cdo.outputtab('name,date,lon,lat,value', input = nc_tas , output = tmp)
                cmd = "cdo outputtab,name,date,lon,lat,value %s > %s" % (nc_tas, tmp)
                print cmd
                os.system(cmd)
                print ("tanslation to ascii done")
                remove_rows(tmp, asc_tas)
                remove(tmp)
                print ("rows with missing values removed")
            else:
                print ("tas ascii already exists")
            plot_ascii(asc_tas)
        except Exception as e:
            print "translation to ascii failed %s: %s" % (key, e)
            if os.path.exists(tmp):
                remove(tmp)

    tas_files = [os.path.join(dir_netCDF_tas, nc) for nc in os.listdir(dir_netCDF_tas)]
    outputs = []

    # step 3: apply the segetalflora equation per culture/climate combination
    for name in tas_files:
        for cult in culture_type:
            for climat in climate_type:
                try:
                    calc = get_equation(culture_type=cult, climate_type=climat)
                    # NOTE(review): `type(calc) != None` is always True (a type is
                    # never None) — presumably `calc != None` was intended; confirm
                    if type(calc) != None:
                        try:
                            var = "sf%s%s" % (cult, climat)
                            prefix = os.path.basename(name).replace("tas", var).strip(".nc")
                            infile = name  # os.path.join(dir_netCDF_tas,name+'.nc')
                            dir_sf = os.path.join(dir_netCDF, var)
                            if not os.path.exists(dir_sf):
                                os.makedirs(dir_sf)
                            if os.path.exists(os.path.join(dir_sf, prefix + ".nc")):
                                nc_sf = os.path.join(dir_sf, prefix + ".nc")
                                print "netCDF file already exists: %s %s " % (dir_sf, prefix)
                            else:
                                rd = RequestDataset(name, variable="tas", dimension_map=dimension_map)
                                op = OcgOperations(
                                    dataset=rd,
                                    calc=calc,
                                    prefix=prefix,
                                    output_format="nc",
                                    dir_output=dir_sf,
                                    add_auxiliary_files=False,
                                )
                                nc_sf = op.execute()
                                print "segetalflora done for %s" % (prefix)
                                outputs.append(prefix)
                            dir_ascii_sf = os.path.join(dir_ascii, var)
                            if not os.path.exists(dir_ascii_sf):
                                os.makedirs(dir_ascii_sf)
                            asc_sf = os.path.join(dir_ascii_sf, prefix + ".asc")
                            if not os.path.exists(asc_sf):
                                f, tmp = mkstemp(dir=os.curdir, suffix=".asc")
                                tmp = tmp.replace(os.path.abspath(os.curdir), ".")
                                # cdo.outputtab('name,date,lon,lat,value', input = nc_sf , output = tmp)
                                cmd = "cdo outputtab,name,date,lon,lat,value %s > %s" % (nc_sf, tmp)
                                os.system(cmd)
                                print ("translation to ascii done")
                                remove_rows(tmp, asc_sf)
                                remove(tmp)
                                print ("rows with missing values removed")
                            else:
                                print "ascii file already exists"
                            plot_ascii(asc_sf)
                        except Exception as e:
                            print "failed for ascii file: %s %s " % (name, e)
                            if os.path.exists(tmp):
                                remove(tmp)
                    else:
                        print "NO EQUATION found for %s %s " % (cult, climat)
                except Exception as e:
                    print "Segetal flora failed: %s" % (e)
    return outputs
def calc_indice_percentile(resources=[], variable=None, prefix=None, indices='TG90p', refperiod=None, groupings='yr', polygons=None, percentile=90, mosaic=False, dir_output=None, dimension_map=None): """ Calculates given indices for suitable files in the appropriate time grouping and polygon. :param resource: list of filenames in data reference syntax (DRS) convention (netcdf) :param variable: variable name to be selected in the in netcdf file (default=None) :param indices: list of indices (default ='TG90p') :param prefix: filename prefix :param refperiod: reference period tuple = (start,end) :param grouping: indices time aggregation (default='yr') :param dir_output: output directory for result file (netcdf) :param dimension_map: optional dimension map if different to standard (default=None) :return: list of netcdf files with calculated indices. Files are saved into out_dir. """ from os.path import join, dirname, exists from os import remove import uuid from numpy import ma from datetime import datetime as dt from flyingpigeon.ocgis_module import call from flyingpigeon.subset import clipping from flyingpigeon.utils import get_values, get_time if type(resources) != list: resources = list([resources]) if type(indices) != list: indices = list([indices]) if type(groupings) != list: groupings = list([groupings]) if type(refperiod) == list: refperiod = refperiod[0] if refperiod is None: start = dt.strptime(refperiod.split('-')[0], '%Y%m%d') end = dt.strptime(refperiod.split('-')[1], '%Y%m%d') time_range = [start, end] else: time_range = None if dir_output is None: if not exists(dir_output): makedirs(dir_output) ################################################ # Compute a custom percentile basis using ICCLIM ################################################ from ocgis.contrib import library_icclim as lic nc_indices = [] nc_dic = sort_by_filename(resources) for grouping in groupings: calc_group = calc_grouping(grouping) for key in nc_dic.keys(): resource = nc_dic[key] if 
variable is None: variable = get_variable(resource) if polygons is None: nc_reference = call(resource=resource, prefix=str(uuid.uuid4()), time_range=time_range, output_format='nc', dir_output=dir_output) else: nc_reference = clipping(resource=resource, prefix=str(uuid.uuid4()), time_range=time_range, output_format='nc', polygons=polygons, dir_output=dir_output, mosaic=mosaic) arr = get_values(resource=nc_reference) dt_arr = get_time(resource=nc_reference) arr = ma.masked_array(arr) dt_arr = ma.masked_array(dt_arr) percentile = percentile window_width = 5 for indice in indices: name = indice.replace('_', str(percentile)) var = indice.split('_')[0] operation = None if 'T' in var: if percentile >= 50: operation = 'Icclim%s90p' % var func = 'icclim_%s90p' % var # icclim_TG90p else: operation = 'Icclim%s10p' % var func = 'icclim_%s10p' % var ################################ # load the appropriate operation ################################ ops = [op for op in dir(lic) if operation in op] if len(ops) == 0: raise Exception("operator does not exist %s", operation) exec "percentile_dict = lic.%s.get_percentile_dict(arr, dt_arr, percentile, window_width)" % ops[ 0] calc = [{ 'func': func, 'name': name, 'kwds': { 'percentile_dict': percentile_dict } }] if polygons is None: nc_indices.extend( call(resource=resource, prefix=key.replace(variable, name).replace( '_day_', '_%s_' % grouping), calc=calc, calc_grouping=calc_group, output_format='nc', dir_output=dir_output)) else: nc_indices.extend( clipping( resource=resource, prefix=key.replace(variable, name).replace( '_day_', '_%s_' % grouping), calc=calc, calc_grouping=calc_group, output_format='nc', dir_output=dir_output, polygons=polygons, mosaic=mosaic, )) if len(nc_indices) is 0: logger.debug('No indices are calculated') return None return nc_indices
def calc_indice_simple(resource=[], variable=None, prefix=None, indices=None,
                       polygons=None, mosaic=False, groupings='yr',
                       dir_output=None, dimension_map=None, memory_limit=None):
    """
    Calculates given simple indices for suitable files in the appropriate time grouping and polygon.

    :param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='SU')
    :param polygons: list of polgons (default ='FRA')
    :param grouping: indices time aggregation (default='yr')
    :param out_dir: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into out_dir.
    """
    from os.path import join, dirname, exists
    from flyingpigeon import ocgis_module
    from flyingpigeon.subset import clipping
    import uuid

    #DIR_SHP = config.shapefiles_dir()
    #env.DIR_SHPCABINET = DIR_SHP
    #env.OVERWRITE = True

    # normalize scalar arguments to lists
    if type(resource) != list:
        resource = list([resource])
    if type(indices) != list:
        indices = list([indices])
    if type(polygons) != list and polygons != None:
        polygons = list([polygons])
    if type(groupings) != list:
        groupings = list([groupings])

    if dir_output != None:
        if not exists(dir_output):
            makedirs(dir_output)

    #from flyingpigeon.subset import select_ugid
    # tile_dim = 25
    output = None

    experiments = sort_by_filename(resource)
    outputs = []
    for key in experiments:
        if variable == None:
            variable = get_variable(experiments[key][0])
            #variable = key.split('_')[0]
        try:
            # icclim cannot handle precipitation flux units; convert to mm/day
            if variable == 'pr':
                calc = 'pr=pr*86400'
                ncs = ocgis_module.call(resource=experiments[key],
                                        variable=variable,
                                        dimension_map=dimension_map,
                                        calc=calc,
                                        memory_limit=memory_limit,
                                        #alc_grouping= calc_group,
                                        prefix=str(uuid.uuid4()),
                                        dir_output=dir_output,
                                        output_format='nc')
            else:
                ncs = experiments[key]
            for indice in indices:
                logger.info('indice: %s' % indice)
                try:
                    calc = [{'func': 'icclim_' + indice, 'name': indice}]
                    logger.info('calc: %s' % calc)
                    for grouping in groupings:
                        logger.info('grouping: %s' % grouping)
                        try:
                            calc_group = calc_grouping(grouping)
                            logger.info('calc_group: %s' % calc_group)
                            if polygons == None:
                                try:
                                    # output name: swap variable for indice, 'day' for the grouping
                                    prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                                    tmp = ocgis_module.call(resource=ncs,
                                                            variable=variable,
                                                            dimension_map=dimension_map,
                                                            calc=calc,
                                                            calc_grouping=calc_group,
                                                            prefix=prefix,
                                                            dir_output=dir_output,
                                                            output_format='nc')
                                    outputs.append(tmp)
                                except Exception as e:
                                    msg = 'could not calc indice %s for domain in %s' % (indice, key)
                                    logger.debug(msg)
                                    # raise Exception(msg)
                            else:
                                try:
                                    prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                                    tmp = clipping(resource=ncs,
                                                   variable=variable,
                                                   dimension_map=dimension_map,
                                                   calc=calc,
                                                   calc_grouping=calc_group,
                                                   prefix=prefix,
                                                   polygons=polygons,
                                                   mosaic=mosaic,
                                                   dir_output=dir_output,
                                                   output_format='nc')
                                    outputs.append(tmp)
                                except Exception as e:
                                    msg = 'could not calc indice %s for domain in %s' % (indice, key)
                                    logger.debug(msg)
                                    # raise Exception(msg)
                            logger.info('indice file calculated: %s' % tmp)
                        except Exception as e:
                            msg = 'could not calc indice %s for key %s and grouping %s' % (indice, key, grouping)
                            logger.debug(msg)
                            # raise Exception(msg)
                except Exception as e:
                    msg = 'could not calc indice %s for key %s' % (indice, key)
                    logger.debug(msg)
                    # raise Exception(msg)
        except Exception as e:
            msg = 'could not calc key %s' % key
            logger.debug(msg)
            # raise Exception(msg)
    logger.info('indice outputs %s ' % outputs)
    return outputs
def calc_indice_percentile(resources=[], variable=None, prefix=None, indices='TG90p',
                           refperiod=None, groupings='yr', polygons=None, percentile=90,
                           mosaik=False, dir_output=None, dimension_map=None):
    """
    Calculates given indices for suitable files in the appopriate time grouping and polygon.

    :param resource: list of filenames in drs convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='TG90p')
    :param prefix: filename prefix
    :param refperiod: reference refperiod touple = (start,end)
    :param grouping: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into out_dir
    """
    from os.path import join, dirname, exists
    from os import remove
    import uuid
    from numpy import ma
    from datetime import datetime as dt

    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time

    # normalize scalar arguments to lists
    if type(resources) != list:
        resources = list([resources])
    if type(indices) != list:
        indices = list([indices])
    if type(groupings) != list:
        groupings = list([groupings])
    if type(refperiod) == list:
        refperiod = refperiod[0]

    if refperiod != None:
        # refperiod string is 'YYYYMMDD-YYYYMMDD'
        start = dt.strptime(refperiod.split('-')[0], '%Y%m%d')
        end = dt.strptime(refperiod.split('-')[1], '%Y%m%d')
        time_range = [start, end]
    else:
        time_range = None

    if dir_output != None:
        if not exists(dir_output):
            makedirs(dir_output)

    ########################################################################################################################
    # Compute a custom percentile basis using ICCLIM. ######################################################################
    ########################################################################################################################

    from ocgis.contrib import library_icclim as lic
    nc_indices = []
    nc_dic = sort_by_filename(resources)

    for grouping in groupings:
        calc_group = calc_grouping(grouping)
        for key in nc_dic.keys():
            resource = nc_dic[key]
            if variable == None:
                variable = get_variable(resource)
            # reference dataset for the percentile basis (optionally clipped)
            if polygons == None:
                nc_reference = call(resource=resource,
                                    prefix=str(uuid.uuid4()),
                                    time_range=time_range,
                                    output_format='nc',
                                    dir_output=dir_output)
            else:
                nc_reference = clipping(resource=resource,
                                        prefix=str(uuid.uuid4()),
                                        time_range=time_range,
                                        output_format='nc',
                                        polygons=polygons,
                                        dir_output=dir_output,
                                        mosaik=mosaik)

            arr = get_values(nc_files=nc_reference)
            dt_arr = get_time(nc_files=nc_reference)
            arr = ma.masked_array(arr)
            dt_arr = ma.masked_array(dt_arr)
            percentile = percentile
            window_width = 5  # moving-window width for the percentile basis

            for indice in indices:
                name = indice.replace('_', str(percentile))
                var = indice.split('_')[0]

                operation = None
                if 'T' in var:
                    # temperature indices: percentiles at/above the median map to
                    # the 90p operator, below the median to the 10p operator
                    if percentile >= 50:
                        operation = 'Icclim%s90p' % var
                        func = 'icclim_%s90p' % var  # icclim_TG90p
                    else:
                        operation = 'Icclim%s10p' % var
                        func = 'icclim_%s10p' % var

                ################################
                # load the appropriate operation
                ################################
                ops = [op for op in dir(lic) if operation in op]
                if len(ops) == 0:
                    raise Exception("operator does not exist %s", operation)

                # Python-2 exec statement: build the percentile dictionary with
                # the icclim operation class looked up above
                exec "percentile_dict = lic.%s.get_percentile_dict(arr, dt_arr, percentile, window_width)" % ops[0]
                calc = [{'func': func, 'name': name, 'kwds': {'percentile_dict': percentile_dict}}]

                # NOTE(review): the no-polygon branch uses append while the
                # polygon branch uses extend — confirm call() returns a single
                # path here, otherwise the result nesting differs per branch
                if polygons == None:
                    nc_indices.append(call(resource=resource,
                                           prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                           calc=calc,
                                           calc_grouping=calc_group,
                                           output_format='nc',
                                           dir_output=dir_output))
                else:
                    nc_indices.extend(clipping(resource=resource,
                                               prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                               calc=calc,
                                               calc_grouping=calc_group,
                                               output_format='nc',
                                               dir_output=dir_output,
                                               polygons=polygons,
                                               mosaik=mosaik,
                                               ))
    return nc_indices
def calc_indice_simple(resource=[], variable=None, prefix=None, indices=None,
                       polygons=None, mosaik=False, groupings='yr',
                       dir_output=None, dimension_map=None, memory_limit=None):
    """
    Calculates given simple indices for suitable files in the appropriate
    time grouping and polygon.

    :param resource: list of filenames in drs convention (netcdf)
    :param variable: variable name to be selected in the netcdf file
                     (default=None: detected from the first file of each dataset)
    :param prefix: output filename prefix; if None a prefix is derived per
                   dataset/indice/grouping from the dataset key
    :param indices: list of indices (e.g. ['SU'])
    :param polygons: list of polygons; if None no spatial subsetting is done
    :param mosaik: passed through to clipping (merge polygons into one subset)
    :param groupings: indices time aggregation (default='yr')
    :param dir_output: output directory for result files (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)
    :param memory_limit: memory limit handed to the ocgis call used for the
                         precipitation unit conversion

    :return: list of netcdf files with calculated indices. Files are saved
             into dir_output.
    """
    from os.path import exists
    from flyingpigeon import ocgis_module
    from flyingpigeon.subset import clipping
    import uuid

    # normalise scalar arguments to lists
    if not isinstance(resource, list):
        resource = [resource]
    if not isinstance(indices, list):
        indices = [indices]
    if not isinstance(polygons, list) and polygons is not None:
        polygons = [polygons]
    if not isinstance(groupings, list):
        groupings = [groupings]

    if dir_output is not None and not exists(dir_output):
        makedirs(dir_output)

    experiments = sort_by_filename(resource)
    outputs = []

    for key in experiments:
        if variable is None:
            # detect the variable name from the first file of the dataset
            variable = get_variable(experiments[key][0])
        try:
            if variable == 'pr':
                # icclim expects precipitation in mm/day; convert from kg m-2 s-1
                calc = 'pr=pr*86400'
                ncs = ocgis_module.call(resource=experiments[key],
                                        variable=variable,
                                        dimension_map=dimension_map,
                                        calc=calc,
                                        memory_limit=memory_limit,
                                        prefix=str(uuid.uuid4()),
                                        dir_output=dir_output,
                                        output_format='nc')
            else:
                ncs = experiments[key]
            for indice in indices:
                logger.info('indice: %s' % indice)
                try:
                    calc = [{'func': 'icclim_' + indice, 'name': indice}]
                    logger.info('calc: %s' % calc)
                    for grouping in groupings:
                        logger.info('grouping: %s' % grouping)
                        try:
                            calc_group = calc_grouping(grouping)
                            logger.info('calc_group: %s' % calc_group)
                            # BUGFIX: derive the prefix into a loop-local name
                            # instead of mutating the 'prefix' argument;
                            # previously the first auto-derived prefix was
                            # silently reused for every subsequent
                            # dataset/indice/grouping, so output files collided.
                            if prefix is None:
                                file_prefix = key.replace(variable, indice).replace('_day_', '_%s_' % grouping)
                            else:
                                file_prefix = prefix
                            if polygons is None:
                                try:
                                    tmp = ocgis_module.call(resource=ncs,
                                                            variable=variable,
                                                            dimension_map=dimension_map,
                                                            calc=calc,
                                                            calc_grouping=calc_group,
                                                            prefix=file_prefix,
                                                            dir_output=dir_output,
                                                            output_format='nc')
                                    outputs.extend([tmp])
                                except Exception as e:
                                    msg = 'could not calc indice %s for domain in %s' % (indice, key)
                                    logger.exception(msg)
                                    raise Exception(msg)
                            else:
                                try:
                                    tmp = clipping(resource=ncs,
                                                   variable=variable,
                                                   dimension_map=dimension_map,
                                                   calc=calc,
                                                   calc_grouping=calc_group,
                                                   prefix=file_prefix,
                                                   polygons=polygons,
                                                   mosaik=mosaik,
                                                   dir_output=dir_output,
                                                   output_format='nc')
                                    outputs.extend([tmp])
                                except Exception as e:
                                    msg = 'could not calc indice %s for domain in %s' % (indice, key)
                                    logger.exception(msg)
                                    raise Exception(msg)
                            logger.info('indice file calculated')
                        except Exception as e:
                            msg = 'could not calc indice %s for key %s and grouping %s' % (indice, key, grouping)
                            logger.exception(msg)
                            raise Exception(msg)
                except Exception as e:
                    msg = 'could not calc indice %s for key %s' % (indice, key)
                    logger.exception(msg)
                    raise Exception(msg)
        except Exception as e:
            msg = 'could not calc key %s' % key
            logger.exception(msg)
            raise Exception(msg)
    return outputs
def calc_indice_percentile(resources=[], variable=None, prefix=None, indices='TG90p',
                           refperiod=None, groupings='yr', polygons=None,
                           percentile=90, mosaic=False, dir_output=None,
                           dimension_map=None):
    """
    Calculates given percentile-based indices for suitable files in the
    appropriate time grouping and polygon.

    :param resources: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the netcdf file
                     (default=None: detected from each dataset)
    :param prefix: filename prefix (currently unused; output names are derived
                   from the dataset key)
    :param indices: list of indices (default='TG90p')
    :param refperiod: reference period string 'YYYYMMDD-YYYYMMDD'
    :param groupings: indices time aggregation (default='yr')
    :param polygons: list of polygons for spatial subsetting (default=None)
    :param percentile: percentile threshold (default=90)
    :param mosaic: passed through to clipping (merge polygons into one subset)
    :param dir_output: output directory for result files (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved
             into dir_output.

    NOTE(review): this function is defined twice in this module; the later
    definition shadows this one at import time. Consider removing one copy.
    """
    from os.path import exists
    import uuid
    from numpy import ma
    from datetime import datetime as dt
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time

    # normalise scalar arguments to lists
    if not isinstance(resources, list):
        resources = [resources]
    if not isinstance(indices, list):
        indices = [indices]
    if not isinstance(groupings, list):
        groupings = [groupings]
    if isinstance(refperiod, list):
        refperiod = refperiod[0]

    if refperiod is not None:
        start = dt.strptime(refperiod.split('-')[0], '%Y%m%d')
        end = dt.strptime(refperiod.split('-')[1], '%Y%m%d')
        time_range = [start, end]
    else:
        time_range = None

    if dir_output is not None and not exists(dir_output):
        makedirs(dir_output)

    ###################################################
    # Compute a custom percentile basis using ICCLIM. #
    ###################################################
    from ocgis.contrib import library_icclim as lic

    nc_indices = []
    nc_dic = sort_by_filename(resources)

    for grouping in groupings:
        calc_group = calc_grouping(grouping)
        for key in nc_dic.keys():
            resource = nc_dic[key]
            if variable is None:
                variable = get_variable(resource)
            # subset the reference period (and polygons, when requested) into
            # a temporary netcdf used to derive the percentile basis
            if polygons is None:
                nc_reference = call(resource=resource,
                                    prefix=str(uuid.uuid4()),
                                    time_range=time_range,
                                    output_format='nc',
                                    dir_output=dir_output)
            else:
                nc_reference = clipping(resource=resource,
                                        prefix=str(uuid.uuid4()),
                                        time_range=time_range,
                                        output_format='nc',
                                        polygons=polygons,
                                        dir_output=dir_output,
                                        mosaic=mosaic)

            arr = ma.masked_array(get_values(resource=nc_reference))
            dt_arr = ma.masked_array(get_time(resource=nc_reference))
            window_width = 5  # window (days) used by ICCLIM's percentile basis

            for indice in indices:
                name = indice.replace('_', str(percentile))
                var = indice.split('_')[0]

                operation = None
                if 'T' in var:
                    if percentile >= 50:
                        operation = 'Icclim%s90p' % var
                        func = 'icclim_%s90p' % var  # e.g. icclim_TG90p
                    else:
                        operation = 'Icclim%s10p' % var
                        func = 'icclim_%s10p' % var
                if operation is None:
                    # BUGFIX: previously fell through into 'operation in op'
                    # with operation=None, raising a cryptic TypeError
                    raise Exception('percentile indices are only implemented for temperature variables, got %s' % var)

                # load the appropriate operation from the icclim library
                ops = [op for op in dir(lic) if operation in op]
                if len(ops) == 0:
                    # BUGFIX: message previously passed a tuple instead of
                    # applying %-formatting
                    raise Exception('operator does not exist %s' % operation)

                # BUGFIX: replaced string-built 'exec' with getattr lookup
                percentile_dict = getattr(lic, ops[0]).get_percentile_dict(
                    arr, dt_arr, percentile, window_width)
                calc = [{'func': func,
                         'name': name,
                         'kwds': {'percentile_dict': percentile_dict}}]

                if polygons is None:
                    nc_indices.append(call(resource=resource,
                                           prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                           calc=calc,
                                           calc_grouping=calc_group,
                                           output_format='nc',
                                           dir_output=dir_output))
                else:
                    nc_indices.extend(clipping(resource=resource,
                                               prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                               calc=calc,
                                               calc_grouping=calc_group,
                                               output_format='nc',
                                               dir_output=dir_output,
                                               polygons=polygons,
                                               mosaic=mosaic))
    return nc_indices
def calc_indice_percentile(resources=[], variable=None, prefix=None, indices='TG90p',
                           refperiod=None, groupings='yr', polygons=None,
                           percentile=90, mosaic=False, dir_output=None,
                           dimension_map=None):
    """
    Calculates given percentile-based indices for suitable files in the
    appropriate time grouping and polygon.

    :param resources: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the netcdf file
                     (default=None: detected from each dataset)
    :param prefix: filename prefix (currently unused; output names are derived
                   from the dataset key)
    :param indices: list of indices (default='TG90p')
    :param refperiod: reference period string 'YYYYMMDD-YYYYMMDD'
    :param groupings: indices time aggregation (default='yr')
    :param polygons: list of polygons for spatial subsetting (default=None)
    :param percentile: percentile threshold (default=90)
    :param mosaic: passed through to clipping (merge polygons into one subset)
    :param dir_output: output directory for result files (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved
             into dir_output.

    NOTE(review): this is a duplicate of an earlier, identical definition in
    this module; being defined later, this copy is the one in effect.
    Consider removing one copy.
    """
    from os.path import exists
    import uuid
    from numpy import ma
    from datetime import datetime as dt
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time

    # normalise scalar arguments to lists
    if not isinstance(resources, list):
        resources = [resources]
    if not isinstance(indices, list):
        indices = [indices]
    if not isinstance(groupings, list):
        groupings = [groupings]
    if isinstance(refperiod, list):
        refperiod = refperiod[0]

    if refperiod is not None:
        start = dt.strptime(refperiod.split('-')[0], '%Y%m%d')
        end = dt.strptime(refperiod.split('-')[1], '%Y%m%d')
        time_range = [start, end]
    else:
        time_range = None

    if dir_output is not None and not exists(dir_output):
        makedirs(dir_output)

    ###################################################
    # Compute a custom percentile basis using ICCLIM. #
    ###################################################
    from ocgis.contrib import library_icclim as lic

    nc_indices = []
    nc_dic = sort_by_filename(resources)

    for grouping in groupings:
        calc_group = calc_grouping(grouping)
        for key in nc_dic.keys():
            resource = nc_dic[key]
            if variable is None:
                variable = get_variable(resource)
            # subset the reference period (and polygons, when requested) into
            # a temporary netcdf used to derive the percentile basis
            if polygons is None:
                nc_reference = call(resource=resource,
                                    prefix=str(uuid.uuid4()),
                                    time_range=time_range,
                                    output_format='nc',
                                    dir_output=dir_output)
            else:
                nc_reference = clipping(resource=resource,
                                        prefix=str(uuid.uuid4()),
                                        time_range=time_range,
                                        output_format='nc',
                                        polygons=polygons,
                                        dir_output=dir_output,
                                        mosaic=mosaic)

            arr = ma.masked_array(get_values(resource=nc_reference))
            dt_arr = ma.masked_array(get_time(resource=nc_reference))
            window_width = 5  # window (days) used by ICCLIM's percentile basis

            for indice in indices:
                name = indice.replace('_', str(percentile))
                var = indice.split('_')[0]

                operation = None
                if 'T' in var:
                    if percentile >= 50:
                        operation = 'Icclim%s90p' % var
                        func = 'icclim_%s90p' % var  # e.g. icclim_TG90p
                    else:
                        operation = 'Icclim%s10p' % var
                        func = 'icclim_%s10p' % var
                if operation is None:
                    # BUGFIX: previously fell through into 'operation in op'
                    # with operation=None, raising a cryptic TypeError
                    raise Exception('percentile indices are only implemented for temperature variables, got %s' % var)

                # load the appropriate operation from the icclim library
                ops = [op for op in dir(lic) if operation in op]
                if len(ops) == 0:
                    # BUGFIX: message previously passed a tuple instead of
                    # applying %-formatting
                    raise Exception('operator does not exist %s' % operation)

                # BUGFIX: replaced string-built 'exec' with getattr lookup
                percentile_dict = getattr(lic, ops[0]).get_percentile_dict(
                    arr, dt_arr, percentile, window_width)
                calc = [{'func': func,
                         'name': name,
                         'kwds': {'percentile_dict': percentile_dict}}]

                if polygons is None:
                    nc_indices.append(call(resource=resource,
                                           prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                           calc=calc,
                                           calc_grouping=calc_group,
                                           output_format='nc',
                                           dir_output=dir_output))
                else:
                    nc_indices.extend(clipping(resource=resource,
                                               prefix=key.replace(variable, name).replace('_day_', '_%s_' % grouping),
                                               calc=calc,
                                               calc_grouping=calc_group,
                                               output_format='nc',
                                               dir_output=dir_output,
                                               polygons=polygons,
                                               mosaic=mosaic))
    return nc_indices