Example #1
def test_get_time():
    timestamps = utils.get_time(local_path(TESTDATA["cmip5_tasmax_2007_nc"]))
    assert 12 == len(timestamps)

    timestamps = utils.get_time(local_path(TESTDATA["cordex_tasmax_2007_nc"]))
    assert 12 == len(timestamps)

    values = utils.get_values(
        [local_path(TESTDATA["cordex_tasmax_2006_nc"]), local_path(TESTDATA["cordex_tasmax_2007_nc"])]
    )
    assert 23 == len(values)
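For orientation, a minimal sketch of what a get_time helper like the one tested here typically does, assuming a CF-style 'time' variable with a 'units' (and optionally 'calendar') attribute; the actual flyingpigeon.utils.get_time may differ in signature and behaviour:

from netCDF4 import Dataset, num2date

def get_time_sketch(nc_file):
    # Return the timesteps of a netCDF file as datetime-like objects.
    ds = Dataset(nc_file)
    time_var = ds.variables['time']  # assumption: CF-style time coordinate
    calendar = getattr(time_var, 'calendar', 'standard')
    timestamps = num2date(time_var[:], time_var.units, calendar)
    ds.close()
    return list(timestamps)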
Example #2
def eval_timerange(resource, time_range):
  """
  quality checker if given time_range is covered by timesteps in resource files

  :param resource: input netCDF files 
  :param time_range: start and end date of time range [datetime,datetime]

  :returns [datetime,datetime]: time_range
  """
  from flyingpigeon.utils import get_time

  logger.info('time_range: %s' % time_range)

  if type(resource) != str: 
    resource.sort()
  time = get_time(resource)
  start = time[0]
  end = time[-1]

  if (time_range[0] < start or time_range[0] > end ):
    logger.debug('time range start %s not in input dataset covering: %s to %s' %  (time_range[0] , start, end))
    time_range[0] = start
    logger.debug('time_range start changed to first timestep of dataset')
  if (time_range[1] > end or time_range[1] < start ):
    logger.debug('time range end %s not in input dataset covering: %s to %s' %  (time_range[1] , start, end))
    time_range[1] = end
    logger.debug('time_range end changed to last timestep of dataset')
  if (time_range[0] > time_range[1]):
    time_range = time_range[::-1]
    logger.debug('time range reversed! start was later than end ')
  logger.info('time range start and end set')
  return time_range
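A hypothetical usage sketch of eval_timerange as defined above; the file paths are placeholders, and the requested range is deliberately wider than the files' coverage, so it gets clipped:

from datetime import datetime

resource = ['tasmax_2006.nc', 'tasmax_2007.nc']  # placeholder paths
requested = [datetime(2000, 1, 1), datetime(2020, 12, 31)]
start, end = eval_timerange(resource, requested)  # clipped to dataset coverage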
Example #3
  def execute(self):
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.utils import sort_by_filename, archive, get_values, get_time
        
    ncs = self.getInputValues(identifier='netcdf_file')
    logger.info("ncs: %s " % ncs) 
    coords = self.getInputValues(identifier='coords')
    logger.info("coords %s", coords)
    filenames = []    
    nc_exp = sort_by_filename(ncs, historical_concatination=True)
    
    #(fp_tar, tarout_file) = tempfile.mkstemp(dir=".", suffix='.tar')
    #tar = tarfile.open(tarout_file, "w")

    from numpy import savetxt, column_stack
    from shapely.geometry import Point
    
    for key in nc_exp.keys():
      try:
        logger.info('start calculation for %s ' % key )
        ncs = nc_exp[key]
        times = get_time(ncs)
        concat_vals = ['%s-%02d-%02d_%02d:%02d:%02d' %
                       (t.year, t.month, t.day, t.hour, t.minute, t.second) for t in times]
        header = 'date_time'
        filename = '%s.csv' % key
        filenames.append(filename) 
        
        for p in coords:
          try: 
            self.status.set('processing point : {0}'.format(p), 20)
            # define the point:  
            p = p.split(',')
            point = Point(float(p[0]), float(p[1]))       
            
            # get the values
            timeseries = call(resource=ncs, geom=point, select_nearest=True)
            vals = get_values(timeseries)
            
            # concatenation of values 
            header = header + ',%s-%s' % (p[0], p[1])
            concat_vals = column_stack([concat_vals, vals])
          except Exception as e: 
            logger.debug('failed for point %s %s' % (p , e))
        self.status.set('*** all points processed for {0} ****'.format(key), 50)
        savetxt(filename, concat_vals, fmt='%s', delimiter=',', header=header)
      except Exception as e: 
        logger.debug('failed for %s %s' % (key, e))

    ### set the outputs
    self.status.set('*** creating output tar archive ****',90) 
    tarout_file = archive(filenames)
    self.tarout.setValue( tarout_file )
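The column_stack/savetxt pattern used above can be exercised standalone; a self-contained sketch with made-up coordinates and values:

from numpy import column_stack, savetxt

dates = ['2007-01-16_12:00:00', '2007-02-15_00:00:00']  # illustrative timestamps
concat_vals = dates
header = 'date_time'
points = {'10.0-52.5': [274.1, 275.3], '11.0-53.0': [273.8, 274.9]}  # made-up values
for label, vals in points.items():
    header = header + ',%s' % label
    concat_vals = column_stack([concat_vals, vals])  # one column per point
savetxt('points.csv', concat_vals, fmt='%s', delimiter=',', header=header)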
Example #4
  def execute(self):
    from flyingpigeon.ocgis_module import call
    from flyingpigeon.utils import get_time, get_variable, sort_by_filename
    
    from datetime import datetime as dt
    from netCDF4 import Dataset
    from numpy import savetxt, column_stack, squeeze
    from shapely.geometry import Point
    
    ncs = self.getInputValues(identifier='netcdf_file')
    logging.info("ncs: %s " % ncs) 
    coords = self.getInputValues(identifier='coords')
    logging.info("coords %s", coords)

 
    nc_exp = sort_by_filename(ncs) # dictionary {experiment:[files]}
    filenames = []
    
    (fp_tar, tarout_file) = tempfile.mkstemp(dir=".", suffix='.tar')
    tar = tarfile.open(tarout_file, "w")
    
    for key in nc_exp.keys():
      logging.info('start calculation for %s ' % key )
      ncs = nc_exp[key]
      nc = ncs[0]
      
      times = get_time(nc)
      var = get_variable(nc)
      
      concat_vals = [t.strftime('%Y-%m-%d_%H:%M:%S') for t in times]
      header = 'date_time'
      filename = '%s.csv' % key
      filenames.append(filename) 
      
      for ugid, p in enumerate(coords, start=1):
        self.status.set('processing point : {0}'.format(p), 20)
        p = p.split(',')
        self.status.set('split x and y coord : {0}'.format(p), 20)
        point = Point(float(p[0]), float(p[1]))
        
        #get the timeseries at gridpoint
        timeseries = call(resource=ncs, geom=point, select_nearest=True)
        
        ds = Dataset(timeseries)
        vals = squeeze(ds.variables[var])
        header = header + ',%s_%s' % (p[0], p[1])
        concat_vals = column_stack([concat_vals, vals])

      savetxt(filename, concat_vals, fmt='%s', delimiter=',', header=header)
      tar.add( filename )
      
    tar.close()
    self.tarout.setValue( tarout_file )
Example #5
def spaghetti(resouces, variable=None, title=None, dir_out=None):
  """
  retunes a png file containing the appropriate spaghetti plot. 
  
  :param resouces: list of files containing the same variable 
  :param variable: variable to be visualised, if None (default) variable will be detected
  :param title: sting to be used as title
  :param dir_out: directory for output files
  """

  fig = plt.figure(figsize=(20,10), dpi=600, facecolor='w', edgecolor='k')

  logger.debug('Start visualisation spaghetti plot')
  
  # === prepare environment
  if type(resouces) == str: 
    resouces = list([resouces])    
  if variable is None:
    variable = utils.get_variable(resouces[0])
  if title is None:
    title = "Field mean of %s " % variable
  if dir_out is None: 
    dir_out = os.curdir

  try: 
    o1 , output_png = mkstemp(dir=dir_out, suffix='.png')
    
    for c , nc in enumerate(resouces):
      # get timestamps
      try: 
        d =  utils.get_time(nc) # [datetime.strptime(elem, '%Y-%m-%d') for elem in strDate[0]]
        
        dt = [datetime.strptime(str(i), '%Y-%m-%d %H:%M:%S') for i in d ]
        ds=Dataset(nc)
        data = np.squeeze(ds.variables[variable][:])
        if len(data.shape) == 3: 
          meanData = np.mean(data,axis=1)
          ts = np.mean(meanData,axis=1)
        else: 
          ts = data
        plt.plot( dt,ts )
        #fig.line( dt,ts )
      except Exception as e:
        logger.debug('lineplot failed for %s: %s\n' % (nc, e))

      # plot into current figure
      # , legend= nc 
    
    #fig.legend()[0].orientation = "bottom_left"
    # fig.legend().orientation = "bottom_left"
    plt.title(title, fontsize=20)
    plt.grid()# .grid_line_alpha=0.3
    #lt.rcParams.update({'font.size': 22})
    #window_size = 30
    #window = np.ones(window_size)/float(window_size)
    fig.savefig(output_png)
    #bplt.hold('off')
    
    plt.close()
    
    logger.debug('timeseries spaghetti plot done for %s with %s lines.' % (variable, c)) 
  except Exception as e:
    msg = 'matplotlib spaghetti plot failed for %s: %s' % (variable, e)
    logger.debug(msg)
    #raise Exception(msg) 
  return output_png 
Example #6
    def execute(self):
        logger.info('Start process')
        from datetime import datetime as dt
        from flyingpigeon import weatherregimes as wr
        from tempfile import mkstemp
        
        ################################
        # reading in the input arguments
        ################################
        try: 
            resource = self.getInputValues(identifier='resource')
            url_Rdat = self.getInputValues(identifier='Rdat')[0]
            url_dat = self.getInputValues(identifier='dat')[0]
            url_ref_file = self.getInputValues(identifier='netCDF') # can be None
            season = self.getInputValues(identifier='season')[0]
            period = self.getInputValues(identifier='period')[0]            
            anualcycle = self.getInputValues(identifier='anualcycle')[0]
        except Exception as e: 
            logger.debug('failed to read in the arguments %s ' % e)
        
        try: 
            start = dt.strptime(period.split('-')[0] , '%Y%m%d')
            end = dt.strptime(period.split('-')[1] , '%Y%m%d')
            # kappa = int(self.getInputValues(identifier='kappa')[0])
            
            logger.info('period %s' % str(period))
            logger.info('season %s' % str(season))
            logger.info('read in the arguments')
            logger.info('url_ref_file: %s' % url_ref_file)
            logger.info('url_Rdat: %s' % url_Rdat)
            logger.info('url_dat: %s' % url_dat)
        except Exception as e: 
            logger.debug('failed to convert arguments %s ' % e)
           
        ############################
        # fetching training data 
        ############################
        
        from flyingpigeon.utils import download, get_time
        from os.path import abspath
        
        try:
          dat = abspath(download(url_dat))
          Rdat = abspath(download(url_Rdat))
          logger.info('training data fetched')
        except Exception as e:
          logger.error('failed to fetch training data %s' % e)
          
        ############################################################    
        ### get the required bbox and time region from resource data
        ############################################################        
        # from flyingpigeon.weatherregimes import get_level
        
        from flyingpigeon.ocgis_module import call 
        from flyingpigeon.utils import get_variable
        time_range = [start, end]

        variable = get_variable(resource)

        if len(url_ref_file) > 0:
            ref_file = download(url_ref_file[0])  
            model_subset = call(resource=resource, variable=variable, 
                time_range=time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
                regrid_destination=ref_file, regrid_options='bil')
            logger.info('Dataset subset with regridding done: %s ' % model_subset)
        else:
            model_subset = call(resource=resource, variable=variable, 
                time_range=time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
                )
            logger.info('Dataset time period extracted: %s ' % model_subset)
            
        
        ##############################################
        ### computing anomalies 
        ##############################################
        
        cycst = anualcycle.split('-')[0]
        cycen = anualcycle.split('-')[1]
        reference = [dt.strptime(cycst,'%Y%m%d'), dt.strptime(cycen,'%Y%m%d')]
        model_anomal = wr.get_anomalies(model_subset, reference=reference)

        #####################
        ### extracting season
        #####################
        model_season = wr.get_season(model_anomal, season=season)

        #######################
        ### call the R scripts
        #######################
        import shlex
        import subprocess
        from flyingpigeon import config
        from os.path import curdir, exists, join

        try:
          rworkspace = curdir
          Rsrc = config.Rsrc_dir() 
          Rfile = 'weatherregimes_projection.R'
          
          yr1 = start.year
          yr2 = end.year
          time = get_time(model_season, format='%Y%m%d')

          #ip, output_graphics = mkstemp(dir=curdir ,suffix='.pdf')
          ip, file_pca = mkstemp(dir=curdir ,suffix='.txt')
          ip, file_class = mkstemp(dir=curdir ,suffix='.Rdat')
          ip, output_frec = mkstemp(dir=curdir ,suffix='.txt')
                    
          args = ['Rscript', join(Rsrc,Rfile), '%s/' % curdir, 
                  '%s/' % Rsrc, 
                  '%s' % model_season, 
                  '%s' % variable,
                  '%s' % str(time).strip("[]").replace("'","").replace(" ",""),
            #      '%s' % output_graphics,
                  '%s' % dat, 
                  '%s' % Rdat, 
                  '%s' % file_pca,
                  '%s' % file_class, 
                  '%s' % output_frec,      
                  '%s' % season, 
                  '%s' % start.year, 
                  '%s' % end.year,                  
                  '%s' % 'MODEL']

          logger.info('R call built')
        except Exception as e: 
          msg = 'failed to build the R command %s' % e
          logger.error(msg)  
          raise Exception(msg)
        try:
          output,error = subprocess.Popen(args, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() #, shell=True
          logger.info('R outlog info:\n %s ' % output)
          logger.debug('R outlog errors:\n %s ' % error)
          if len(output) > 0:            
            self.status.set('**** weatherregime in R succeeded', 90)
          else:
            logger.error('no output returned from R call')
        except Exception as e: 
          msg = 'weatherregime in R failed: %s ' % e
          logger.error(msg)  
          raise Exception(msg)

        ############################################
        ### set the outputs
        ############################################

        #self.Routput_graphic.setValue( output_graphics )
        self.output_pca.setValue( file_pca )
        self.output_classification.setValue( file_class )
        self.output_netcdf.setValue( model_season )
        self.output_frequency.setValue( output_frec )
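A self-contained sketch of the 'YYYYMMDD-YYYYMMDD' period parsing this process relies on (the value is illustrative); note that, as fixed above, the end of the annual-cycle reference must come from the second element of the split:

from datetime import datetime as dt

period = '19700101-19991231'  # illustrative value
start = dt.strptime(period.split('-')[0], '%Y%m%d')
end = dt.strptime(period.split('-')[1], '%Y%m%d')
assert start < end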
Example #7
    def execute(self):
        logger.info('Start process')
      
        try: 
            logger.info('read in the arguments')
            resources = self.getInputValues(identifier='resources')
            method = self.getInputValues(identifier='method')
            time_region = self.getInputValues(identifier='time_region')[0]
            bbox = self.getInputValues(identifier='BBox')[0]
            
            logger.info('bbox %s' % str(bbox))
            logger.info('time_region %s' % str(time_region))
            logger.info('method: %s' % str(method))
            

        except Exception as e: 
            logger.error('failed to read in the arguments %s ' % e)
        
        #bbox = '-80,22.5,50,70'
        logger.info('bbox is set to %s' % bbox)     

        #####################    
        ### get the required bbox from resource
        #####################
        # from flyingpigeon.ocgis_module import call 
        
        from flyingpigeon.utils import sort_by_filename, get_time # , calc_grouping
        from flyingpigeon import weatherclass as wc
        from flyingpigeon.visualisation import plot_tSNE, plot_kMEAN, concat_images, plot_pressuremap
        
        from datetime import datetime as dt
        from numpy import savetxt, column_stack
        
        import tarfile
        
        from cdo import *
        cdo = Cdo()        
        
        # grouping = calc_grouping(time_region)
        ncs = sort_by_filename(resources, historical_concatination=True)

        png_clusters = []
        txt_info = []
        png_pressuremaps = []
        
        try:
          # open tar files
          tar_info = tarfile.open('info.tar', "w")
          logger.info('tar files prepared')
        except:
          msg = 'tar file preparation failed'
          logger.exception(msg)
          raise Exception(msg)

        
        for key in ncs.keys():
          if len(ncs[key])>1:
            input = cdo.mergetime(input=ncs[key], output='merge.nc' )
          elif len(ncs[key])==1:
            input = ncs[key]
          else:
            logger.debug('invalid number of input files for dataset %s' % key)            
 
          #for tr in time_region:
          if not time_region == 'None':
            nc_grouped = cdo.selmon(time_region, input=input, output='grouped.nc')
          else:
            nc_grouped = input 
          
      #     for bb in bbox:    
          nc  = cdo.sellonlatbox('%s' % bbox, input=nc_grouped, output='subset.nc')
          logger.info('nc subset: %s ' % nc)
          
          try:
            vals, pca = wc.get_pca(nc)
            logger.info('PCA calculated')
          except:
            logger.debug('failed to calculate PCs')
            raise
          
          for md in method:
            try:
              if md == 'tSNE':
                data = wc.calc_tSNE(pca)
                png_clusters.append(plot_tSNE(data,title='tSNE month: %s [lonlat: %s]' % (time_region,bbox), sub_title='file: %s' % key))
                logger.info('tSNE calculated for %s ' % key)
              if md == 'kMEAN':
                kmeans = wc.calc_kMEAN(pca)
                c = kmeans.predict(pca)
                times = get_time(nc)
                timestr = [t.strftime('%Y-%m-%d_%H:%M:%S') for t in times]
                tc = column_stack([timestr, c])
                fn = '%s.txt' % key
                
                savetxt(fn, tc, fmt='%s', header='Date_Time WeatherRegime')

                tar_info.add(fn) #, arcname = basename(nc) 
                
                png_clusters.append(plot_kMEAN(kmeans, pca, title='kMEAN month: %s [lonlat: %s]' % (time_region,bbox), sub_title='file: %s' % key))
                logger.info('kMEAN calculated for %s ' % key)
                
                subplots = []
                for i in range(4): 
                    subplots.append(plot_pressuremap((vals[c==i]/100), title='Weather Regime %s: Month %s ' % (i, time_region), sub_title='file: %s' % key))

                
                from PIL import Image
                import sys
                from tempfile import mkstemp

                open_subplots = map(Image.open, subplots)
                w = max(i.size[0] for i in open_subplots)
                h = max(i.size[1] for i in open_subplots)
                
                result = Image.new("RGB", (w*2, h*2))
                # p = h / len(open_subplots)
                c = 0 
                for i ,iw in enumerate([0,w]):
                    for j, jh in enumerate([0,h]):
                        oi = open_subplots[c] 
                        c = c +1
                    
                        cw = oi.size[0]
                        ch = oi.size[1]

                        box = [iw,jh,iw+cw,jh+ch]
                        result.paste(oi, box=box)

                ip, pressuremap = mkstemp(dir='.',suffix='.png')
                result.save(pressuremap)
                png_pressuremaps.append(pressuremap)
                
            except:
              logger.debug('failed to calculate cluster for %s' % key )
              raise

        c_clusters = concat_images(png_clusters)
        c_maps = concat_images(png_pressuremaps)
        
              
        try:
          tar_info.close()  
          logger.info('tar files closed')
        except Exception as e:
          logger.exception('tar file closing failed')
    

        # call 
        # self.output_nc.setValue( nc )
        self.output_clusters.setValue( c_clusters  )
        self.output_maps.setValue( c_maps  )
        self.output_info.setValue('info.tar')
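A self-contained sketch of the 2x2 compositing of the four pressure-map panels done above; the png paths are placeholders:

from PIL import Image

subplots = ['wr0.png', 'wr1.png', 'wr2.png', 'wr3.png']  # placeholder paths
images = [Image.open(p) for p in subplots]
w = max(im.size[0] for im in images)
h = max(im.size[1] for im in images)
grid = Image.new('RGB', (w * 2, h * 2))
for idx, im in enumerate(images):
    # column-major placement: (0,0), (0,h), (w,0), (w,h)
    grid.paste(im, (w * (idx // 2), h * (idx % 2)))
grid.save('pressuremaps_2x2.png')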
Example #8
def spaghetti(resouces, variable=None, title=None, dir_out=None):
  """
  creates a png file containing the appropriate spaghetti plot as a field mean of the values. 
  
  :param resouces: list of files containing the same variable 
  :param variable: variable to be visualised. If None (default), variable will be detected
  :param title: string to be used as title
  :param dir_out: directory for output files
  
  :returns str: path to png file
  """
  
  try:
    fig = plt.figure(figsize=(20,10), dpi=600, facecolor='w', edgecolor='k')
    logger.debug('Start visualisation spaghetti plot')
    
    # === prepare environment
    if type(resouces) != list: 
      resouces = [resouces]    
    if variable is None:
      variable = utils.get_variable(resouces[0])
    if title is None:
      title = "Field mean of %s " % variable
    if dir_out is None: 
      dir_out = os.curdir
    logger.info('plot values preparation done')  
  except Exception as e:
    msg = "plot values preparation failed: %s" % (e)
    logger.exception(msg)
    raise Exception(msg)

  try: 
    o1 , output_png = mkstemp(dir=dir_out, suffix='.png')
    
    for c , nc in enumerate(resouces):
      # get timestamps
      try: 
        d = utils.get_time(nc) # [datetime.strptime(elem, '%Y-%m-%d') for elem in strDate[0]]
        
        dt = [datetime.strptime(str(i), '%Y-%m-%d %H:%M:%S') for i in d ]
        ds=Dataset(nc)
        data = np.squeeze(ds.variables[variable][:])
        if len(data.shape) == 3: 
          meanData = np.mean(data,axis=1)
          ts = np.mean(meanData,axis=1)
        else: 
          ts = data[:]
        plt.plot( dt,ts )
        #fig.line( dt,ts )
      except Exception as e:
        msg = "lineplot failed for %s" % (nc)
        logger.exception(msg)
        raise Exception(msg)
      
    plt.title(title, fontsize=20)
    plt.grid()
    fig.savefig(output_png)
    plt.close()
    logger.info('timeseries spaghetti plot done for %s with %s lines.'% (variable, c)) 
  except Exception as e:
    msg = 'matplotlib spaghetti plot failed: %s' % e
    logger.exception(msg)
    raise Exception(msg) 
  return output_png 
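A hypothetical call of the spaghetti function above; the file paths are placeholders:

ncs = ['tas_member1.nc', 'tas_member2.nc', 'tas_member3.nc']  # placeholder paths
png = spaghetti(ncs, variable='tas', title='Field mean of tas', dir_out='.')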
Example #9
def calc_indice_percentile(resources=[], variable=None, 
    prefix=None, indices='TG90p', refperiod=None,
    groupings='yr', polygons=None, percentile=90, mosaik = False, 
    dir_output=None, dimension_map = None):
    """
    Calculates given indices for suitable files in the appropriate time grouping and polygon.

    :param resource: list of filenames in drs convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='TG90p')
    :param prefix: filename prefix 
    :param refperiod: reference period tuple = (start,end)
    :param groupings: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into dir_output
    """
    from os.path import join, dirname, exists
    from os import remove, makedirs
    import uuid
    from numpy import ma 
    from datetime import datetime as dt

    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time
    
    if type(resources) != list: 
      resources = list([resources])
    if type(indices) != list: 
      indices = list([indices])
      
    if type(groupings) != list: 
      groupings = list([groupings])
      
    if type(refperiod) == list: 
      refperiod = refperiod[0]
      
    if refperiod != None:
      start = dt.strptime(refperiod.split('-')[0] , '%Y%m%d')
      end = dt.strptime(refperiod.split('-')[1] , '%Y%m%d')
      time_range = [start, end]
    else:  
      time_range = None
    
    if dir_output != None:
      if not exists(dir_output): 
        makedirs(dir_output)
    
    ########################################################################################################################
    # Compute a custom percentile basis using ICCLIM. ######################################################################
    ########################################################################################################################

    from ocgis.contrib import library_icclim  as lic 
    nc_indices = []
    nc_dic = sort_by_filename(resources)
    
    for grouping in groupings:
      calc_group = calc_grouping(grouping)
      for key in nc_dic.keys():
        resource = nc_dic[key]
        if variable == None: 
          variable = get_variable(resource)
        if polygons == None:
          nc_reference = call(resource=resource, 
            prefix=str(uuid.uuid4()), 
            time_range=time_range,
            output_format='nc', 
            dir_output=dir_output)
        else:
          nc_reference = clipping(resource=resource, 
            prefix=str(uuid.uuid4()),
            time_range=time_range, 
            output_format='nc', 
            polygons=polygons,
            dir_output=dir_output, 
            mosaik = mosaik)
          
        arr = get_values(nc_files=nc_reference)
        dt_arr = get_time(nc_files=nc_reference)
        arr = ma.masked_array(arr)
        dt_arr = ma.masked_array(dt_arr)
        window_width = 5
        
        for indice in indices:
          name = indice.replace('_', str(percentile))
          var = indice.split('_')[0]

          operation = None
          if 'T' in var: 
            if percentile >= 50: 
              operation = 'Icclim%s90p' % var
              func = 'icclim_%s90p' % var # icclim_TG90p
            else: 
              operation = 'Icclim%s10p' % var
              func = 'icclim_%s10p' % var
              
          ################################
          # load the appropriate operation
          ################################

          ops = [op for op in dir(lic) if operation in op]
          if len(ops) == 0:
              raise Exception("operator does not exist %s", operation)
          
          exec "percentile_dict = lic.%s.get_percentile_dict(arr, dt_arr, percentile, window_width)" % ops[0]
          calc = [{'func': func, 'name': name, 'kwds': {'percentile_dict': percentile_dict}}]
          
          if polygons == None:
            nc_indices.append(call(resource=resource, 
                                prefix=key.replace(variable,name).replace('_day_', '_%s_' % grouping), 
                                calc=calc, 
                                calc_grouping=calc_group, 
                                output_format='nc',
                                dir_output=dir_output))
          else: 
            nc_indices.extend(clipping(resource=resource, 
                                prefix=key.replace(variable,name).replace('_day_', '_%s_' % grouping), 
                                calc=calc, 
                                calc_grouping=calc_group, 
                                output_format='nc',
                                dir_output=dir_output,
                                polygons=polygons, 
                                mosaik = mosaik,
                                ))
    return nc_indices
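The exec statement above can be replaced by getattr, which resolves the ICCLIM wrapper class by name without string execution; a sketch under the same assumptions about the ocgis.contrib.library_icclim layout:

from ocgis.contrib import library_icclim as lic

def get_percentile_dict_sketch(op_name, arr, dt_arr, percentile, window_width):
    # Resolve lic.<op_name> by name and compute its percentile basis.
    icclim_class = getattr(lic, op_name)
    return icclim_class.get_percentile_dict(arr, dt_arr, percentile, window_width)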
Example #10
    def execute(self):
        logger.info('Start process')
        from datetime import datetime as dt
        from flyingpigeon import weatherregimes as wr
        from tempfile import mkstemp

        ################################
        # reading in the input arguments
        ################################
        try:
            resource = self.getInputValues(identifier='resource')
            url_Rdat = self.getInputValues(identifier='Rdat')[0]
            url_dat = self.getInputValues(identifier='dat')[0]
            url_ref_file = self.getInputValues(
                identifier='netCDF')  # can be None
            season = self.getInputValues(identifier='season')[0]
            period = self.getInputValues(identifier='period')[0]
            anualcycle = self.getInputValues(identifier='anualcycle')[0]
        except Exception as e:
            logger.debug('failed to read in the arguments %s ' % e)

        try:
            start = dt.strptime(period.split('-')[0], '%Y%m%d')
            end = dt.strptime(period.split('-')[1], '%Y%m%d')
            # kappa = int(self.getInputValues(identifier='kappa')[0])

            logger.info('period %s' % str(period))
            logger.info('season %s' % str(season))
            logger.info('read in the arguments')
            logger.info('url_ref_file: %s' % url_ref_file)
            logger.info('url_Rdat: %s' % url_Rdat)
            logger.info('url_dat: %s' % url_dat)
        except Exception as e:
            logger.debug('failed to convert arguments %s ' % e)

        ############################
        # fetching training data
        ############################

        from flyingpigeon.utils import download, get_time
        from os.path import abspath

        try:
            dat = abspath(download(url_dat))
            Rdat = abspath(download(url_Rdat))
            logger.info('training data fetched')
        except Exception as e:
            logger.error('failed to fetch training data %s' % e)

        ############################################################
        ### get the required bbox and time region from resource data
        ############################################################
        # from flyingpigeon.weatherregimes import get_level

        from flyingpigeon.ocgis_module import call
        from flyingpigeon.utils import get_variable
        time_range = [start, end]

        variable = get_variable(resource)

        if len(url_ref_file) > 0:
            ref_file = download(url_ref_file[0])
            model_subset = call(
                resource=resource,
                variable=variable,
                time_range=
                time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
                regrid_destination=ref_file,
                regrid_options='bil')
            logger.info('Dataset subset with regridding done: %s ' %
                        model_subset)
        else:
            model_subset = call(
                resource=resource,
                variable=variable,
                time_range=
                time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
            )
            logger.info('Dataset time period extracted: %s ' % model_subset)

        ##############################################
        ### computing anomalies
        ##############################################

        cycst = anualcycle.split('-')[0]
        cycen = anualcycle.split('-')[1]
        reference = [
            dt.strptime(cycst, '%Y%m%d'),
            dt.strptime(cycen, '%Y%m%d')
        ]
        model_anomal = wr.get_anomalies(model_subset, reference=reference)

        #####################
        ### extracting season
        #####################
        model_season = wr.get_season(model_anomal, season=season)

        #######################
        ### call the R scripts
        #######################
        import shlex
        import subprocess
        from flyingpigeon import config
        from os.path import curdir, exists, join

        try:
            rworkspace = curdir
            Rsrc = config.Rsrc_dir()
            Rfile = 'weatherregimes_projection.R'

            yr1 = start.year
            yr2 = end.year
            time = get_time(model_season, format='%Y%m%d')

            #ip, output_graphics = mkstemp(dir=curdir ,suffix='.pdf')
            ip, file_pca = mkstemp(dir=curdir, suffix='.txt')
            ip, file_class = mkstemp(dir=curdir, suffix='.Rdat')
            ip, output_frec = mkstemp(dir=curdir, suffix='.txt')

            args = [
                'Rscript',
                join(Rsrc, Rfile),
                '%s/' % curdir,
                '%s/' % Rsrc,
                '%s' % model_season,
                '%s' % variable,
                '%s' % str(time).strip("[]").replace("'", "").replace(" ", ""),
                #      '%s' % output_graphics,
                '%s' % dat,
                '%s' % Rdat,
                '%s' % file_pca,
                '%s' % file_class,
                '%s' % output_frec,
                '%s' % season,
                '%s' % start.year,
                '%s' % end.year,
                '%s' % 'MODEL'
            ]

            logger.info('R call built')
        except Exception as e:
            msg = 'failed to build the R command %s' % e
            logger.error(msg)
            raise Exception(msg)
        try:
            output, error = subprocess.Popen(
                args, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()  #, shell=True
            logger.info('R outlog info:\n %s ' % output)
            logger.debug('R outlog errors:\n %s ' % error)
            if len(output) > 0:
                self.status.set('**** weatherregime in R succeeded', 90)
            else:
                logger.error('no output returned from R call')
        except Exception as e:
            msg = 'weatherregime in R failed: %s ' % e
            logger.error(msg)
            raise Exception(msg)

        ############################################
        ### set the outputs
        ############################################

        #self.Routput_graphic.setValue( output_graphics )
        self.output_pca.setValue(file_pca)
        self.output_classification.setValue(file_class)
        self.output_netcdf.setValue(model_season)
        self.output_frequency.setValue(output_frec)
Example #11
def spaghetti(resouces, variable=None, title=None, dir_out=None):
    """
    creates a png file containing the appropriate spaghetti plot as a field mean of the values.

    :param resouces: list of files containing the same variable
    :param variable: variable to be visualised. If None (default), variable will be detected
    :param title: string to be used as title
    :param dir_out: directory for output files

    :returns str: path to png file
    """

    try:
        fig = plt.figure(figsize=(20, 10),
                         dpi=600,
                         facecolor='w',
                         edgecolor='k')
        logger.debug('Start visualisation spaghetti plot')

        # === prepare environment
        if type(resouces) != list:
            resouces = [resouces]
        if variable is None:
            variable = utils.get_variable(resouces[0])
        if title is None:
            title = "Field mean of %s " % variable
        if dir_out is None:
            dir_out = os.curdir
        logger.info('plot values preparation done')
    except:
        msg = "plot values preparation failed"
        logger.exception(msg)
        raise Exception(msg)
    try:
        o1, output_png = mkstemp(dir=dir_out, suffix='.png')
        for c, nc in enumerate(resouces):
            # get timestamps
            try:
                d = utils.get_time(
                    nc
                )  # [datetime.strptime(elem, '%Y-%m-%d') for elem in strDate[0]]
                dt = [
                    datetime.strptime(str(i), '%Y-%m-%d %H:%M:%S') for i in d
                ]
                ds = Dataset(nc)
                data = np.squeeze(ds.variables[variable][:])
                if len(data.shape) == 3:
                    meanData = np.mean(data, axis=1)
                    ts = np.mean(meanData, axis=1)
                else:
                    ts = data[:]
                plt.plot(dt, ts)
                # fig.line( dt,ts )
            except:
                msg = "spaghetti plot failed for"
                logger.exception(msg)
                raise Exception(msg)

        plt.title(title, fontsize=20)
        plt.grid()
        fig.savefig(output_png)
        plt.close()
        logger.info('timeseries spaghetti plot done for %s with %s lines.' %
                    (variable, c))
    except:
        msg = 'matplotlib spaghetti plot failed'
        logger.exception(msg)
        raise Exception(msg)
    return output_png
Example #12
from os import listdir
from os.path import join
from flyingpigeon import utils
from flyingpigeon import metadata as md
from pandas import DataFrame
from flyingpigeon import calculation as cal

p = '/home/nils/data/AFR-44/tas/'
ncs = [
    join(p, nc) for nc in listdir(p)
    if not 'tas_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_KNMI-RACMO22T_v2_day'
    in nc
]
ncs_dic = utils.sort_by_filename(ncs)

ts = utils.get_time(ncs_dic[ncs_dic.keys()[0]])
data = cal.fieldmean(ncs_dic[ncs_dic.keys()[0]])
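A minimal sketch of the field mean computed by cal.fieldmean above, as an unweighted mean over the two spatial axes; the real flyingpigeon.calculation.fieldmean may apply latitude weighting:

import numpy as np
from netCDF4 import Dataset

def fieldmean_sketch(nc_file, variable):
    ds = Dataset(nc_file)
    data = np.squeeze(ds.variables[variable][:])  # expects (time, lat, lon)
    ds.close()
    return data.mean(axis=(1, 2))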
Example #13
def method_A(resource=[], start=None, end=None, timeslice=20, 
  variable=None, title=None, cmap='seismic' ):
  """returns the result
  
  :param resource: list of paths to netCDF files
  :param start: beginning of reference period (if None (default), the first year of the consistent ensemble will be detected)
  :param end: end of comparison period (if None (default), the last year of the consistent ensemble will be detected)
  :param timeslice: period length for mean calculation of reference and comparison period
  :param variable: variable name to be detected in the netCDF file. If not set (not recommended), the variable name will be detected
  :param title: str to be used as title for the signal map
  :param cmap: define the color scheme for signal map plotting 

  :return: signal.nc, low_agreement_mask.nc, high_agreement_mask.nc, graphic.png, text.txt
  """
  from os.path import split
  from cdo import Cdo
  cdo = Cdo()
  cdo.forceOutput = True 
  
  try: 
    # preparing the resource
#    from flyingpigeon.ocgis_module import call
    file_dic = sort_by_filename(resource, historical_concatination = True)
    #print file_dic
    logger.info('file names sorted; experiments: %s' % len(file_dic.keys()))
  except Exception as e:
    msg = 'failed to sort the input files'
    logger.exception(msg)
    raise Exception(msg)
  

  try:
    mergefiles = []
    for key in file_dic.keys():
      
      if type(file_dic[key]) == list and len(file_dic[key]) > 1:
        input = []
        for i in file_dic[key]:
          print i 
          input.extend([i.replace(' ','\\\ ')])
        mergefiles.append(cdo.mergetime(input=input, output=key+'_mergetime.nc'))
      else:
        mergefiles.extend(file_dic[key])
#      files.append(cdo.selyear('%s/%s' % (start1,end2), input = tmpfile , output =  key+'.nc' )) #python version
    logger.info('datasets merged %s ' % mergefiles)
  except Exception as e:
    msg = 'seltime and mergetime failed %s' % e
    logger.exception(msg)
    raise Exception(e)    
  
  try: 
    text_src = open('infiles.txt', 'a')
    for key in file_dic.keys():
      text_src.write(key + '\n')
    text_src.close()
  except Exception as e:
    msg = 'failed to write source textfile'
    logger.exception(msg)
    raise Exception(msg)
    
  # configure reference and compare period
  try: 
    if start == None:
      st_set = set()
      en_set = set()
      for f in mergefiles:
        print f
        times = get_time(f)
        st_set.update([times[0].year])
        if end == None: 
          en_set.update([times[-1].year])
      start = max(st_set)
      if end == None:
        end = min(en_set)
    logger.info('Start and End: %s - %s ' % (start, end))
    if start >= end: 
      logger.error('ensemble is inconsistent!!! start year is later than end year')
  except Exception as e:
    msg = 'failed to detect start and end times of the ensemble'
    logger.exception(msg)
    raise Exception(msg)

  # set the periods: 
  try: 
    start = int(start)
    end = int(end)
    if timeslice == None: 
      timeslice = int((end - start) / 3)
      if timeslice == 0: 
        timeslice = 1
    else: 
      timeslice = int(timeslice)
    start1 = start
    start2 = start1 + timeslice - 1 
    end1 = end - timeslice + 1
    end2 = end
    logger.info('timeslice and periods set')
  except Exception as e:
    msg = 'failed to set the periods'
    logger.exception(msg)
    raise Exception(msg)

  try:
    files = []
    for i, mf in enumerate(mergefiles):
      files.append(cdo.selyear('{0}/{1}'.format(start1,end2), input=[mf.replace(' ','\ ')] , output='file_{0}_.nc'.format(i) )) #python version
    logger.info('timeseries selected from defined start to end year')
  except Exception as e:
    msg = 'selyear failed'
    logger.exception(msg)
    raise Exception(msg)    

  try: 
    # ensemble mean 
    nc_ensmean = cdo.ensmean(input=files , output='nc_ensmean.nc')
    logger.info('ensemble mean calculation done')
  except Exception as e:
    msg = 'ensemble mean failed'
    logger.exception(msg)
    raise Exception(msg)
  
  try: 
    # ensemble std 
    nc_ensstd  = cdo.ensstd(input=files , output='nc_ensstd.nc')
    logger.info('ensemble std calculation done')
  except Exception as e:
    msg = 'ensemble std failed'
    logger.exception(msg)
    raise Exception(msg)
  
  # get the signal as the difference between the end period (last years) and the beginning (first years):
  try:
    selyearstart = cdo.selyear('%s/%s' % (start1,start2), input = nc_ensmean, output = 'selyearstart.nc' ) 
    selyearend = cdo.selyear('%s/%s' % (end1,end2), input = nc_ensmean, output = 'selyearend.nc' )
    meanyearst = cdo.timmean(input = selyearstart, output= 'meanyearst.nc')
    meanyearend = cdo.timmean(input = selyearend, output= 'meanyearend.nc')
    signal = cdo.sub(input=[meanyearend, meanyearst], output = 'signal.nc')
    logger.info('Signal calculation done')
  except Exception as e:
    msg = 'calculation of signal failed'
    logger.exception(msg)
    raise Exception(msg)
  
  # get the intermodel standard deviation (mean over whole period)
  try:
    #std_selyear = cdo.selyear('%s/%s' % (end1,end2), input=nc_ensstd, output='std_selyear.nc')
    #std = cdo.timmean(input = std_selyear, output = 'std.nc')
    
    std = cdo.timmean(input = nc_ensstd, output = 'std.nc')
    std2 = cdo.mulc('2', input = std, output = 'std2.nc')
    logger.info('calculation of internal model std for time period done')
  except Exception as e:
    msg = 'calculation of internal model std failed'
    logger.exception(msg) 
    raise Exception(msg)
  try:
    absolut = cdo.abs(input=signal, output='absolut_signal.nc')
    high_agreement_mask = cdo.gt(input=[absolut,std2],  output= 'large_change_with_high_model_agreement.nc')
    low_agreement_mask = cdo.lt(input=[absolut,std], output= 'small_signal_or_low_agreement_of_models.nc')
    logger.info('high and low mask done')
  except Exception as e:
    msg = 'calculation of robustness mask failed'
    logger.exception(msg)
    raise Exception(msg)
  
  try: 
    if variable == None: 
      variable = get_variable(signal)
    logger.info('variable to be plotted: %s' % variable)
    
    if title == None: 
      title='Change of %s (difference of mean %s-%s to %s-%s)' % (variable, end1, end2, start1, start2)  
    
    graphic = None
    graphic = map_ensembleRobustness(signal, high_agreement_mask, low_agreement_mask, 
              variable=variable, 
              cmap=cmap,
              title = title)
    
    logger.info('graphic generated')
  except Exception as e:
    msg = 'graphic generation failed: %s' % e
    logger.debug(msg)
    raise Exception(msg)

  return signal, low_agreement_mask, high_agreement_mask, graphic, text_src # 
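A hypothetical call of method_A as defined above; the file paths are placeholders, and start/end default to the years detected from the ensemble itself:

ncs = ['tas_model1_rcp85.nc', 'tas_model2_rcp85.nc']  # placeholder paths
signal, low_mask, high_mask, graphic, infiles = method_A(resource=ncs, timeslice=20)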
Example #14
def set_dynamic_md(resource):
    """
  Dynamic meta data like time frequency, spatial extent, start/end time, etc.
  :param resource: netCDF file where basic meta data should be set
  """
    from flyingpigeon.utils import get_timerange, get_time
    frequency = get_frequency(resource)

    time_coverage_start, time_coverage_end = get_timerange(resource)
    time_number_steps = len(get_time(resource))

    # min_lat, max_lat, min_lon, max_lon = get_extent(resource)

    ds = Dataset(resource, mode='a')

    try:
        driving_experiment = ds.driving_experiment
        ds.delncattr('driving_experiment')
    except Exception as e:
        LOGGER.error(e)
        driving_experiment = ''

    try:
        driving_experiment_name = ds.driving_experiment_name
        ds.delncattr('driving_experiment_name')
    except Exception as e:
        LOGGER.error(e)
        driving_experiment_name = ''

    try:
        driving_model_ensemble_member = ds.driving_model_ensemble_member
        ds.delncattr('driving_model_ensemble_member')
    except Exception as e:
        LOGGER.error(e)
        driving_model_ensemble_member = ''

    try:
        experiment = ds.experiment
        ds.delncattr('experiment')
    except Exception as e:
        LOGGER.error(e)
        experiment = ''
    try:
        tracking_id = ds.tracking_id
        ds.delncattr('tracking_id')
    except Exception as e:
        LOGGER.error(e)
        tracking_id = ''

    try:
        experiment_id = ds.experiment_id
        ds.delncattr('experiment_id')
    except Exception as e:
        LOGGER.error(e)
        experiment_id = ''

    try:
        project_id = ds.project_id
        ds.delncattr('project_id')
    except Exception as e:
        LOGGER.error(e)
        project_id = ''

    try:
        institution_id = ds.institution_id
        ds.delncattr('institution_id')
    except Exception as e:
        LOGGER.error(e)
        institution_id = ''

    try:
        model_version_id = ds.model_version_id
        ds.delncattr('model_version_id')
    except Exception as e:
        LOGGER.error(e)
        model_version_id = ''

    try:
        driving_model_id = ds.driving_model_id
        ds.delncattr('driving_model_id')
    except Exception as e:
        LOGGER.error(e)
        driving_model_id = ''

    try:
        driving_ensemble_member = ds.driving_ensemble_member
        ds.delncattr('driving_ensemble_member')
    except Exception as e:
        LOGGER.error(e)
        driving_ensemble_member = ''

    try:
        model_id = ds.model_id
        ds.delncattr('model_id')
    except Exception as e:
        LOGGER.error(e)
        model_id = ''

    try:
        contact = ds.contact
        ds.delncattr('contact')
    except Exception as e:
        LOGGER.error(e)
        contact = ''
    try:
        driving_experiment_id = ds.driving_experiment_id
        ds.delncattr('driving_experiment_id')
    except Exception as e:
        LOGGER.error(e)
        driving_experiment_id = ''

    try:
        domain = ds.CORDEX_domain
    except Exception as e:
        LOGGER.error(e)
        domain = ''
    ds.close()

    min_lat, max_lat, min_lon, max_lon = get_extent(resource)
    geospatial_increment = get_geospatial_increment(resource)

    try:
        md_dynamic = {
            'in_var_driving_experiment': driving_experiment,
            'in_var_driving_experiment_name': driving_experiment_name,
            'in_var_driving_model_ensemble_member':
            driving_model_ensemble_member,
            'in_var_experiment': experiment,
            'in_var_experiment_id': experiment_id,
            'in_var_project_id': project_id,
            'in_var_contact': contact,
            'in_var_institution_id': institution_id,
            'in_var_model_version_id': model_version_id,
            'in_var_driving_model_id': driving_model_id,
            'in_var_model_id': model_id,
            'in_var_driving_ensemble_member': driving_ensemble_member,
            'in_var_driving_experiment_id': driving_experiment_id,
            'in_var_domain': domain,
            'in_var_tracking_id': tracking_id,
            'frequency': frequency,
            'time_coverage_start': time_coverage_start,
            'time_coverage_end': time_coverage_end,
            'time_number_steps': time_number_steps,
            #'time_number_gaps': '',
            #'cdm_datatype':'' ,
            'domain': '%s_subset' % domain,
            'geospatial_increment': geospatial_increment,
            'geospatial_lat_min': min_lat,
            'geospatial_lat_max': max_lat,
            'geospatial_lon_min': min_lon,
            'geospatial_lon_max': max_lon,
        }

        #:product = "output" ;
        #:rcm_version_id = "v1" ;
        #:references = "http://www.smhi.se/en/Research/Research-departments/climate-research-rossby-centre" ;

    except Exception as e:
        LOGGER.error('failed to populate dynamic metadata dictionary: %s' % e)

    try:
        ds = Dataset(resource, mode='a')
        ds.setncatts(md_dynamic)
        ds.close()
    except Exception as e:
        LOGGER.error(e)

    return (resource)
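The repeated try/except blocks above could be collapsed into a loop; a sketch with the attribute names copied from the code above (netCDF4 assumed):

from netCDF4 import Dataset

def harvest_global_attrs(resource, attrs):
    # Read and remove the listed global attributes, defaulting to ''.
    ds = Dataset(resource, mode='a')
    harvested = dict((name, getattr(ds, name, '')) for name in attrs)
    for name in attrs:
        if hasattr(ds, name):
            ds.delncattr(name)
    ds.close()
    return harvested

attrs = ['driving_experiment', 'driving_experiment_name',
         'driving_model_ensemble_member', 'experiment', 'tracking_id',
         'experiment_id', 'project_id', 'institution_id', 'model_version_id',
         'driving_model_id', 'driving_ensemble_member', 'model_id',
         'contact', 'driving_experiment_id']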
Example #15
def calc_indice_percentile(resources=[], variable=None, 
    prefix=None, indices='TG90p', refperiod=None,
    groupings='yr', polygons=None, percentile=90, mosaic = False, 
    dir_output=None, dimension_map = None):
    """
    Calculates given indices for suitable files in the appropriate time grouping and polygon.

    :param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='TG90p')
    :param prefix: filename prefix 
    :param refperiod: reference period tuple = (start,end)
    :param groupings: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into dir_output.
    """
    from os.path import join, dirname, exists
    from os import remove, makedirs
    import uuid
    from numpy import ma 
    from datetime import datetime as dt

    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time
    
    if type(resources) != list: 
      resources = list([resources])
    if type(indices) != list: 
      indices = list([indices])
      
    if type(groupings) != list: 
      groupings = list([groupings])
      
    if type(refperiod) == list: 
      refperiod = refperiod[0]
      
    if refperiod != None:
      start = dt.strptime(refperiod.split('-')[0] , '%Y%m%d')
      end = dt.strptime(refperiod.split('-')[1] , '%Y%m%d')
      time_range = [start, end]
    else:  
      time_range = None
    
    if dir_output != None:
      if not exists(dir_output): 
        makedirs(dir_output)
    
    ########################################################################################################################
    # Compute a custom percentile basis using ICCLIM. ######################################################################
    ########################################################################################################################

    from ocgis.contrib import library_icclim  as lic 
    nc_indices = []
    nc_dic = sort_by_filename(resources)
    
    for grouping in groupings:
      calc_group = calc_grouping(grouping)
      for key in nc_dic.keys():
        resource = nc_dic[key]
        if variable == None: 
          variable = get_variable(resource)
        if polygons == None:
          nc_reference = call(resource=resource, 
            prefix=str(uuid.uuid4()), 
            time_range=time_range,
            output_format='nc', 
            dir_output=dir_output)
        else:
          nc_reference = clipping(resource=resource, 
            prefix=str(uuid.uuid4()),
            time_range=time_range, 
            output_format='nc', 
            polygons=polygons,
            dir_output=dir_output, 
            mosaic = mosaic)
          
        arr = get_values(resource=nc_reference)
        dt_arr = get_time(resource=nc_reference)
        arr = ma.masked_array(arr)
        dt_arr = ma.masked_array(dt_arr)
        window_width = 5
        
        for indice in indices:
          name = indice.replace('_', str(percentile))
          var = indice.split('_')[0]

          operation = None
          if 'T' in var: 
            if percentile >= 50: 
              operation = 'Icclim%s90p' % var
              func = 'icclim_%s90p' % var # icclim_TG90p
            else: 
              operation = 'Icclim%s10p' % var
              func = 'icclim_%s10p' % var
              
          ################################
          # load the appropriate operation
          ################################

          ops = [op for op in dir(lic) if operation in op]
          if len(ops) == 0:
              raise Exception("operator does not exist %s", operation)
          
          exec "percentile_dict = lic.%s.get_percentile_dict(arr, dt_arr, percentile, window_width)" % ops[0]
          calc = [{'func': func, 'name': name, 'kwds': {'percentile_dict': percentile_dict}}]
          
          if polygons == None:
            nc_indices.append(call(resource=resource, 
                                prefix=key.replace(variable,name).replace('_day_', '_%s_' % grouping), 
                                calc=calc, 
                                calc_grouping=calc_group, 
                                output_format='nc',
                                dir_output=dir_output))
          else: 
            nc_indices.extend(clipping(resource=resource, 
                                prefix=key.replace(variable,name).replace('_day_', '_%s_' % grouping), 
                                calc=calc, 
                                calc_grouping=calc_group, 
                                output_format='nc',
                                dir_output=dir_output,
                                polygons=polygons, 
                                mosaic = mosaic,
                                ))
    return nc_indices

#def calc_indice_unconventional(resource=[], variable=None, prefix=None,
  #indices=None, polygons=None,  groupings=None, 
  #dir_output=None, dimension_map = None):
    #"""
    #Calculates given indices for suitable files in the appropriate time grouping and polygon.

    #:param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    #:param variable: variable name to be selected in the in netcdf file (default=None)
    #:param indices: list of indices (default ='TGx')
    #:param polygons: list of polygons (default =None)
    #:param grouping: indices time aggregation (default='yr')
    #:param out_dir: output directory for result file (netcdf)
    #:param dimension_map: optional dimension map if different to standard (default=None)

    #:return: list of netcdf files with calculated indices. Files are saved into dir_output
    #"""
    
    #from os.path import join, dirname, exists
    #from os import remove
    #import uuid
    #from flyingpigeon import ocgis_module
    #from flyingpigeon.subset import get_ugid, get_geom

    #if type(resource) != list: 
      #resource = list([resource])
    #if type(indices) != list: 
      #indices = list([indices])
    #if type(polygons) != list and polygons != None:
      #polygons = list([polygons])
    #elif polygons == None:
      #polygons = [None]
    #else: 
      #logger.error('Polygons not found')
    #if type(groupings) != list:
      #groupings = list([groupings])
    
    #if dir_output != None:
      #if not exists(dir_output): 
        #makedirs(dir_output)
    
    #experiments = sort_by_filename(resource)
    #outputs = []

    #print('environment for calc_indice_unconventional set')
    #logger.info('environment for calc_indice_unconventional set')
    
    #for key in experiments:
      #if variable == None:
        #variable = get_variable(experiments[key][0])
      #try: 
        #ncs = experiments[key]
        #for indice in indices:
          #logger.info('indice: %s' % indice)
          #try: 
            #for grouping in groupings:
              #logger.info('grouping: %s' % grouping)
              #try:
                #calc_group = calc_grouping(grouping)
                #logger.info('calc_group: %s' % calc_group)
                #for polygon in polygons:  
                  #try:
                    #domain = key.split('_')[1].split('-')[0]
                    #if polygon == None:
                      #if prefix == None: 
                        #prefix = key.replace(variable, indice).replace('_day_','_%s_' % grouping )
                      #geom = None
                      #ugid = None
                    #else:
                      #if prefix == None: 
                        #prefix = key.replace(variable, indice).replace('_day_','_%s_' % grouping ).replace(domain,polygon)
                      #geom = get_geom(polygon=polygon)
                      #ugid = get_ugid(polygons=polygon, geom=geom)
                    #if indice == 'TGx':
                      #calc=[{'func': 'max', 'name': 'TGx'}]
                      #tmp = ocgis_module.call(resource=ncs,# conform_units_to='celcius',
                                              #variable=variable, dimension_map=dimension_map, 
                                              #calc=calc, calc_grouping=calc_group, prefix=prefix,
                                              #dir_output=dir_output, geom=geom, select_ugid=ugid)
                    #elif indice == 'TGn':
                      #calc=[{'func': 'min', 'name': 'TGn'}]
                      #tmp = ocgis_module.call(resource=ncs, #conform_units_to='celcius',
                                              #variable=variable, dimension_map=dimension_map, 
                                              #calc=calc, calc_grouping= calc_group, prefix=prefix,
                                               #dir_output=dir_output, geom=geom, select_ugid = ugid)
                    #elif indice == 'TGx5day':
                      #calc = [{'func': 'moving_window', 'name': 'TGx5day', 'kwds': {'k': 5, 'operation': 'mean', 'mode': 'same' }}]
                      #tmp2 = ocgis_module.call(resource=ncs, #conform_units_to='celcius',
                                              #variable=variable, dimension_map=dimension_map, 
                                              #calc=calc, prefix=str(uuid.uuid4()),
                                              #geom=geom, select_ugid = ugid)
                      #calc=[{'func': 'max', 'name': 'TGx5day'}]
                      #logger.info('moving window calculated : %s' % tmp2)
                      #tmp = ocgis_module.call(resource=tmp2,
                                              #variable=indice, dimension_map=dimension_map, 
                                              #calc=calc, calc_grouping=calc_group, prefix=prefix,
                                              #dir_output=dir_output)
                      #remove(tmp2)
                    #elif indice == 'TGn5day':
                      #calc = [{'func': 'moving_window', 'name': 'TGn5day', 'kwds': {'k': 5, 'operation': 'mean', 'mode': 'same' }}]
                      #tmp2 = ocgis_module.call(resource=ncs, #conform_units_to='celcius',
                                              #variable=variable, dimension_map=dimension_map, 
                                              #calc=calc, prefix=str(uuid.uuid4()),
                                              #geom=geom, select_ugid = ugid)
                      #calc=[{'func': 'min', 'name': 'TGn5day'}]
                      
                      #logger.info('moving window calculated : %s' % tmp2)
                      
                      #tmp = ocgis_module.call(resource=tmp2,
                                              #variable=indice, dimension_map=dimension_map, 
                                              #calc=calc, calc_grouping=calc_group, prefix=prefix,
                                              #dir_output=dir_output)
                      #remove(tmp2)
                    #else: 
                      #logger.error('Indice %s is not a known indice' % (indice))
                    #outputs.append(tmp)
                    #logger.info('indice file calculated %s ' % (tmp))
                  #except Exception as e:
                    #logger.debug('could not calc indice %s for key %s, polygon %s and calc_grouping %s : %s' %  (indice, key, polygon, grouping, e ))
              #except Exception as e:
                #logger.debug('could not calc indice %s for key %s and calc_grouping %s : %s' % ( indice, key, polygon, e ))
          #except Exception as e:
            #logger.debug('could not calc indice %s for key %s: %s'%  (indice, key, e ))
      #except Exception as e:
        #logger.debug('could not calc key %s: %s' % (key, e))
    #return outputs
Example n. 16
def calc_indice_percentile(resources=[],
                           variable=None,
                           prefix=None,
                           indices='TG90p',
                           refperiod=None,
                           groupings='yr',
                           polygons=None,
                           percentile=90,
                           mosaic=False,
                           dir_output=None,
                           dimension_map=None):
    """
    Calculates given indices for suitable files in the appropriate time grouping and polygon.

    :param resource: list of filenames in data reference syntax (DRS) convention (netcdf)
    :param variable: variable name to be selected in the in netcdf file (default=None)
    :param indices: list of indices (default ='TG90p')
    :param prefix: filename prefix 
    :param refperiod: reference period tuple = (start,end)
    :param grouping: indices time aggregation (default='yr')
    :param dir_output: output directory for result file (netcdf)
    :param dimension_map: optional dimension map if different to standard (default=None)

    :return: list of netcdf files with calculated indices. Files are saved into out_dir.
    """
    from os.path import exists
    from os import makedirs  # needed to create dir_output below
    import uuid
    from numpy import ma
    from datetime import datetime as dt

    from flyingpigeon.ocgis_module import call
    from flyingpigeon.subset import clipping
    from flyingpigeon.utils import get_values, get_time, sort_by_filename, get_variable

    if type(resources) != list:
        resources = list([resources])
    if type(indices) != list:
        indices = list([indices])

    if type(groupings) != list:
        groupings = list([groupings])

    if type(refperiod) == list:
        refperiod = refperiod[0]

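    # refperiod is expected as a 'YYYYMMDD-YYYYMMDD' string, e.g. '19710101-20001231'
    # (format inferred from the parsing below)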
    if refperiod != None:
        start = dt.strptime(refperiod.split('-')[0], '%Y%m%d')
        end = dt.strptime(refperiod.split('-')[1], '%Y%m%d')
        time_range = [start, end]
    else:
        time_range = None

    if dir_output != None:
        if not exists(dir_output):
            makedirs(dir_output)

    ########################################################################################################################
    # Compute a custom percentile basis using ICCLIM. ######################################################################
    ########################################################################################################################

    from ocgis.contrib import library_icclim as lic
    nc_indices = []
    nc_dic = sort_by_filename(resources)

    for grouping in groupings:
        calc_group = calc_grouping(grouping)
        for key in nc_dic.keys():
            resource = nc_dic[key]
            if variable == None:
                variable = get_variable(resource)
            if polygons == None:
                nc_reference = call(resource=resource,
                                    prefix=str(uuid.uuid4()),
                                    time_range=time_range,
                                    output_format='nc',
                                    dir_output=dir_output)
            else:
                nc_reference = clipping(resource=resource,
                                        prefix=str(uuid.uuid4()),
                                        time_range=time_range,
                                        output_format='nc',
                                        polygons=polygons,
                                        dir_output=dir_output,
                                        mosaic=mosaic)

            arr = get_values(resource=nc_reference)
            dt_arr = get_time(resource=nc_reference)
            arr = ma.masked_array(arr)
            dt_arr = ma.masked_array(dt_arr)
            window_width = 5

            for indice in indices:
                name = indice.replace('_', str(percentile))
                var = indice.split('_')[0]
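                # indices are apparently expected in the form 'TG_p': the '_' is
                # replaced by the percentile (e.g. 'TG_p' with percentile=90 -> 'TG90p')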

                operation = None
                if 'T' in var:
                    if percentile >= 50:
                        operation = 'Icclim%s90p' % var
                        func = 'icclim_%s90p' % var  # icclim_TG90p
                    else:
                        operation = 'Icclim%s10p' % var
                        func = 'icclim_%s10p' % var

                ################################
                # load the appropriate operation
                ################################

                ops = [op for op in dir(lic) if operation is not None and operation in op]
                if len(ops) == 0:
                    raise Exception('operator does not exist: %s' % operation)

                percentile_dict = getattr(lic, ops[0]).get_percentile_dict(
                    arr, dt_arr, percentile, window_width)
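                # percentile_dict (as returned by ICCLIM's get_percentile_dict) maps
                # calendar days to the percentile threshold computed over the reference data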
                calc = [{
                    'func': func,
                    'name': name,
                    'kwds': {
                        'percentile_dict': percentile_dict
                    }
                }]

                if polygons == None:
                    nc_indices.append(
                        call(resource=resource,
                             prefix=key.replace(variable, name).replace(
                                 '_day_', '_%s_' % grouping),
                             calc=calc,
                             calc_grouping=calc_group,
                             output_format='nc',
                             dir_output=dir_output))
                else:
                    nc_indices.extend(
                        clipping(
                            resource=resource,
                            prefix=key.replace(variable, name).replace(
                                '_day_', '_%s_' % grouping),
                            calc=calc,
                            calc_grouping=calc_group,
                            output_format='nc',
                            dir_output=dir_output,
                            polygons=polygons,
                            mosaic=mosaic,
                        ))
    return nc_indices
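
# A minimal usage sketch (hypothetical file name and output directory; assumes
# DRS-named daily netCDF input as expected by sort_by_filename, and indices
# given as 'TG_p' so that '_' is replaced by the percentile, see above):
# nc_files = ['tas_day_EUR-44_model_historical_r1i1p1_19700101-20001231.nc']
# nc_out = calc_indice_percentile(resources=nc_files,
#                                 indices=['TG_p'],
#                                 refperiod='19710101-20001231',
#                                 groupings=['yr'],
#                                 percentile=90,
#                                 dir_output='/tmp/indices')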


Example n. 17
def set_dynamic_md(resource):
  """
  Dynamic meta data like time frequency, spatial extent, start/end time, etc.
  :param resource: netCDF file where basic meta data should be set
  """
  from flyingpigeon.utils import get_timerange, get_time
  frequency = get_frequency(resource)
  
  time_coverage_start, time_coverage_end = get_timerange(resource)
  time_number_steps = len(get_time(resource))
  
  # min_lat, max_lat, min_lon, max_lon = get_extent(resource)
  
  ds = Dataset(resource, mode='a')
  
  try:
    driving_experiment = ds.driving_experiment
    ds.delncattr('driving_experiment')
  except Exception as e: 
    logger.error(e)
    driving_experiment = ''

  try:
    driving_experiment_name = ds.driving_experiment_name
    ds.delncattr('driving_experiment_name')
  except Exception as e: 
    logger.error(e)
    driving_experiment_name = ''

  try:
    driving_model_ensemble_member = ds.driving_model_ensemble_member
    ds.delncattr('driving_model_ensemble_member')
  except Exception as e: 
    logger.error(e)
    driving_model_ensemble_member = ''    
    
  try:
    experiment = ds.experiment
    ds.delncattr('experiment')
  except Exception as e: 
    logger.error(e)
    experiment = ''
  try:
    tracking_id = ds.tracking_id
    ds.delncattr('tracking_id')
  except Exception as e: 
    logger.error(e)
    tracking_id = ''    
    
  try:
    experiment_id = ds.experiment_id
    ds.delncattr('experiment_id')
  except Exception as e: 
    logger.error(e)
    experiment_id = ''
  
  try:
    project_id = ds.project_id
    ds.delncattr('project_id')
  except Exception as e: 
    logger.error(e)
    project_id = ''
    
  try:
    institution_id = ds.institution_id
    ds.delncattr('institution_id')
  except Exception as e: 
    logger.error(e)
    institution_id = ''
 
  try:
    model_version_id = ds.model_version_id
    ds.delncattr('model_version_id')
  except Exception as e: 
    logger.error(e)
    model_version_id = ''
    
  try:
    driving_model_id = ds.driving_model_id
    ds.delncattr('driving_model_id')
  except Exception as e: 
    logger.error(e)
    driving_model_id = ''

  try:
    driving_ensemble_member = ds.driving_ensemble_member
    ds.delncattr('driving_ensemble_member')
  except Exception as e: 
    logger.error(e)
    driving_ensemble_member = '' 
    
  try:
    model_id = ds.model_id
    ds.delncattr('model_id')
  except Exception as e:
    logger.error(e)
    model_id = ''

  try:
    contact = ds.contact
    ds.delncattr('contact')
  except Exception as e: 
    logger.error(e)
    contact = ''
  try:
    driving_experiment_id = ds.driving_experiment_id
    ds.delncattr('driving_experiment_id')
  except Exception as e: 
    logger.error(e)
    driving_experiment_id = ''
    
  try:
    domain = ds.CORDEX_domain
  except Exception as e: 
    logger.error(e)
    domain = ''
  ds.close()
  
  min_lat, max_lat, min_lon, max_lon = get_extent(resource)
  geospatial_increment = get_geospatial_increment(resource)
  
  try:
    md_dynamic = {
      'in_var_driving_experiment': driving_experiment,
      'in_var_driving_experiment_name': driving_experiment_name,
      'in_var_driving_model_ensemble_member': driving_model_ensemble_member,
      'in_var_experiment': experiment,
      'in_var_experiment_id': experiment_id,
      'in_var_project_id': project_id,
      'in_var_contact': contact,
      'in_var_institution_id': institution_id,
      'in_var_model_version_id': model_version_id,
      'in_var_driving_model_id': driving_model_id,
      'in_var_model_id': model_id,
      'in_var_driving_ensemble_member': driving_ensemble_member,
      'in_var_driving_experiment_id': driving_experiment_id,
      'in_var_domain': domain,
      'in_var_tracking_id': tracking_id,
      'frequency': frequency,
      'time_coverage_start': time_coverage_start,
      'time_coverage_end': time_coverage_end,
      'time_number_steps': time_number_steps,
      # 'time_number_gaps': '',
      # 'cdm_datatype': '',
      'domain': '%s_subset' % domain,
      'geospatial_increment': geospatial_increment,
      'geospatial_lat_min': min_lat,
      'geospatial_lat_max': max_lat,
      'geospatial_lon_min': min_lon,
      'geospatial_lon_max': max_lon,
      }
    
    # global attributes not handled here:
    # :product = "output" ;
    # :rcm_version_id = "v1" ;
    # :references = "http://www.smhi.se/en/Research/Research-departments/climate-research-rossby-centre" ;

  except Exception as e:
    logger.error('failed to populate dynamic metadata dictionary: %s' % e)
    
  try:
    ds = Dataset(resource, mode='a')
    ds.setncatts(md_dynamic)
    ds.close()
  except Exception as e:
    logger.error(e)
    
  return resource
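
# A minimal usage sketch (hypothetical path; assumes the file carries the
# CORDEX global attributes queried above):
# nc_updated = set_dynamic_md('/tmp/tas_day_EUR-44_subset.nc')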
Example n. 18
    def _handler(self, request, response):
        init_process_logger('log.txt')
        response.outputs['output_log'].file = 'log.txt'

        response.update_status('execution started at : {}'.format(dt.now()), 5)

        ################################
        # reading in the input arguments
        ################################
        try:
            LOGGER.info('read in the arguments')
            # resources = self.getInputValues(identifier='resources')
            season = request.inputs['season'][0].data
            LOGGER.info('season %s', season)

            period = request.inputs['period'][0].data
            LOGGER.info('period %s', period)
            anualcycle = request.inputs['anualcycle'][0].data

            start = dt.strptime(period.split('-')[0], '%Y%m%d')
            end = dt.strptime(period.split('-')[1], '%Y%m%d')
            LOGGER.debug('start: %s , end: %s ' % (start, end))

            resource = archiveextract(resource=rename_complexinputs(request.inputs['resource']))
            # resource = archiveextract(resource=[res.file for res in request.inputs['resource']])
            url_Rdat = request.inputs['Rdat'][0].data
            url_dat = request.inputs['dat'][0].data
            url_ref_file = request.inputs['netCDF'][0].data  # can be None
            # season = self.getInputValues(identifier='season')[0]
            # period = self.getInputValues(identifier='period')[0]
            # anualcycle = self.getInputValues(identifier='anualcycle')[0]
            LOGGER.info('url_ref_file: %s' % url_ref_file)
            LOGGER.info('url_Rdat: %s' % url_Rdat)
            LOGGER.info('url_dat: %s' % url_dat)
        except Exception as e:
            LOGGER.debug('failed to convert arguments %s ' % e)

        ############################
        # fetching training data
        ############################

        try:
            dat = abspath(download(url_dat))
            Rdat = abspath(download(url_Rdat))
            LOGGER.info('training data fetched')
        except Exception as e:
            LOGGER.error('failed to fetch training data %s' % e)

        ##########################################################
        # get the required bbox and time region from resource data
        ##########################################################
        # from flyingpigeon.weatherregimes import get_level
        try:
            from flyingpigeon.ocgis_module import call
            from flyingpigeon.utils import get_variable
            time_range = [start, end]

            variable = get_variable(resource)

            if len(url_ref_file) > 0:
                ref_file = download(url_ref_file)
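                # regrid the model subset onto the grid of the reference file;
                # 'bil' presumably selects bilinear interpolation in the call wrapper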
                model_subset = call(
                    resource=resource, variable=variable,
                    time_range=time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
                    regrid_destination=ref_file, regrid_options='bil')
                LOGGER.info('Dataset subset with regridding done: %s ' % model_subset)
            else:
                model_subset = call(
                    resource=resource, variable=variable,
                    time_range=time_range,  # conform_units_to=conform_units_to, geom=bbox, spatial_wrapping='wrap',
                )
                LOGGER.info('Dataset time period extracted: %s ' % model_subset)
        except Exception:
            LOGGER.exception('failed to make a data subset ')

        #######################
        # computing anomalies
        #######################
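        # anualcycle is expected as a 'YYYYMMDD-YYYYMMDD' string giving the reference
        # window for the anomaly computation (format inferred from the parsing below)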
        try:
            cycst = anualcycle.split('-')[0]
            cycen = anualcycle.split('-')[1]
            reference = [dt.strptime(cycst, '%Y%m%d'), dt.strptime(cycen, '%Y%m%d')]
            model_anomal = wr.get_anomalies(model_subset, reference=reference)

            #####################
            # extracting season
            #####################

            model_season = wr.get_season(model_anomal, season=season)
        except Exception:
            LOGGER.exception('failed to compute anualcycle or seasons')

        #######################
        # call the R scripts
        #######################

        import shlex
        import subprocess
        from flyingpigeon import config
        from os.path import curdir, exists, join

        try:
            rworkspace = curdir
            Rsrc = config.Rsrc_dir()
            Rfile = 'weatherregimes_projection.R'

            yr1 = start.year
            yr2 = end.year
            time = get_time(model_season, format='%Y%m%d')

            # ip, output_graphics = mkstemp(dir=curdir ,suffix='.pdf')
            ip, file_pca = mkstemp(dir=curdir, suffix='.txt')
            ip, file_class = mkstemp(dir=curdir, suffix='.Rdat')
            ip, output_frec = mkstemp(dir=curdir, suffix='.txt')

            args = ['Rscript', join(Rsrc, Rfile), '%s/' % curdir,
                    '%s/' % Rsrc,
                    '%s' % model_season,
                    '%s' % variable,
                    '%s' % str(time).strip("[]").replace("'", "").replace(" ", ""),
                    # '%s' % output_graphics,
                    '%s' % dat,
                    '%s' % Rdat,
                    '%s' % file_pca,
                    '%s' % file_class,
                    '%s' % output_frec,
                    '%s' % season,
                    '%s' % start.year,
                    '%s' % end.year,
                    '%s' % 'MODEL']

            LOGGER.info('R call built')
        except Exception as e:
            msg = 'failed to build the R command %s' % e
            LOGGER.error(msg)
            raise Exception(msg)
        try:
            output, error = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            # , shell=True
            LOGGER.info('R outlog info:\n %s ' % output)
            LOGGER.debug('R outlog errors:\n %s ' % error)
            if len(output) > 0:
                response.update_status('**** weatherregime in R succeeded', 90)
            else:
                LOGGER.error('no output returned from R call')
        except Exception as e:
            msg = 'weatherregime in R failed: %s' % e
            LOGGER.error(msg)
            raise Exception(msg)

        #################
        # set the outputs
        #################

        response.update_status('Set the process outputs ', 95)

        response.outputs['output_pca'].file = file_pca
        response.outputs['output_classification'].file = file_class
        response.outputs['output_netcdf'].file = model_season
        response.outputs['output_frequency'].file = output_frec

        response.update_status('done', 100)
        return response