Example #1
def indice_multivar(in_files1, var1,
                    in_files2, var2,
                    out_file,
                    indice_name,
                    time_range,
                    slice_mode,
                    project,
                    N_lev=None,
                    callback=None):
    
    '''
    This function returns the name of a result NetCDF file containing a climate indice based on two variables (ETR, DTR, vDTR).
    
    
    :param in_files1: input NetCDF files corresponding to the first variable
    :type in_files1: list of str
    :param var1: first variable to process (e.g. "tasmax")
    :type var1: str
    
    :param in_files2: input NetCDF files corresponding to the second variable
    :type in_files2: list of str
    :param var2: second variable to process (e.g. "tasmin")
    :type var2: str
    
    :param out_file: output file name
    :type out_file: str
    :param indice_name: climate indice name
    :type indice_name: str
    :param time_range: time range (dt1 should be the first day of a year/month, dt2 the last day of a year/month). Note: to include dt2, set its time to the end of the day (HH=23, mm=59).
    :type time_range: list of 2 datetime objects [dt1, dt2]  
    :param slice_mode: "year" or "month" 
    :type slice_mode: str
    :param project: project name ("CMIP5" or "CORDEX")
    :type project: str
    
    :rtype: output NetCDF file name (str)
    
    .. note:: Both file lists must contain the same number of time steps.
    .. note:: The first variable must always be greater than the second variable (e.g. var1="tasmax" and var2="tasmin").
    '''

    inc1 = Dataset(in_files1[0], 'r')
    inc2 = Dataset(in_files2[0], 'r')
    
    onc = Dataset(out_file, 'w', format="NETCDF3_CLASSIC")
    
    fill_val1 = get_att_value(inc1, var1, '_FillValue')
    fill_val2 = get_att_value(inc2, var2, '_FillValue')
    
    indice_dim = copy_var_dim(inc1, onc, var1, project) # tuple ('time', 'lat', 'lon')
    
    nb_rows = inc1.variables[indice_dim[1]].shape[0]
    nb_columns = inc1.variables[indice_dim[2]].shape[0]
    
    calend = get_att_value(inc1, indice_dim[0], 'calendar')
    units = get_att_value(inc1, indice_dim[0], 'units')
    
    inc1.close()
    inc2.close()

    ind_type = 'f'    
    ind = onc.createVariable(indice_name, ind_type, (indice_dim[0], indice_dim[1], indice_dim[2]), fill_value = fill_val1)
       
    
    dt_begin = time_range[0] # datetime object
    dt_end = time_range[1] # datetime object
    
    ############################
    glob_dict_timeStep_indice = {}
    ############################

    
    for in_file1, in_file2  in zip(in_files1, in_files2):

        nc1 = Dataset(in_file1, 'r')
        nc2 = Dataset(in_file2, 'r')
        
        time_steps_vect1 = get_list_dates_from_nc(nc1, 'dt')
        time_steps_vect2 = get_list_dates_from_nc(nc2, 'dt') 

        if time_steps_vect1 != time_steps_vect2:
            print('Error: the time steps of the two input files do not match')
        else:
        
            dict_year_chunk1 = get_dict_year_chunk(time_steps_vect1)   
            
            if N_lev is None:
                values1 = nc1.variables[var1]
                values2 = nc2.variables[var2]
            else:
                values1 = nc1.variables[var1][:,N_lev,:,:]
                values2 = nc2.variables[var2][:,N_lev,:,:]
            
    
            for year in sorted(dict_year_chunk1.keys()):
    
                if year>=dt_begin.year and year<=dt_end.year:
                    
                    i1 = dict_year_chunk1[year][0]
                    i2 = dict_year_chunk1[year][1]
    
                    values_current_chunk1 = values1[i1:i2+1,:,:] # load the data (for one year) for processing
                    values_current_chunk2 = values2[i1:i2+1,:,:]
                    
                    time_steps_current_chunk1 = numpy.array(time_steps_vect1[i1:i2+1])
                       
                    
                    if (slice_mode=='year'):
                        mydict_TimeStep_3DArray1=get_dict_year_3Darr(values_current_chunk1, time_steps_current_chunk1)
                        mydict_TimeStep_3DArray2=get_dict_year_3Darr(values_current_chunk2, time_steps_current_chunk1)
                    elif (slice_mode=='month'):
                        mydict_TimeStep_3DArray1=get_dict_month_3Darr(values_current_chunk1, time_steps_current_chunk1)
                        mydict_TimeStep_3DArray2=get_dict_month_3Darr(values_current_chunk2, time_steps_current_chunk1)
                    
                    mydict_indice=get_dict_timeStep_indice_multivar(mydict_TimeStep_3DArray1, mydict_TimeStep_3DArray2, indice_name, fill_val1, fill_val2, ind, onc)
                    
                    glob_dict_timeStep_indice.update(mydict_indice)
      
                    del values_current_chunk1, values_current_chunk2, time_steps_current_chunk1
      
                    print "Processed: ", year
                    
                    #counter_year = counter_year + 1
                    #print counter_year, total_nb_years_to_process
    
                    #status = "Year processed {0}/{1} ({3})".format(counter_year, total_nb_years_to_process, year)
                    #print status
                    
                #else:
                    #print "data not processed ", year
                    #callback("Skipping year %d" % year,percentageComplete)
    
                #time.sleep(0.01)
            #    #time.sleep(1.01)
            #    pbar.update(i+1)
            #    i+=1
            #
            #pbar.finish()
            

            nc1.close()
            nc2.close()
    
        #pbar_files.finish()
            
        #print '---'    
        #print sorted(glob_dict_timeStep_indice.keys())
        #print '---'     
        
    glob_indice = get_globindice(glob_dict_timeStep_indice, nb_rows, nb_columns) # tuple (time_step_vect, indice_2D_arr)
    
    ind[:,:,:] = glob_indice[0][:,:,:]
    
    # set global attributes
    #eval(indice_name + '_setglobattr(onc)')
    ## for all:
    #setglobattr_history(onc, indice_name, slice_mode, dt_begin, dt_end)
    #onc.setncattr('institution', '')
    onc.setncattr('source', '')  # source metadata will be added later
    #onc.setncattr('comment', '')   
    #onc.setncattr('reference', '')
    
    # set global attributes
    set_globattr.title(onc, indice_name)
    set_globattr.references(onc)
    set_globattr.comment(onc, indice_name)
    set_globattr.institution(onc, institution_str='Climate impact portal (http://climate4impact.eu)')
    set_globattr.history2(onc, slice_mode, indice_name, time_range)

    # set variable attributes
    eval('set_longname_units.' + indice_name + '_setvarattr(ind)')
    # for all:
    ind.missing_value = fill_val1
    ind.setncattr('standard_name', 'ECA_indice')

    #print indice[1][:] # must be float or str!    
    #time_steps = [str(i) for i in indice[1][:]]
    
    time_steps_indice_dt = glob_indice[1][:]
    time_bnds_dt = get_glob_time_bnds(time_steps_indice_dt, slice_mode)
    
    set_time_values(onc, time_steps_indice_dt, calend, units)
    set_timebnds_values(onc, time_bnds_dt, calend, units)
    
    onc.close()
    
    return out_file
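
Below is a minimal usage sketch for indice_multivar. The file paths and the output name are hypothetical, and it assumes the surrounding module already imports netCDF4's Dataset, numpy, and the library helpers (copy_var_dim, get_globindice, etc.) that the function calls; the DTR indice and the tasmax/tasmin pairing follow the docstring above.

# Hypothetical usage sketch: yearly DTR from matching tasmax/tasmin file lists.
from datetime import datetime

tasmax_files = ['tasmax_day_model_19950101-19991231.nc']  # assumed input paths
tasmin_files = ['tasmin_day_model_19950101-19991231.nc']  # assumed input paths

dt1 = datetime(1995, 1, 1)
dt2 = datetime(1999, 12, 31, 23, 59)  # HH=23, mm=59 so the last day is included (see docstring)

out = indice_multivar(in_files1=tasmax_files, var1='tasmax',
                      in_files2=tasmin_files, var2='tasmin',
                      out_file='DTR_year_1995-1999.nc',
                      indice_name='DTR',
                      time_range=[dt1, dt2],
                      slice_mode='year',
                      project='CMIP5')
print(out)  # name of the written NetCDF file
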
Example #2
def indice(in_files,
           out_file,
           var,
           indice_name,
           time_range,
           slice_mode,
           project,
           threshold=None,
           N_lev=None,
           callback=None):
    
    '''
    This function returns the name of a result NetCDF file containing a simple climate indice (based on one variable).
    
    
    :param in_files: input NetCDF files
    :type in_files: list of str
    :param out_file: output file name
    :type out_file: str
    :param var: variable name to process
    :type var: str
    :param indice_name: climate indice name
    :type indice_name: str
    :param time_range: time range (dt1 should be the first day of a year/month, dt2 the last day of a year/month). Note: to include dt2, set its time to the end of the day (HH=23, mm=59).
    :type time_range: list of 2 datetime objects [dt1, dt2]  
    :param slice_mode: "year" or "month" 
    :type slice_mode: str
    :param project: project name ("CMIP5" or "CORDEX")
    :type project: str
    :param threshold: user-defined threshold for certain indices
    :type threshold: float
    
    :rtype: output NetCDF file name (str)
    
    '''
    
    #print "DADA"
       
    #callback("Init Opening "+in_files[0],0);
    inc = Dataset(in_files[0], 'r')
    #callback("Finished opening "+in_files[0],0);
    
    onc = Dataset(out_file, 'w', format="NETCDF3_CLASSIC")
    
    fill_val = get_att_value(inc, var, '_FillValue')

    indice_dim = copy_var_dim(inc, onc, var, project) # tuple ('time', 'lat', 'lon')
    
    nb_rows = inc.variables[indice_dim[1]].shape[0]
    nb_columns = inc.variables[indice_dim[2]].shape[0]
    
    calend = get_att_value(inc, indice_dim[0], 'calendar')
    units = get_att_value(inc, indice_dim[0], 'units')
    
    inc.close()

    ind_type = 'f'
        
    ind = onc.createVariable(indice_name, ind_type, (indice_dim[0], indice_dim[1], indice_dim[2]), fill_value = fill_val)  
    
    dt_begin = time_range[0] # datetime object
    dt_end = time_range[1] # datetime object
    
    ############################
    glob_dict_timeStep_indice = {}
    ############################
    
    #j=0
    #pbar_files = ProgressBar(widgets=[Percentage(),' ', Bar()], maxval=len(in_files)).start()
    
    total_nb_years_to_process = dt_end.year - dt_begin.year + 1
    
    for ifile in in_files:
        
        
        #pbar_files.widgets[1]= ' processing file ' +str(j+1)
        #time.sleep(1.01)
        #pbar_files.update(j+1)
        #j+=1
        
        #callback("Opening "+ifile,0);
        nc = Dataset(ifile, 'r')
        
        time_steps_vect = get_list_dates_from_nc(nc, 'dt') 
        
        dict_year_chunk = get_dict_year_chunk(time_steps_vect)   
        #print dict_year_chunk
        
        if N_lev is None:
            values = nc.variables[var]
        else:
            values = nc.variables[var][:,N_lev,:,:]
        
        
        #pbar = ProgressBar(widgets=['',Percentage(), Bar()], maxval=len(dict_year_chunk.keys())).start()
        #i=0
        
        currentStep=1
        totalSteps=len(dict_year_chunk.keys())
        
        counter_year = 0
        for year in sorted(dict_year_chunk.keys()):
            
            #pbar.widgets[0]= ' <'+str(year)+' processed> '
            
            percentageComplete = (currentStep / float(totalSteps)) * 100
            #callback("Processing year %d/%d %d" % (currentStep,totalSteps,year),percentageComplete)
            
            if year>=dt_begin.year and year<=dt_end.year:
                
                #callback("Processing year %d/%d %d" % (currentStep,totalSteps,year),percentageComplete)
                
                i1 = dict_year_chunk[year][0]
                i2 = dict_year_chunk[year][1]
                #print i1, i2
                values_current_chunk = values[i1:i2+1,:,:] # load the data (for one year) for processing
                time_steps_current_chunk = numpy.array(time_steps_vect[i1:i2+1])
                
                
                if (slice_mode=='year'):
                    mydict_TimeStep_3DArray=get_dict_year_3Darr(values_current_chunk, time_steps_current_chunk)
                elif (slice_mode=='month'):
                    mydict_TimeStep_3DArray=get_dict_month_3Darr(values_current_chunk, time_steps_current_chunk)
                    
                
                mydict_indice=get_dict_timeStep_indice(mydict_TimeStep_3DArray, indice_name, fill_val, ind, onc, threshold)
                
                glob_dict_timeStep_indice.update(mydict_indice)
  
                del values_current_chunk, time_steps_current_chunk
  
                print "Processed: ", year
                
                #counter_year = counter_year + 1
                #print counter_year, total_nb_years_to_process

                #status = "Year processed {0}/{1} ({3})".format(counter_year, total_nb_years_to_process, year)
                #print status
                
            #else:
                #print "data not processed ", year
                #callback("Skipping year %d" % year,percentageComplete)

            #time.sleep(0.01)
        #    #time.sleep(1.01)
        #    pbar.update(i+1)
        #    i+=1
        #
        #pbar.finish()
        
        nc.close()
        
        

    #pbar_files.finish()
        
    #print '---'    
    #print sorted(glob_dict_timeStep_indice.keys())
    #print '---'     
    
    glob_indice = get_globindice(glob_dict_timeStep_indice, nb_rows, nb_columns) # tuple (time_step_vect, indice_2D_arr)
    
    ind[:,:,:] = glob_indice[0][:,:,:]
    
    # set global attributes
    #eval(indice_name + '_setglobattr(onc)')
    ## for all:
    #setglobattr_history(onc, indice_name, slice_mode, dt_begin, dt_end)
    #onc.setncattr('institution', '')
    onc.setncattr('source', '') 
    #onc.setncattr('comment', '')   
    #onc.setncattr('reference', '')
       
    
    # set global attributes
    
    # title
    if threshold is not None:
        onc.setncattr('title', 'Indice {0} with user defined threshold'.format(indice_name))
    else:
        set_globattr.title(onc, indice_name)
        
    set_globattr.references(onc)
    set_globattr.comment(onc, indice_name)
    set_globattr.institution(onc, institution_str='Climate impact portal (http://climate4impact.eu)')
    set_globattr.history2(onc, slice_mode, indice_name, time_range)

    # set variable attributes
    if threshold is not None:
        eval('set_longname_units_custom_indices.' + indice_name + '_setvarattr(ind, threshold)')
    else:
        eval('set_longname_units.' + indice_name + '_setvarattr(ind)')
        ind.setncattr('standard_name', 'ECA_indice')
    # for all:
    ind.missing_value = fill_val
    
    #print indice[1][:] # must be float or str!    
    #time_steps = [str(i) for i in indice[1][:]]
    
    time_steps_indice_dt = glob_indice[1][:]
    time_bnds_dt = get_glob_time_bnds(time_steps_indice_dt, slice_mode)
    
    set_time_values(onc, time_steps_indice_dt, calend, units)
    set_timebnds_values(onc, time_bnds_dt, calend, units)
    
    onc.close()
    
    return out_file
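
Below is a minimal usage sketch for the single-variable indice function. The file path, the output name, and the choice of 'SU' (summer days) as indice_name are assumptions for illustration; threshold is left at its default since it is only needed for user-threshold indices.

# Hypothetical usage sketch: monthly SU indice computed from daily tasmax files.
from datetime import datetime

tasmax_files = ['tasmax_day_model_19900101-19941231.nc']  # assumed input paths

dt1 = datetime(1990, 1, 1)
dt2 = datetime(1994, 12, 31, 23, 59)  # HH=23, mm=59 so the last day is included (see docstring)

out = indice(in_files=tasmax_files,
             out_file='SU_month_1990-1994.nc',
             var='tasmax',
             indice_name='SU',
             time_range=[dt1, dt2],
             slice_mode='month',
             project='CORDEX')
print(out)  # name of the written NetCDF file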