def write_age(self, data, outkey='Z_Age', missing=-1, dtype='int8', filename=None):
    logging.debug('Writing ' + filename + ' for ' + str(outkey))
    ensure_dir(filename)
    try:
        f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
    except OSError:
        f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
    # createDimensionIfNotExists is idempotent, so this is safe both for
    # freshly created and for appended-to files
    f.createDimensionIfNotExists('x', self.xoutputsize)
    f.createDimensionIfNotExists('y', self.youtputsize)
    self._set_date_and_version(f, self.date, __version__, self.model_id)
    f.setncattr('institution', 'IPMA')
    # cast before creating the variable so that the on-disk type honours the
    # requested `dtype` instead of the input array's type
    data = data.astype(dtype)
    outvar = f.createVariableIfNotExists(outkey, data.dtype, ('y', 'x'),
                                         zlib=True, complevel=5, fletcher32=True)
    outvar[self.xslice, self.yslice] = data
    f.close()
def write_age(self, data, outkey='Z_Age', missing=-1, dtype='int8', filename=None):
    logging.debug('Writing ' + filename + ' for ' + str(outkey))
    ensure_dir(filename)
    f = h5py.File(filename, 'a')
    if outkey in f.keys():
        dataset = f[outkey]
    else:
        dataset = f.create_dataset(outkey, chunks=True, compression='gzip',
                                   fletcher32=True,
                                   shape=(self.xoutputsize, self.youtputsize),
                                   dtype=dtype)
        dataset.attrs.create('CLASS', 'Data'.encode('ASCII'))
        dataset.attrs.create('PRODUCT', long_names[outkey].encode('ASCII'))
        dataset.attrs.create('PRODUCT_ID', product_id[outkey])
        dataset.attrs.create('N_COLS', self.youtputsize)
        dataset.attrs.create('N_LINES', self.xoutputsize)
        dataset.attrs.create('NB_BYTES', 1)
        dataset.attrs.create('UNITS', 'days'.encode('ASCII'))
        dataset.attrs.create('CAL_SLOPE', 1.0)
        dataset.attrs.create('CAL_OFFSET', 0.0)
        dataset.attrs.create('MISSING_VALUE', missing)
        dataset.attrs.create('SCALING_FACTOR', 1.)
        dataset.attrs.create('OFFSET', 0.)
    data = data.astype(dtype)
    dataset[self.xslice, self.yslice] = data
    f.close()
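# Illustrative sketch, not part of the pipeline: the h5py writers above store
# integer-quantised arrays together with SCALING_FACTOR / OFFSET /
# MISSING_VALUE attributes, and h5py does not apply such attributes on read.
# A reader therefore has to undo the quantisation itself, e.g. like this
# (the helper name is hypothetical):
def decode_quantised_dataset_example(dataset):
    """ Invert  data_int = round(data * scale + offset)  and restore NaNs. """
    raw = dataset[...]
    scale = dataset.attrs['SCALING_FACTOR']
    offset = dataset.attrs['OFFSET']
    missing = dataset.attrs['MISSING_VALUE']
    decoded = (raw.astype('float64') - offset) / scale
    decoded[raw == missing] = np.nan
    return decoded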
def write_brdf(self, alldata, configkey, scale=10000., offset=0., missing=-32768):
    """ Write the K012 kernel coefficients of the brdf model to a netCDF file """
    for ichannel in range(0, self.n_channels_ref):
        for iparam in range(0, self.model_len):
            filename = self.config[configkey][f'band{ichannel+1}']['filename']
            outkey = f'K{iparam}'
            logging.debug('Writing ' + filename + ' for ' + str(outkey))
            ensure_dir(filename)
            try:
                f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
            except OSError:
                f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
            f.createDimensionIfNotExists('longitude', self.xoutputsize)
            f.createDimensionIfNotExists('latitude', self.youtputsize)
            f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
            f.createDimensionIfNotExists('KERNEL_INDEX', self.model_len)
            f.createDimensionIfNotExists('KERNEL_INDEX2', self.model_len)
            self._set_date_and_version(f, self.date, __version__, self.model_id)
            data = alldata[:, :, ichannel, iparam]
            logging.debug('Average : ' + str(np.mean(data[:])))
            data_int = ((data * scale) + offset).round().astype(self.dtype)
            data_int[np.isnan(data)] = missing
            missing = np.int16(missing)
            outvar = f.createVariableIfNotExists(
                outkey, data_int.dtype, ('latitude', 'longitude'),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': '',
                            'offset': offset,
                            'scale_factor': scale,
                            '_FillValue': missing,
                            'long_name': f'BRDF {outkey}'})
            outvar[self.xslice, self.yslice] = data_int
            logging.debug('Average : ' + str(np.mean(data_int[:])))
            f.close()
def write_tocr(self, data, xslice, yslice, date, key, typ):
    logging.info('Writing ' + typ + ' to ' + self.outfilename)
    try:
        ensure_dir(self.outfilename)
        try:
            f = AugmentedNetcdfDataset(self.outfilename, 'a', format='NETCDF4')
        except OSError:
            f = AugmentedNetcdfDataset(self.outfilename, 'w', format='NETCDF4')
        f.createDimensionIfNotExists('latitude', self.xoutputsize)
        f.createDimensionIfNotExists('longitude', self.youtputsize)
        f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
        self._set_date_and_version(f, date, __version__, self.model_id)
        if typ == 'TOC-R' or typ == 'TOC-R-ERR':
            scale_factor = 1. / 10000.
            missing_value = -32767
            outvar = f.createVariableIfNotExists(
                key, 'int16', ('latitude', 'longitude', 'NBAND'),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': '', 'offset': 0., 'scale_factor': scale_factor})
            missing = np.isnan(data)
            data[data < 0] = 0.0
            data[data > 1.01] = 1.0
            data = data / scale_factor
            data = data.astype(np.int16)
            data[missing] = missing_value
            outvar[xslice, yslice, :] = data[:, :, :]
        elif typ == 'Z-QFLAG':
            outvar = f.createVariableIfNotExists(
                key, 'uint8', ('latitude', 'longitude', 'NBAND'),
                zlib=True, complevel=5, fletcher32=True)
            outvar[xslice, yslice, :] = data[:, :, :]
        elif typ == 'solzenith':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude', 'longitude'),
                zlib=True, complevel=5, fletcher32=True)
            outvar[xslice, yslice] = data[:, :]
        elif typ == 'n_valid_obs':
            data = data.astype('int8')
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude', 'longitude'),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': '', 'long_name': 'NMOD for {key}'.format(key=key)})
            outvar[xslice, yslice] = data[:, :]
        elif typ == 'latitude':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude',),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': 'degrees', 'title': 'latitude',
                            'long_name': 'latitude'})
            outvar[xslice] = data[:, 0]  # as per VITO's request, take only the first column
        elif typ == 'longitude':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('longitude',),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': 'degrees', 'title': 'longitude',
                            'long_name': 'longitude'})
            outvar[yslice] = data[0, :]  # as per VITO's request, take only the first row
        else:
            raise Exception('Unknown type of data to write : typ = ' + str(typ))
        f.close()
    except Exception as e:
        logging.error('Problem writing ' + key + ' on ' + self.outfilename + ' : ' + str(e))
        raise e
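# Illustrative round trip for the int16 packing used by write_tocr above.
# This helper is hypothetical (it exists nowhere in the pipeline); it simply
# mirrors the clipping and scaling steps of the 'TOC-R' branch:
def pack_reflectance_example(data, scale_factor=1. / 10000., missing_value=-32767):
    """ Pack float reflectances into int16 the way write_tocr does. """
    data = data.copy()  # unlike write_tocr, do not mutate the caller's array
    missing = np.isnan(data)
    data[data < 0] = 0.0
    data[data > 1.01] = 1.0
    packed = (data / scale_factor).astype(np.int16)
    packed[missing] = missing_value
    return packed
# A netCDF4 reader recovers the physical values automatically: by CF
# convention a variable carrying a `scale_factor` attribute is unpacked on
# read as  unpacked = packed * scale_factor  (netCDF4-python does this unless
# set_auto_maskandscale(False) is called).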
def write_brdf_covariance(self, alldata, configkey, scale=10000., offset=0., missing=-32768):
    """ Write covariance in hdf file for the K012 kernel coefficients of the brdf model """
    for ichannel in range(0, self.n_channels_ref):
        for iparam in range(0, self.model_len):
            for jparam in range(iparam, self.model_len):
                filename = self.config[configkey][f'band{ichannel+1}']['cov']
                ensure_dir(filename)
                outkey = f'C{iparam}{jparam}'
                logging.debug('Writing ' + filename + ' for ' + str(outkey))
                f = h5py.File(filename, 'a')
                if outkey in f.keys():
                    dataset = f[outkey]
                else:
                    dataset = f.create_dataset(outkey, chunks=True,
                                               compression='gzip', fletcher32=True,
                                               shape=(self.xoutputsize, self.youtputsize),
                                               dtype=self.dtype)
                    dataset.attrs.create('CLASS', 'Data'.encode('ASCII'))
                    dataset.attrs.create(
                        'PRODUCT',
                        'Covariance Matrix Element {iparam}{jparam}'.format(
                            iparam=iparam, jparam=jparam).encode('ASCII'))
                    dataset.attrs.create('PRODUCT_ID', 128)
                    dataset.attrs.create('N_COLS', self.youtputsize)
                    dataset.attrs.create('N_LINES', self.xoutputsize)
                    dataset.attrs.create('NB_BYTES', 2)
                    dataset.attrs.create('UNITS', 'dimensionless'.encode('ASCII'))
                    dataset.attrs.create('CAL_SLOPE', 1.0)
                    dataset.attrs.create('CAL_OFFSET', 0.)
                    dataset.attrs.create('MISSING_VALUE', missing)
                    # quantisation attributes used by downstream readers
                    dataset.attrs.create('SCALING_FACTOR', scale)
                    dataset.attrs.create('OFFSET', offset)
                data = alldata[:, :, ichannel, iparam, jparam]
                data_int = ((data * scale) + offset).round().astype(self.dtype)
                data_int[np.isnan(data)] = missing
                dataset[self.xslice, self.yslice] = data_int
                f.close()
def write_albedo_after_spectral_integration(self, alldata, typ, filename,
                                            missing=-1, scale=10000., offset=0.):
    dtype = '<i2'  # beware: values are rounded below
    for iout, outname in enumerate(self.outalbedos_names):
        for iin, inname in enumerate(self.inalbedos_names):
            fullname = outname + '-' + inname
            logging.debug('Writing ' + filename + ' for albedo ' + fullname)
            ensure_dir(filename)
            f = h5py.File(filename, 'a')
            if typ == 'albedo':
                outkey = 'AL-' + fullname
            elif typ == 'albedo-err':
                outkey = 'AL-' + fullname + '-ERR'
            # these spectral domains are not written out
            if outkey in ('AL-NI-BH', 'AL-NI-BH-ERR', 'AL-VI-BH', 'AL-VI-BH-ERR'):
                f.close()
                continue
            if outkey in f.keys():
                dataset = f[outkey]
            else:
                dataset = f.create_dataset(outkey, chunks=True, compression='gzip',
                                           fletcher32=True,
                                           shape=(self.xoutputsize, self.youtputsize),
                                           dtype=dtype)
                dataset.attrs.create('CLASS', 'Data'.encode('ASCII'))
                dataset.attrs.create('PRODUCT', long_names[outkey].encode('ASCII'))
                dataset.attrs.create('PRODUCT_ID', product_id[outkey])
                dataset.attrs.create('N_COLS', self.youtputsize)
                dataset.attrs.create('N_LINES', self.xoutputsize)
                dataset.attrs.create('NB_BYTES', 2)
                dataset.attrs.create('UNITS', 'dimensionless'.encode('ASCII'))
                dataset.attrs.create('CAL_SLOPE', 1.0)
                dataset.attrs.create('CAL_OFFSET', 0.0)
                dataset.attrs.create('MISSING_VALUE', missing)
                dataset.attrs.create('SCALING_FACTOR', scale)
                dataset.attrs.create('OFFSET', offset)
            data = alldata[:, :, iin, iout]
            data_int = ((data * scale) + offset).round().astype(dtype)
            data_int[np.isnan(data)] = missing
            dataset[self.xslice, self.yslice] = data_int
            f.close()
def write_all_brdf(self, al2runner):
    logging.debug('writing to ' + str(self.config))
    for ichannel in range(0, self.n_channels_ref):
        filename = self.config['brdf'][f'band{ichannel+1}']['filename']
        ensure_dir(filename)
        self._write_global_attributes(self.date, filename, options=None)
        data = al2runner.age.values[:, :, ichannel]
        self.write_age(data, filename=filename)
    for ichannel in range(0, self.n_channels_ref):
        filename = self.config['brdf'][f'band{ichannel+1}']['filename']
        ensure_dir(filename)
        self._write_global_attributes(self.date, filename, options=None)
        data = al2runner.quality.values[:, :, ichannel]
        self.write_qflag(data, filename=filename)
    for ichannel in range(0, self.n_channels_ref):
        # the filename must be resolved before ensure_dir is called
        filename = self.config['brdf-d01'][f'band{ichannel+1}']['filename']
        ensure_dir(filename)
        self._write_global_attributes(self.date, filename, options=None)
        data = al2runner.quality1.values[:, :, ichannel]
        self.write_qflag(data, filename=filename)
    self.write_brdf(al2runner.brdf.values, 'brdf')
    self.write_brdf_covariance(al2runner.covariance.values, 'brdf')
    self.write_brdf(al2runner.brdf1.values, 'brdf-d01')
    self.write_brdf_covariance(al2runner.covariance1.values, 'brdf-d01')
def write_brdf(self, data, key, typ):
    """ Write the numpy array "data" to the file defined above in
    "self.brdf_file", on the data layer "key". "typ" must be a known
    identifier, as the data is processed differently according to its value. """
    logging.debug('Writing ' + key + ':' + typ + ' to ' + self.brdf_file)
    try:
        ensure_dir(self.brdf_file)
        try:
            f = AugmentedNetcdfDataset(self.brdf_file, 'a', format='NETCDF4')
        except OSError:
            f = AugmentedNetcdfDataset(self.brdf_file, 'w', format='NETCDF4')
        f.createDimensionIfNotExists('X', self.xoutputsize)
        f.createDimensionIfNotExists('Y', self.youtputsize)
        f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
        f.createDimensionIfNotExists('KERNEL_INDEX', self.model_len)
        f.createDimensionIfNotExists('KERNEL_INDEX2', self.model_len)
        self._set_date_and_version(f, self.date, __version__, self.model_id)
        if typ == 'brdf':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('X', 'Y', 'NBAND', 'KERNEL_INDEX'),
                zlib=True, complevel=5, fletcher32=True)
        elif typ == 'covariance':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('X', 'Y', 'NBAND', 'KERNEL_INDEX', 'KERNEL_INDEX2'),
                zlib=True, complevel=5, fletcher32=True)
        elif typ == 'quality' or typ == 'age':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('X', 'Y', 'NBAND'),
                zlib=True, complevel=5, fletcher32=True)
        elif typ == 'latitude' or typ == 'longitude':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('X', 'Y'),
                zlib=True, complevel=5, fletcher32=True)
        elif typ == 'n_valid_obs':
            data = data.astype('int8')
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('X', 'Y'),
                zlib=True, complevel=5, fletcher32=True,
                attributes={'units': '', 'long_name': 'NMOD for {key}'.format(key=key)})
        else:
            raise Exception('Unknown type of data to write : typ = ' + str(typ))
        # here is the actual writing command
        outvar[self.xslice, self.yslice, ...] = data[...]
        f.close()
    except Exception as e:
        logging.error('Problem writing ' + key + ' on ' + self.brdf_file + ' : ' + str(e))
        raise e
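# Minimal usage sketch for write_brdf (hypothetical names: `writer` stands for
# a configured instance of this class, `model` for whatever supplies the
# arrays; neither exists in this module):
#
#   writer.write_brdf(model.brdf, 'K012', typ='brdf')            # (X, Y, NBAND, KERNEL_INDEX)
#   writer.write_brdf(model.covariance, 'CK', typ='covariance')  # adds KERNEL_INDEX2
#   writer.write_brdf(model.age, 'Z_Age', typ='age')             # (X, Y, NBAND)
#
# Each call opens self.brdf_file in append mode (creating it on the first
# call), so several layers can accumulate in the same netCDF file.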
def write_albedo(self, data, key, typ):
    """ Write the numpy array "data" to the file defined above in
    "self.albedo_file", on the data layer "key". "typ" must be a known
    identifier, as the data is processed differently according to its value. """
    logging.debug('Writing ' + key + ' to ' + self.albedo_file)
    try:
        ensure_dir(self.albedo_file)
        try:
            f = AugmentedNetcdfDataset(self.albedo_file, 'a', format='NETCDF4')
        except OSError:
            f = AugmentedNetcdfDataset(self.albedo_file, 'w', format='NETCDF4')
        f.createDimensionIfNotExists('longitude', self.xoutputsize)
        f.createDimensionIfNotExists('latitude', self.youtputsize)
        f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
        self._set_date_and_version(f, self.date, __version__, self.model_id)
        f.setncattr('institution', 'VITO')
        if typ == 'albedo' or typ == 'albedo_cov':
            scale_factor = 1. / 10000
            missing_value = -32767
            dtype = np.int16
            long_name = ('Albedo {key}' if typ == 'albedo'
                         else 'Albedo cov {key}').format(key=key)
            outvar = f.createVariableIfNotExists(
                key, dtype, ('latitude', 'longitude'),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': '', 'offset': 0.,
                            'scale_factor': scale_factor,
                            'long_name': long_name})
            missing = np.isnan(data)
            data[data < -3.0] = -3.0
            data[data > 3.0] = 3.0
            data = data / scale_factor
            data = data.astype(dtype)
            data[missing] = missing_value
            outvar[self.xslice, self.yslice] = data[:, :]
        elif typ == 'age':
            data = data.astype('int8')
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude', 'longitude'),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': 'days', 'long_name': 'Age {key}'.format(key=key)})
            outvar[self.xslice, self.yslice] = data[:, :]
        elif typ == 'n_valid_obs':
            data = data.astype('int8')
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude', 'longitude'),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': '', 'long_name': 'NMOD for {key}'.format(key=key)})
            outvar[self.xslice, self.yslice] = data[:, :]
        elif typ == 'quality':
            data = data.astype('uint8')
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude', 'longitude'),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': '', 'long_name': 'Quality flag {key}'.format(key=key)})
            outvar[self.xslice, self.yslice] = data[:, :]
        elif typ == 'latitude':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('latitude',),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': 'degrees', 'title': 'latitude',
                            'long_name': 'latitude'})
            outvar[self.xslice] = data[:, 0]  # as per VITO's request, take only the first column
        elif typ == 'longitude':
            outvar = f.createVariableIfNotExists(
                key, data.dtype, ('longitude',),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': 'degrees', 'title': 'longitude',
                            'long_name': 'longitude'})
            outvar[self.yslice] = data[0, :]  # as per VITO's request, take only the first row
        else:
            raise Exception('Unknown type of data to write : typ = ' + str(typ))
        f.close()
    except Exception as e:
        logging.error('Problem writing ' + key + ' : ' + str(e))
        raise e
def save_yaml(dic, filename):
    ensure_dir(filename)
    with open(filename, 'w') as f:
        yaml.dump(dic, f, indent=2)
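# Example use of save_yaml (the path and dictionary are hypothetical):
#
#   save_yaml({'date': '2020-01-01', 'bands': [1, 2, 3]},
#             '/tmp/albedo_config/config.yaml')
#
# ensure_dir is expected to create the missing parent directories first, so
# the nested path above does not need to exist beforehand.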
def to_yaml(self, filename=None):
    """ Save the full state of the DataStore object into a human-readable file """
    if filename:
        logging.debug(f'data_store : config written in {filename}')
        ensure_dir(filename)
    return to_yaml_function(dict(self), filename=filename)
def write_albedo_after_spectral_integration(self, alldata, typ, filename,
                                            missing_val=-1, scale=10000., offset=0.):
    # missing_val / scale / offset are kept in the signature for interface
    # compatibility; the packing below uses its own int16 constants
    ensure_dir(filename)
    try:
        f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
    except OSError:
        f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
    f.createDimensionIfNotExists('longitude', self.xoutputsize)
    f.createDimensionIfNotExists('latitude', self.youtputsize)
    f.createDimensionIfNotExists('time', 1)
    for iout, outname in enumerate(self.outalbedos_names):
        for iin, inname in enumerate(self.inalbedos_names):
            fullname = outname + '-' + inname
            if typ == 'albedo':
                outkey = 'AL-' + fullname
            elif typ == 'albedo-err':
                outkey = 'AL-' + fullname + '-ERR'
            # these spectral domains are not written out, so skip them
            # before anything is created in the file
            if outkey in ('AL-NI-BH', 'AL-NI-BH-ERR', 'AL-VI-BH', 'AL-VI-BH-ERR'):
                continue
            scale_factor = 1. / 10000
            missing_value = np.int16(-32767)
            dtype = np.int16
            long_name = ('Albedo {key}' if typ == 'albedo'
                         else 'Albedo cov {key}').format(key=outkey)
            outvar = f.createVariableIfNotExists(
                outkey, dtype, ('time', 'latitude', 'longitude'),
                complevel=5, fletcher32=True, zlib=True,
                attributes={'units': '', 'offset': 0.,
                            'scale_factor': scale_factor,
                            '_FillValue': missing_value,
                            'long_name': long_name})
            data = np.array([alldata[:, :, iin, iout]])  # add a leading time axis
            missing = np.isnan(data)
            data[data < -3.0] = -3.0
            data[data > 3.0] = 3.0
            data = data / scale_factor
            data = data.astype(dtype)
            data[missing] = missing_value
            outvar[:, self.xslice, self.yslice] = data[:, :, :]
    f.close()
def write_albedo_per_band(self, alldata, typ, filename, ichannel,
                          missing=-1, scale=10000., offset=0.):
    logging.debug('Writing ' + filename + ' for albedo channel ' + str(ichannel + 1))
    ensure_dir(filename)
    try:
        f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
    except OSError:
        f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
    f.createDimensionIfNotExists('x', self.xoutputsize)
    f.createDimensionIfNotExists('y', self.youtputsize)
    f.createDimensionIfNotExists('time', 1)
    self._set_date_and_version(f, self.date, __version__, self.model_id)
    f.setncattr('institution', 'IPMA')
    for j, bhdh in enumerate(self.inalbedos_names):
        if typ == 'albedo':
            outkey = 'AL-SP-' + bhdh
            long_name = 'Albedo {key}'.format(key=outkey)
        elif typ == 'albedo-err':
            outkey = 'AL-SP-' + bhdh + '-ERR'
            long_name = 'Albedo cov {key}'.format(key=outkey)
        scale_factor = 1. / 10000
        missing_value = np.int16(-32768)
        dtype = np.int16
        outvar = f.createVariableIfNotExists(
            outkey, dtype, ('time', 'y', 'x'),
            complevel=5, fletcher32=True, zlib=True,
            attributes={'units': '', 'offset': 0.,
                        'scale_factor': scale_factor,
                        '_FillValue': missing_value,
                        'long_name': long_name})
        data = np.array([alldata[:, :, ichannel, j]])  # add a leading time axis; not pretty
        missing = np.isnan(data)
        data[data < -3.0] = -3.0
        data[data > 3.0] = 3.0
        data = data / scale_factor
        data = data.astype(dtype)
        data[missing] = missing_value
        outvar[0, self.xslice, self.yslice] = data[:, :, :]
    f.close()