Example #1
    def add_array_params(self, traj):
        length = len(traj)
        da_data = np.zeros(length, dtype=int)
        traj.f_store(only_init=True)
        traj.f_add_result(SharedResult, 'daarrays.a', SharedArray()).create_shared_data(obj=da_data)
        traj.f_add_result(SharedResult, 'daarrays.ca', SharedCArray()).create_shared_data(obj=da_data)
        traj.f_add_result(SharedResult, 'daarrays.ea', SharedEArray()).create_shared_data(shape=(0, 10),
                                                            atom=pt.FloatAtom(),
                                                            expectedrows=length)
        traj.f_add_result(SharedResult, 'daarrays.vla', SharedVLArray()).create_shared_data(atom=pt.FloatAtom())


        traj.f_add_result(SharedResult, 'tabs.t1', SharedTable()).create_shared_data(description={'idx': pt.IntCol(), 'run_name': pt.StringCol(30)},
                        expectedrows=length)

        traj.f_add_result(SharedResult, 'tabs.t2', SharedTable()).create_shared_data(description={'run_name': pt.StringCol(300)})

        traj.f_add_result(SharedResult, 'pandas.df', SharedPandasFrame())

        traj.f_store()

        with StorageContextManager(traj) as cm:
            for run_name in self.traj.f_get_run_names():
                row = traj.t2.row
                row['run_name'] = run_name
                row.append()
            traj.t2.flush()

        traj.t2.create_index('run_name')
def save_h5(filepath, complib, diff):
    """Read series of floating point number from filepath and save it
    as hdf5. The extension is .h5 if diff is False. If diff is True
    the file ends with _diff.h5

    filepath -- path of the file to be read.

    complib -- compression library, passed on to PyTables. Can be
    zlib, lzo, gzip, bzip2.

    diff -- save the original numbers or the first differences.
    """
    data = numpy.loadtxt(filepath, dtype=float)
    if diff:
        new_file_path = '%s_diff_%s.h5' % (filepath, complib)
        to_save = numpy.diff(data)
    else:
        new_file_path = '%s_%s.h5' % (filepath, complib)
        to_save = data
    fltr = tables.Filters(complevel=9, complib=complib)
    new_file = tables.openFile(new_file_path, mode='w')
    carray = new_file.createCArray(new_file.root,
                                   'array',
                                   tables.FloatAtom(),
                                   numpy.shape(to_save),
                                   filters=fltr)
    if diff:
        carray.attrs.start = float(data[0])
    carray[:] = to_save
    new_file.close()
    return new_file_path
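A minimal usage sketch for save_h5, assuming a plain-text input file with one float per line; the file name 'samples.txt' is illustrative:

import numpy
import tables

# Create a small demo input file (one float per line), then compress it.
numpy.savetxt('samples.txt', numpy.random.rand(1000))
out_path = save_h5('samples.txt', complib='zlib', diff=False)

# Read the compressed CArray back (PyTables 3.x naming).
with tables.open_file(out_path, 'r') as f:
    stored = f.root.array[:]
    print(stored.shape, stored.dtype)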
Example #3
def write_regional_freqs(outfile, regions, reg_freqs, tree_freqs,
                                                         ninds_dict):
    """
    Writes expected regional allele frequencies over all simulated trees,
    as well as for each individual tree.
    """
    ## Filters for compression
    filters = tables.Filters(complevel=5, complib='blosc')

    with tables.open_file(outfile, 'w') as f:
        ## Write expected allele frequency for each tree for the region
        for region, freqs in tree_freqs.iteritems():
            ninds = ninds_dict[region]

            ## Suppress warning about naming convention
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ca = f.create_carray(f.root, region, tables.FloatAtom(),
                            shape=tree_freqs[region].shape, filters=filters)
            ca[:] = tree_freqs[region]
            ca.attrs.ninds = ninds

        ## Convert array values to string for writing to character array
        ninds_array = np.asarray([str(ninds_dict[r]) for r in regions])
        reg_freqs = map(str, reg_freqs)
        region_freqs_array = np.asarray(zip(regions, reg_freqs, ninds_array))

        ## Write total expected allele frequency per region
        title = 'region\texpected_alleles\tnum_inds'
        f.create_array(f.root, 'regional_expected_freqs', region_freqs_array,
                                title=title)
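A short read-back sketch for the file written above; the path and the region name 'EUR' are placeholders:

import tables

with tables.open_file('regional_freqs.h5', 'r') as f:
    # Per-tree expected frequencies for one region, plus its sample size.
    eur_freqs = f.root.EUR[:]              # hypothetical region name
    eur_ninds = f.root.EUR.attrs.ninds
    # Summary rows of (region, expected_alleles, num_inds), stored as strings.
    summary = f.root.regional_expected_freqs[:]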
Example #4
def subset_and_writeout(hf_in, fname, thin, maskval, binfn=lambda x:x):
    print 'Subsetting for %s'%fname
    res=5
    hf_out = tb.openFile(os.path.join('5k-covariates',fname.replace('-','_').replace('.','_')+'.hdf5'),'w')
    hf_out.createArray('/','lon',lon[lon_min_i:lon_max_i:res])
    hf_out.createArray('/','lat',lat[lat_min_i:lat_max_i:res])
    
    d = hf_in.root.data[(hf_in.root.data.shape[0]-lat_max_i*thin):\
                        (hf_in.root.data.shape[0]-lat_min_i*thin):\
                        thin, 
                        lon_min_i*thin:\
                        lon_max_i*thin:\
                        thin]
    
    d = map_utils.grid_convert(map_utils.grid_convert(d,'y-x+','x+y+')[::res,::res], 'x+y+','y-x+')
    
    hf_out.createCArray('/','data',atom=tb.FloatAtom(),shape=d.shape,filters=tb.Filters(complevel=1,complib='zlib'))
    hf_out.createCArray('/','mask',atom=tb.BoolAtom(),shape=d.shape,filters=tb.Filters(complevel=1,complib='zlib'))
    hf_out.root.data.attrs.view = 'y-x+'
    
    
    hf_out.root.data[:]=binfn(d)
    hf_out.root.mask[:] = (d==maskval)+clipped_pete_mask
    
    hf_out.close()
def generate_array(array_fname): 
	file = tables.open_file(array_fname, mode='w')
	array = file.create_earray(file.root, 'data', tables.FloatAtom(itemsize=4), (0,))
	for i in xrange(WRITE_CHUNKS): 
		new_array = np.random.rand(ELEMENT_COUNT/WRITE_CHUNKS)
		array.append(new_array)
		del new_array
	file.close() 
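A read-back sketch for the EArray written by generate_array, assuming it has already been run with array_fname='random.h5' (the name is illustrative):

import tables

with tables.open_file('random.h5', 'r') as f:
    data = f.root.data
    total = 0.0
    # Slice the EArray so the full dataset never has to fit in memory.
    for start in range(0, data.nrows, 100000):
        total += data[start:start + 100000].sum()
    print(total, data.nrows)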
Example #6
 def test99a_nonIntEnum(self):
     """Describing an enumerated column of floats (not implemented)."""
     colors = {'red': 1.0}
     self.assertRaises(NotImplementedError,
                       self._createCol,
                       colors,
                       'red',
                       base=tables.FloatAtom())
Example #7
    def test03_carray(self):
        """Check dtype accessor for EArray objects."""

        a = self.h5file.create_earray('/',
                                      'array',
                                      atom=tb.FloatAtom(),
                                      shape=[0, 2])
        self.assertEqual(a.dtype, a.atom.dtype)
Example #8
def init_array(outfile, array_name):
    """ Initialize extendable arrays to store control likelihoods """
    with tables.open_file(outfile, 'w') as f:
        title = 'Likelihood of tree having produced the observed allele freq'
        f.create_earray(f.root,
                        array_name,
                        atom=tables.FloatAtom(),
                        title=title,
                        shape=(0, ))
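A usage sketch for init_array; the output path and likelihood values are placeholders:

import numpy as np
import tables

init_array('control_liks.h5', array_name='control_liks')

# Likelihoods can then be appended incrementally as they are computed.
with tables.open_file('control_liks.h5', 'a') as f:
    for lik in np.random.rand(5):
        f.root.control_liks.append([lik])
    print(f.root.control_liks[:])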
Example #9
def main(args):
    pedfile = os.path.expanduser(args.pedfile)
    climb_lik_file = os.path.expanduser(args.climb_lik_file)
    control_lik_file = os.path.expanduser(args.control_lik_file)
    hap_length = args.haplotype_length

    print "Loading pedigree..."
    P = ped.Pedigree(pedfile)

    with tables.open_file(climb_lik_file, 'r') as f:
        shape = f.root.liks.shape
        haplotype_liks = np.zeros(shape)

        for i, tree in enumerate(f.root.trees):

            num_hidden_transmissions = get_num_hidden_transissions(P, tree)
            scale = 1. / (len(tree) - num_hidden_transmissions)
            haplotype_liks[i] = scipy.stats.erlang(2,
                                                   scale=scale).pdf(hap_length)

            if i % 10000 == 0:
                print "Calculating haplotype likelihoods for tree number", i, \
                      "of", len(haplotype_liks)

    norm_hap_liks = np.log2(haplotype_liks / np.sum(haplotype_liks))

    print "Writing combined likelihoods to file..."
    with tables.open_file(control_lik_file, 'a') as hapfile:
        tot_hap_liks = hapfile.create_carray(
            hapfile.root,
            'tot_hap_liks',
            tables.FloatAtom(),
            shape=shape,
            title='Total tree likelihoods, including haplotype length')
        haps = hapfile.create_carray(hapfile.root, 'haplotype_liks',
                tables.FloatAtom(), shape=shape,
                title='Likelihood of observed haplotype length:' + \
                        str(hap_length) + 'Morgans')

        haps[:] = norm_hap_liks
        tot_liks = hapfile.root.tot_liks[:]
        tot_hap_liks[:] = norm_hap_liks + tot_liks
Example #10
    def __init__(self, name, fname, spoints=64, destination=''):

        dirname = os.path.join(destination, name)
        if not os.path.isdir(dirname):
            os.mkdir(dirname)
        fname = os.path.join(dirname, fname)
        f = tables.open_file(fname, 'w')
        f.create_group('/', 'pos', 'positive spikes')
        f.create_group('/', 'neg', 'negative spikes')

        for sign in ('pos', 'neg'):
            f.create_earray('/' + sign, 'spikes', tables.Float32Atom(),
                            (0, spoints))
            f.create_earray('/' + sign, 'times', tables.FloatAtom(), (0, ))

        f.create_earray('/', 'thr', tables.FloatAtom(), (0, 3))

        self.f = f
        print('Initialized ' + fname)
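A sketch of how the extendable arrays created above might be filled; 'writer' stands for an instance of the class (its open file handle is self.f, as set in __init__), and the meaning of the three 'thr' columns is a guess:

import numpy as np

waveform = np.random.randn(64)                        # one spike of spoints=64 samples
writer.f.root.pos.spikes.append(waveform[None, :])    # shape (1, spoints)
writer.f.root.pos.times.append([0.0123])              # spike time
writer.f.root.thr.append([[0.0, 1.0, -1.0]])          # e.g. (time, pos_thr, neg_thr) -- guessed layout
writer.f.flush()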
Example #11
def init_output(outfile):
    """
    Creates the output HDF5 file with extendable arrays for incremental writing.
    """
    with tables.open_file(outfile, 'w') as f:
        ## Create extendable arrays so we can incrementally write output
        f.create_earray(f.root, 'ancs', atom=tables.IntAtom(), shape=(0, ))
        f.create_earray(f.root, 'liks', atom=tables.FloatAtom(), shape=(0, ))

        ## Trees, which are variable-length, must be added individually
        f.create_vlarray(f.root, 'trees', atom=tables.IntAtom())
        f.create_vlarray(f.root, 'genotypes', atom=tables.IntAtom())
    def setup_h5(self):
        filename = self.filenames[0]
        x = read_wav(filename)
        spec_x = calc_specgram(x, 22050, 1024)
        spec_x = make_4tensor(spec_x)
        self.data_shape = spec_x.shape[1:]
        self.x_earray_shape = (0,) + self.data_shape
        self.chunkshape = (1,) + self.data_shape
        self.h5_x = self.h5.createEArray('/', 'x', tables.FloatAtom(itemsize=4), self.x_earray_shape, chunkshape=self.chunkshape, expectedrows=self.num_files)
        self.h5_filenames = self.h5.createEArray('/', 'filenames', tables.StringAtom(256), (0,), expectedrows=self.num_files)
        self.h5_x.append(spec_x)
        self.h5_filenames.append([filename])
Example #13
 def create_examples(self, timesteps, nfea, species_names):
     """Creates file to store classification examples.
     """
     path = os.path.join(self.data_dir, "examples.h5")
     h5file = tables.open_file(path, 'w')
     for species in species_names:
         h5file.create_earray('/',
                              species,
                              atom=tables.FloatAtom(),
                              shape=(0, timesteps, nfea),
                              expectedrows=100000)
     h5file.close()
Example #14
    def create_db(filename, params, total_env_count=None, traj_per_env=None):
        """
        :param filename: file name for database
        :param params: dotdict describing the domain
        :param total_env_count: total number of environments in the dataset (helps to preallocate space)
        :param traj_per_env: number of trajectories per environment
        """
        N = params.grid_n
        M = params.grid_m
        num_state = N * M
        if total_env_count is not None and traj_per_env is not None:
            total_traj_count = total_env_count * traj_per_env
        else:
            total_traj_count = 0

        if os.path.isfile(filename):
            print(filename + " already exists, opening.")
            return tables.open_file(filename, mode='a')

        db = tables.open_file(filename, mode='w')

        db.create_earray(db.root, 'envs', tables.IntAtom(), shape=(0, N, M), expectedrows=total_env_count)

        db.create_earray(db.root, 'expRs', tables.FloatAtom(), shape=(0, ), expectedrows=total_traj_count)

        db.create_earray(db.root, 'valids', tables.IntAtom(), shape=(0, ), expectedrows=total_traj_count)

        db.create_earray(db.root, 'bs', tables.FloatAtom(), shape=(0, num_state), expectedrows=total_traj_count)

        db.create_earray(db.root, 'steps', tables.IntAtom(),
                         shape=(0, 3),  # state,  action, observation
                         expectedrows=total_traj_count * 10) # rough estimate

        db.create_earray(db.root, 'samples', tables.IntAtom(),
                         shape=(0, 6),  # env_id, goal_state, step_id, traj_length, collisions, failed
                         expectedrows=total_traj_count)
        db.create_earray(db.root, 'qmdpBeliefs', tables.FloatAtom(), shape=(0, num_state,),expectedrows=total_traj_count*10)
        return db
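A usage sketch for create_db, with a simple stand-in for the dotdict params (only grid_n and grid_m are read here); create_db is shown without self, so it is called as a plain function, and the file name is illustrative:

import numpy as np
from types import SimpleNamespace

params = SimpleNamespace(grid_n=10, grid_m=10)   # stand-in for the dotdict
db = create_db('demo.hdf5', params, total_env_count=100, traj_per_env=5)

# Environments and per-trajectory records are appended row by row.
db.root.envs.append(np.zeros((1, 10, 10), dtype=int))
db.root.expRs.append([0.0])
db.close()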
Example #15
 def create_examples(self, timesteps, nfea):
     """Creates file to store spatiotemporal examples.
     """
     path = os.path.join(self.data_dir, "examples.h5")
     h5file = tables.open_file(path, 'w')
     for side in ['head', 'tail']:
         group = h5file.create_group('/', side)
         h5file.create_earray(group,
                              'entering',
                              atom=tables.FloatAtom(),
                              shape=(0, timesteps, nfea),
                              expectedrows=10000)
         h5file.create_earray(group,
                              'exiting',
                              atom=tables.FloatAtom(),
                              shape=(0, timesteps, nfea),
                              expectedrows=1000)
         h5file.create_earray(group,
                              'ignore',
                              atom=tables.FloatAtom(),
                              shape=(0, timesteps, nfea),
                              expectedrows=10000)
     h5file.close()
Example #16
    def initialize_database(self, **kargs):
        """
        Initializes the data_file.

        :param kargs: Can pass in 'n_points': Maximum number of data points for an event to be added.
        """

        filters = tb.Filters(complib='blosc', complevel=4)
        shape = (kargs['n_points'],)
        a = tb.FloatAtom()
        if not 'data' in self.root:
            self.createCArray(self.root, 'data', a, shape=shape, title='Data', filters=filters)

        # set the attributes
        self.root.data.attrs.sample_rate = kargs['sample_rate']
Example #17
def get_pics_input(folder, input_train, filename):
    img_dtype = tables.FloatAtom()
    data_shape = (0, 256, 256, 3)
    hdf5_file = tables.open_file('{}.hdf5'.format(filename), mode='w')
    storage = hdf5_file.create_earray(hdf5_file.root,
                                      'images',
                                      img_dtype,
                                      shape=data_shape)

    for i in range(input_train.shape[0]):
        print(i)
        img_path = '{}/{}'.format(folder, input_train[i][0])
        img = cv2.imread(img_path) / 255.0
        img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
        # face = extract_face(img, required_size=(256,256))
        storage.append(img[None])
    hdf5_file.close()
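A read-back sketch for the EArray written by get_pics_input; 'faces.hdf5' corresponds to a hypothetical filename='faces' argument:

import tables

with tables.open_file('faces.hdf5', 'r') as f:
    images = f.root.images
    print(images.shape)           # (n_images, 256, 256, 3)
    first_batch = images[:32]     # slicing loads only the requested rows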
Example #18
def save(fname, Y):
    '''
	WARNING:  "spm1d.io.save" is deprecated and will be removed from future versions of spm1d.
	'''
    _check4pytables()
    import tables
    ### create file (existing file will be overwritten)
    fid = tables.openFile(fname, mode='w')
    try:
        ### write data:
        atom = tables.FloatAtom()
        filter0 = tables.Filters(complevel=5, complib='zlib')
        CA = fid.createCArray(fid.root, 'Y', atom, Y.shape, filters=filter0)
        CA[:] = Y
        fid.close()
    except IOError:
        fid.close()
        raise IOError('Error saving file.')
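A usage sketch for the deprecated save helper; reading the array back needs only plain PyTables, and the file name is illustrative:

import numpy as np
import tables

Y = np.random.randn(10, 101)
save('my_data.h5', Y)                 # writes the compressed CArray 'Y'

with tables.open_file('my_data.h5', 'r') as fid:
    Y_back = fid.root.Y[:]
assert np.allclose(Y, Y_back)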
Example #19
    def bin_write(onebin):
        binid = bid2i[onebin]
        #pdb.set_trace()
        if not "/" + binid in drugbins:
            ncol = tabcache[onebin][0].shape[1]
            drugbins.create_earray(drugbins.root,
                                   binid,
                                   tables.FloatAtom(),
                                   shape=(0, ncol),
                                   chunkshape=(50, ncol))
            #outcome_tab.create_vlarray(outcome_tab.root, binid, tables.Int32Atom())
        #pdb.set_trace()

        if True:  #drugbins.get_node("/" + binid).shape[0] < 50000:
            to_write = np.vstack(tabcache[onebin])
            drugbins.get_node("/" + binid).append(to_write)
            '''
            with open("tmp/e" + savename + binid,'ab') as f:
                np.savetxt(f, to_write)
            for i in outcomecache[onebin]:
                outcome_tab.get_node("/" + binid).append(list(i))
            '''
            #if to_write.shape[1] > 6:
            #    pdb.set_trace()
            if to_write.shape[1] > 6:

                scaler.partial_fit(to_write[:,
                                            6:])  ###patid, drug, [4-d bin] = 6
            with open(tmp + binid, 'a') as f:
                f.write("\n".join(
                    [json.dumps(list(i))
                     for i in outcomecache[onebin]]) + '\n')
        #elif binid not in overflowlist:
        #    print("exceeded 50000 for " + binid)
        #    overflowlist.append(binid)
        tabcacheCt[onebin] = 0
        tabcache[onebin] = []
        outcomecache[onebin] = []
Example #20
    def save_data_h5(self, filename, notes=''):
        """Save all the data tables in an hdf5 file.

        The structure in the hdf5 file mirrors the structure in the
        model under /data element - but only two levels deep."""
        config.LOGGER.debug('Start saving the data')
        starttime = datetime.now()
        compression_filter = tables.Filters(complevel=9,
                                            complib='zlib',
                                            fletcher32=True)
        h5file = tables.openFile(
            filename,
            mode='w',
            title='Traub Network: timestamp: %s' %
            (config.timestamp.strftime('%Y-%m-%d %H:%M:%S')),
            filters=compression_filter)
        h5file.root._v_attrs.simtime = self.simtime
        h5file.root._v_attrs.simdt = self.simdt
        h5file.root._v_attrs.plotdt = self.plotdt
        h5file.root._v_attrs['notes'] = notes
        h5file.root._v_attrs.timestamp = config.timestamp.strftime(
            '%Y-%m-%d %H:%M:%S')
        # Save simulation configuration data. I am saving it both in
        # data file as well as network file as often the data file is
        # too large and may not be available if the simulation is
        # cancelled midway.
        runconfig = h5file.createGroup(h5file.root, 'runconfig',
                                       'Simulation settings')
        for section in config.runconfig.sections():
            table_data = config.runconfig.items(section)
            if table_data:
                sectiontab = h5file.createTable(runconfig, section, table_data)
        for child_id in self.data.children():
            child = moose.Neutral(child_id)
            if child.className == 'Neutral':
                group = h5file.createGroup(h5file.root, child.name)
                for tab_id in child.children():
                    tab = moose.Table(tab_id)
                    print 'Saving', tab.path, 'of length:', len(tab)
                    if tab.className == 'Table':
                        carray = h5file.createCArray(group,
                                                     tab.name,
                                                     tables.FloatAtom(),
                                                     shape=(len(tab), ))
                        carray[:] = numpy.array(tab)
            elif child.className == 'Table':
                tab = moose.Table(child_id)
                carray = h5file.createCArray(h5file.root,
                                             tab.name,
                                             tables.FloatAtom(),
                                             shape=(len(tab), ))
                carray[:] = numpy.array(tab)

                # NOTE: I found that saving the first value with first
                # forwards diffs saves a lot of space on
                # compression. But will go with direct method now to
                # see if it is worth it.
            else:
                raise Warning(
                    'Element %s of unhandled type %s under data element -- not known for saving.'
                    % (child.name, child.className))
        h5file.close()
        endtime = datetime.now()
        delta = endtime - starttime
        config.BENCHMARK_LOGGER.info(
            'Saved data to %s: %g s' %
            (filename,
             delta.days * 86400 + delta.seconds + delta.microseconds * 1e-6))
freq_files = sorted([line.strip() for line in open(freq_file_paths)])

with tables.open_file(freq_files[0], 'r') as f:
    nodes = [
        n for n in f.walk_nodes()
        if n._v_name != '/' and n._v_name != 'regional_expected_freqs'
    ]
    region_names = [n._v_name for n in nodes]
    region_sizes = [n._v_attrs['ninds'] for n in nodes]

with tables.open_file(freq_merge_file, 'w') as f:
    node_dict = {}
    for name, size in zip(region_names, region_sizes):
        node_dict[name] = tables.EArray(f.root,
                                        name,
                                        tables.FloatAtom(),
                                        shape=(0, ))
        node_dict[name]._v_attrs['ninds'] = size

    for fname in freq_files:
        print("Merging", fname)
        with tables.open_file(fname, 'r') as g:
            for name in region_names:
                print("Merging", name)
                new_node = g.get_node(g.root, name)
                node_dict[name].append(new_node[:])

# with tables.open_file(climb_merge_file, 'w') as climb_merge_file:
#     ancs = tables.EArray(climb_merge_file.root, 'ancs', tables.IntAtom(), shape=(0,))
#     genotypes = tables.VLArray(climb_merge_file.root, 'genotypes', tables.IntAtom())
#     liks = tables.EArray(climb_merge_file.root, 'liks', tables.IntAtom(), shape=(0,))
Example #22
# Append several rows with default values
for i in range(10):
    table.row.append()
table.flush()

# create new arrays
atom1 = tables.IntAtom()
shape1 = (2, 10, 10, 1)
filters1 = tables.Filters(complevel=1)
#(2, 10, 10, 3)
array1 = fileh.create_carray(fileh.root,
                             'array1',
                             atom1,
                             shape1,
                             filters=filters1)
atom2 = tables.FloatAtom()
shape2 = (2, 10, 10, 3, 1)
filters2 = tables.Filters(complevel=1)
#(2, 10, 10, 3, 200)
array2 = fileh.create_carray(fileh.root,
                             'array2',
                             atom2,
                             shape2,
                             filters=filters2)

# Add multidimensional attributes to the objects
# Integers will go in /table
table.attrs.MD1 = numpy.arange(5, dtype="int8")
table.attrs.MD2 = numpy.arange(10, dtype="int64").reshape(2, 5)

# Complex will go in /array1
Example #23
climb_lik_file = '/RQusagers/dnelson/project/anc_finder/results/BALSAC/CAID_3M_all_anc_out.csv'

max_trees = 1000

print "Loading climbing likelihoods"
climb_liks = np.genfromtxt(climb_lik_file,
                           skip_header=True,
                           delimiter=',',
                           usecols=[1])[:max_trees]
print "Loading trees"
trees = [line.strip() for line in open(tree_anc_file, 'r')][:max_trees]

climb_outfile = os.path.expanduser('~/temp/CAID_climb_1000.h5')
with tables.open_file(climb_outfile, 'w') as f:
    ## Create extendable arrays so we can incrementally write output
    f.create_earray(f.root, 'liks', atom=tables.FloatAtom(), shape=(0, ))
    f.create_earray(f.root, 'ancs', atom=tables.FloatAtom(), shape=(0, ))

    ## Trees, which are variable-length, must be added individually
    f.create_vlarray(f.root, 'trees', atom=tables.IntAtom())
    f.create_vlarray(f.root, 'genotypes', atom=tables.IntAtom())

incremental_write(climb_liks, trees, climb_outfile)

## Store control likelihoods
# control_outfile = os.path.expanduser('~/temp/CAID_control.h5')
#
# init_array(control_outfile, array_name='control_liks')
# with tables.open_file(control_outfile, 'a') as f:
#     f.root.control_liks.append([np.log2(conv_prob)])
def main():
    # Input variables
    # ##########################################################################
    # Start and end year
    start_year = 1997
    end_year = 2012

    # Aggregations ('A' : annual, 'M' : monthly)
    agg = ['A', 'M']
    # Number of nan values in daily resolution for each aggregation
    nan_max = [25, 2]

    # plot variogram : True or False
    save_plots = True

    # Input and output files
    # ##########################################################################
    # daily precipitation data
    fn_precip = os.path.join(basepath, '03_Data', '1781_2016_daily.h5')
    ##    fn_precip = os.path.join(basepath, '03_Data','02_data_cross_validation',
    ##                             'set_a', '1781_2016_daily.h5')
    # dem germany
    fn_dem = os.path.join(basepath, '02_Import', '03_QGIS_project_germany',
                          'xyz_germany_utm.dat')
    # smoothed elevation of gauges
    fn_sme_gauges = os.path.join(basepath, '05_Smoothed_elevation',
                                 'smoothed_elevation_gauges.h5')
    # smoothed elevation grid
    fn_sme_grid = os.path.join(basepath, '05_Smoothed_elevation',
                               'smoothed_elevation_ger.h5')

    outpath = os.path.join(basepath, '04_Research', '00_Interpolation',
                           '01_results', '01_data', 'set_orig')
    outpath_misc = os.path.join(outpath, 'misc')
    if not os.path.exists(outpath_misc): os.makedirs(outpath_misc)

    # output file name (with EDK interpolated precipitation values)
    fn_month = os.path.join(
        outpath,
        r'monthly_precip_interpolated_{}_{}.h5'.format(start_year, end_year))
    fn_year = os.path.join(
        outpath,
        r'yearly_precip_interpolated_{}_{}.h5'.format(start_year, end_year))
    # Load data
    # ##########################################################################
    print('reading data')
    # dem germany
    dem = np.loadtxt(fn_dem, delimiter=';').astype(int)
    ##    ids =['00001', '00002', '00003', '00004', '00006', '00007', '00008',
    ##       '00009', '00010', '00012', '00013', '00014', '00015', '00016',
    ####       '00017', '00018', '00019', '00020', '00021', '00022', '00023',
    ####       '00024', '00025', '00026', '00027', '00028', '00029', '00030',
    ####       '00031', '00032', '00033', '00034', '00035', '00036', '00037',
    ####       '00038', '00039', '00040', '00041', '00042', '00043', '00044',
    ##       '00045', '00046', '00047', '00048', '00050', '00051', '00052',
    ##       '00053']
    # get data with the SYNOPSE II functions ;-)
    data_daily = z_functions.Get_Daily_Data(
        fn_precip,
        start_year,
        end_year  ##, ids = ids
    )

    # meta data
    ts_iso = data_daily.df.index.map(
        lambda x: datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%S'))
    ts_year = data_daily.df.index.year

    # Preprocessing
    # ##########################################################################
    print 'aggregating data'
    for iagg, inan_max in zip(agg, nan_max):
        data_daily.get_filtered_timeseries(iagg, inan_max)

    # Isoformat transformation
    ts_month = data_daily.data['M'].index.map(
        lambda x: datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%S'))
    ts_year = data_daily.data['A'].index.map(
        lambda x: datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%S'))

    # Create output hdfs
    # ##########################################################################
    print 'create hdf output files'

    # Monthly
    # --------------------------------------------------------------------------
    hf = tables.open_file(fn_month, 'w', filters=tables.Filters(complevel=6))

    # Write kriged data to file
    hf.create_carray(where=hf.root,
                     name='monthly_inter',
                     atom=tables.FloatAtom(dflt=np.nan),
                     shape=(dem.shape[0], ts_month.shape[0]),
                     chunkshape=(dem.shape[0], 1),
                     title=('Interpolated monthly rainfall values (IDW).'))

    # Write metadata to hdf file
    # Timestamps
    hf.create_carray(where=hf.root,
                     name='ts_iso',
                     atom=tables.StringAtom(itemsize=19, dflt=''),
                     shape=ts_month.shape,
                     chunkshape=ts_month.shape,
                     title='Timestamps in isoformat.')
    hf.root.ts_iso[:] = ts_month

    # xyz values
    hf.create_group(where=hf.root,
                    name='coord_utm',
                    title=('UTM-coordinates for a 5 km grid of Germany.'))
    hf.create_carray(where=hf.root.coord_utm,
                     name='x',
                     atom=tables.Int64Atom(dflt=-99),
                     shape=(dem.shape[0], ),
                     chunkshape=(dem.shape[0], ),
                     title='UTM-coordinates in x direction.')
    hf.create_carray(where=hf.root.coord_utm,
                     name='y',
                     atom=tables.Int64Atom(dflt=-99),
                     shape=(dem.shape[0], ),
                     chunkshape=(dem.shape[0], ),
                     title='UTM-coordinates in y direction.')
    hf.root.coord_utm.x[:] = dem[:, 0]
    hf.root.coord_utm.y[:] = dem[:, 1]

    # Close hdf file
    hf.close()

    # Yearly
    # --------------------------------------------------------------------------
    hf = tables.open_file(fn_year, 'w', filters=tables.Filters(complevel=6))

    # Write kriged data to file
    hf.create_carray(where=hf.root,
                     name='yearly_inter',
                     atom=tables.FloatAtom(dflt=np.nan),
                     shape=(dem.shape[0], ts_year.shape[0]),
                     chunkshape=(dem.shape[0], 1),
                     title=('Interpolated yearly rainfall values (IDW).'))

    # Write metadata to hdf file
    # Timestamps
    hf.create_carray(where=hf.root,
                     name='ts_iso',
                     atom=tables.StringAtom(itemsize=19, dflt=''),
                     shape=ts_year.shape,
                     chunkshape=ts_year.shape,
                     title='Timestamps in isoformat.')
    hf.root.ts_iso[:] = ts_year

    # xyz values
    hf.create_group(where=hf.root,
                    name='coord_utm',
                    title=('UTM-coordinates for a 5 km grid of Germany.'))
    hf.create_carray(where=hf.root.coord_utm,
                     name='x',
                     atom=tables.Int64Atom(dflt=-99),
                     shape=(dem.shape[0], ),
                     chunkshape=(dem.shape[0], ),
                     title='UTM-coordinates in x direction.')
    hf.create_carray(where=hf.root.coord_utm,
                     name='y',
                     atom=tables.Int64Atom(dflt=-99),
                     shape=(dem.shape[0], ),
                     chunkshape=(dem.shape[0], ),
                     title='UTM-coordinates in y direction.')
    hf.root.coord_utm.x[:] = dem[:, 0]
    hf.root.coord_utm.y[:] = dem[:, 1]

    # Close hdf file
    hf.close()

    # Preprocessing
    # ##########################################################################
    # ##########################################################################

    # smoothed elevation of gauges
    hdf = tables.open_file(fn_sme_gauges, 'r')
    # smoothing radii
    se_radii = hdf.root.sm_radii[:]
    # smoothing vector
    se_vector = hdf.root.sm_vec[:]
    # gauges for smoothed elevation arrays
    se_ids = hdf.root.id[:]
    bool_ids = np.in1d(se_ids, data_daily.ids)
    # smoothed elevation
    se_sme = hdf.root.sme[:][bool_ids]
    se_ids = hdf.root.id[:][bool_ids]
    hdf.close()

    # Calculate correlations with smoothed elevation
    # ##########################################################################
    print 'find smoothed elevation'
    corr_month = np.full(
        (ts_month.shape[0], se_vector.shape[0], se_radii.shape[0]),
        fill_value=np.nan)
    corr_year = np.full(
        (ts_year.shape[0], se_vector.shape[0], se_radii.shape[0]),
        fill_value=np.nan)

    # monthly precipitation
    # ---------------------
    for ivec, vec in enumerate(se_vector):
        for iradii, radii in enumerate(se_radii):
            for imonth, month in enumerate(ts_month):
                # finite values
                fin_bool = np.isfinite(data_daily.data['M'].values[imonth])
                # calculate correlation coefficient : smoothed elevation and
                # precipitation
                corr_month[imonth, ivec, iradii] = (np.corrcoef(
                    se_sme[fin_bool, ivec, iradii],
                    data_daily.data['M'].values[imonth, fin_bool])[0, 1])
    # mean monthly correlations (mean over months)
    meancorr_month = np.mean(corr_month, axis=0)
    # find smoothing vector and radius for maximum correlation
    idxs_maxmonth = np.where(meancorr_month == np.max(meancorr_month))
    print 'monthly: {} / {} km'.format(se_vector[idxs_maxmonth[0]][0],
                                       se_radii[idxs_maxmonth[1]][0])
    # smoothed elevation gauges
    smgauges_month = se_sme[:, idxs_maxmonth[0], idxs_maxmonth[1]]

    # smoothed grid elevation for monthly precipitation
    hdf = tables.open_file(fn_sme_grid, 'r')
    smgrid_month = hdf.root.sme[:, idxs_maxmonth[0],
                                idxs_maxmonth[1]].flatten()
    hdf.close()

    # save information to file
    with open(os.path.join(outpath_misc, 'monthly_se_info.txt'), 'w') as f:
        f.write('direction: {}\n'.format(se_vector[idxs_maxmonth[0]][0]))
        f.write('radius: {}'.format(se_radii[idxs_maxmonth[1]][0]))

    # yearly precipitation
    # --------------------
    for ivec, vec in enumerate(se_vector):
        for iradii, radii in enumerate(se_radii):
            for iyear, year in enumerate(ts_year):
                # finite values
                fin_bool = np.isfinite(data_daily.data['A'].values[iyear])
                # calculate correlation coefficient : smoothed elevation and
                # precipitation
                corr_year[iyear, ivec, iradii] = (np.corrcoef(
                    se_sme[fin_bool, ivec, iradii],
                    data_daily.data['A'].values[iyear, fin_bool])[0, 1])
    # mean yearly correlations (mean over years)
    meancorr_year = np.mean(corr_year, axis=0)
    # find smoothing vector and radius for maximum correlation
    idxs_maxyear = np.where(meancorr_year == np.max(meancorr_year))
    print 'yearly: {} / {} km'.format(se_vector[idxs_maxyear[0]][0],
                                      se_radii[idxs_maxyear[1]][0])

    # smoothed elevation gauges
    smgauges_year = se_sme[:, idxs_maxyear[0], idxs_maxyear[1]]

    # smoothed grid elevation for yearly precipitation
    hdf = tables.open_file(fn_sme_grid, 'r')
    smgrid_year = hdf.root.sme[:, idxs_maxyear[0], idxs_maxyear[1]].flatten()
    hdf.close()

    # save information to file
    with open(os.path.join(outpath_misc, 'yearly_se_info.txt'), 'w') as f:
        f.write('direction: {}\n'.format(se_vector[idxs_maxyear[0]][0]))
        f.write('radius: {}'.format(se_radii[idxs_maxyear[1]][0]))

    # Variograms
    # ##########################################################################
    print 'estimate variograms'

    # Set variogram
    vario = vf.Variogram()
    # width of distance classes of the variogram (5 km)
    vario.setwidthclasses(5000.)

    # empirical variogram array
    distances = np.arange(2500., 102500., 5000.)
    empvarios_month = np.full((distances.shape[0], ts_month.shape[0]),
                              fill_value=np.nan)

    # bounds of the variogram parameters for the variogram fit
    bounds = {
        'spherical range': [5000., 100000.],
        'spherical sill': [0., 100000.],
        'exponential range': [5000., 100000.],
        'exponential sill': [0., 100000.],
        'gauss range': [5000., 100000.],
        'gauss sill': [0., 100000.]
    }

    # monthly precipitation
    # --------------------------------------------------------------------------
    print 'monthly precip'
    # calculate empirical variograms for each month
    for imonth, month in enumerate(ts_month):
        ##print imonth
        # finite values
        fin_bool = np.isfinite(data_daily.data['M'].values[imonth])
        # empirical variograms
        vario.setxcoord(data_daily.x[fin_bool].values)
        vario.setycoord(data_daily.y[fin_bool].values)
        vario.setdata(data_daily.data['M'].values[imonth][fin_bool])
        vario.calc_expervar()
        # for which distances
        bool_dist = np.in1d(vario.expervar[0], distances)
        empvarios_month[bool_dist, imonth] = vario.expervar[1]

    # fit theoretical variogram to every month of the year
    variomod_month = pd.Series().astype(str)
    for year_month in range(1, 13):
        print year_month
        # varios months belonging to the year_month
        empvarios = empvarios_month[:, data_daily.data['M'].index.month ==
                                    year_month]
        # mean vario
        meanvario = np.nanmean(empvarios, axis=1)
        # fit variogram
        dist_vario = np.vstack((distances, meanvario))
        nb_dec, sillbound = 2, 100000.
        variomod = vf.find_bestvario_expervar(
            dist_vario,
            bounds,
            nb_dec,
            sillbound,
            variomods=['exponential', 'gauss', 'spherical'])

        # calculate theoretical variogram values
        plotdistances = np.arange(0., distances[-1] + 2500., 100.)
        vario = vf.Variogram()
        vario.name = variomod
        vario.get_params_name()
        vario.get_theovartype_name()
        vario.calc_theovar(plotdistances)
        variomod_month[str(year_month)] = variomod

        # Control plot
        if save_plots:
            plt.title('monthly variogram / month: {}'.format(year_month))
            plt.plot(plotdistances, vario.theovar, 'r-', lw=3)
            plt.plot(distances, meanvario, 'kx')
            plt.tight_layout()
            plt.savefig(
                os.path.join(outpath_misc,
                             'monthly_vario_{}.png'.format(year_month)))
            plt.close()

    variomod_month.to_csv(os.path.join(outpath_misc, 'monthly_vario_fit.csv'),
                          sep=';')
    # read the data to have it in a consistent structure compared to other
    # scripts
    variomod_month = pd.read_csv(os.path.join(outpath_misc,
                                              'monthly_vario_fit.csv'),
                                 sep=';',
                                 header=None,
                                 index_col=0)[1]

    # yearly precipitation
    # --------------------------------------------------------------------------
    # Set variogram
    vario = vf.Variogram()
    # width of distance classes of the variogram (5 km)
    vario.setwidthclasses(5000.)

    empvarios_year = np.full((distances.shape[0], ts_year.shape[0]),
                             fill_value=np.nan)
    print 'yearly precip'

    # calculate empirical variograms for each year
    for iyear, year in enumerate(ts_year):
        print iyear
        # finite values
        fin_bool = np.isfinite(data_daily.data['A'].values[iyear])
        # empirical variograms
        vario.setxcoord(data_daily.x[fin_bool].values)
        vario.setycoord(data_daily.y[fin_bool].values)
        vario.setdata(data_daily.data['A'].values[iyear][fin_bool])
        vario.calc_expervar()
        # for which distances
        bool_dist = np.in1d(vario.expervar[0], distances)
        empvarios_year[bool_dist, iyear] = vario.expervar[1]

    # fit a theoretical variogram to every year
    variomod_year = pd.Series().astype(str)
    for iyear, year in enumerate(data_daily.data['A'].index.year):
        # varios for every year
        empvarios = empvarios_year[:, iyear]
        # fit variogram
        dist_vario = np.vstack((distances, empvarios))
        nb_dec, sillbound = 2, 100000.
        variomod = vf.find_bestvario_expervar(
            dist_vario,
            bounds,
            nb_dec,
            sillbound,
            variomods=['exponential', 'gauss', 'spherical'])

        # calculate theoretical variogram values
        plotdistances = np.arange(0., distances[-1] + 2500., 100.)
        vario = vf.Variogram()
        vario.name = variomod
        vario.get_params_name()
        vario.get_theovartype_name()
        vario.calc_theovar(plotdistances)
        variomod_year[str(year)] = variomod

        # Control plot
        if save_plots:
            plt.title('yearly variogram / year: {}'.format(year))
            plt.plot(plotdistances, vario.theovar, 'r-', lw=3)
            plt.plot(distances, empvarios, 'kx')
            plt.tight_layout()
            plt.savefig(
                os.path.join(outpath_misc, 'yearly_vario_{}.png'.format(year)))
            plt.close()

    variomod_year.to_csv(os.path.join(outpath_misc, 'yearly_vario_fit.csv'),
                         sep=';')
    # read the data to have it in a consistent structure compared to other
    # scripts
    variomod_year = pd.read_csv(os.path.join(outpath_misc,
                                             'yearly_vario_fit.csv'),
                                sep=';',
                                header=None,
                                index_col=0)[1]

    # Interpolation
    # ##########################################################################
    # ##########################################################################

    print 'monthly interpolation'

    # external drift kriging
    kriging = kf.Kriging()
    # loop over timesteps
    for its, ts in enumerate(ts_month):
        print 'its: {} / {}'.format(its + 1, ts_month.shape[0])

        # Select only finite values
        mask_precip = np.isfinite(data_daily.data['M'].values[its])
        precip_ts = data_daily.data['M'].values[its, mask_precip]
        xy = np.vstack(
            (data_daily.x[mask_precip], data_daily.y[mask_precip])).T

        # get variogram of the month and set the variogram for kriging
        variomod = variomod_month[data_daily.data['M'].index.month[its]]
        vario = vf.Variogram()
        vario.name = variomod
        vario.get_params_name()
        vario.get_theovartype_name()

        # Get external drift of gauges
        ed_gauges = (smgauges_month.flatten())[mask_precip]

        ##        # control plot
        ##            plt.scatter(dem[:, 0], dem[:, 1], c=smgrid_month,
        ##                        lw=0, s=20, marker='o')
        ##            plt.scatter(data_daily.x, data_daily.y, c=smgauges_month,
        ##                        lw=0, s=20, marker='s')
        ##            plt.show()
        ##            plt.close()

        # controls
        kriging.setcontrols(xy)
        kriging.setcontrolvalues(precip_ts)
        kriging.setcontrols_ed(ed_gauges)
        # targets
        kriging.settargets(dem[:, :2])
        kriging.settargets_ed(smgrid_month)

        kriging.krige_values(vario,
                             method='external drift kriging',
                             min_stations=10,
                             positive_weights='no')

        # do not allow for negative values
        target_values = kriging.kriged_values
        # checking for error values (negative values)
        target_values[target_values < 0.] = 0.
        print kriging.kriging_weights[0]
        print target_values[0]
        # control plot
        plt.scatter(dem[:, 0],
                    dem[:, 1],
                    c=target_values,
                    lw=0,
                    s=20,
                    marker='o')
        plt.scatter(xy[:, 0], xy[:, 1], c=precip_ts, lw=0, s=20, marker='s')
        # plt.show()
        plt.savefig('monthly_interpolation.png', dpi=600)
        plt.close()

        # Write interpolated data to hdf file
        hf = tables.open_file(fn_month, 'r+')
        hf.root.monthly_inter[:, its] = target_values
        hf.close()

    # ##########################################################################
    print 'yearly interpolation'

    # loop over timesteps
    for its, ts in enumerate(ts_year):
        print 'its: {} / {}'.format(its + 1, ts_year.shape[0])

        # Select only finite values
        mask_precip = np.isfinite(data_daily.data['A'].values[its])
        precip_ts = data_daily.data['A'].values[its, mask_precip]
        xy = np.vstack(
            (data_daily.x[mask_precip], data_daily.y[mask_precip])).T

        # get variogram of the year and set the variogram for kriging
        variomod = variomod_year[data_daily.data['A'].index.year[its]]
        vario = vf.Variogram()
        vario.name = variomod
        vario.get_params_name()
        vario.get_theovartype_name()

        # Get external drift of gauges
        ed_gauges = (smgauges_year.flatten())[mask_precip]

        # external drift kriging
        kriging = kf.Kriging()
        # controls
        kriging.setcontrols(xy)
        kriging.setcontrolvalues(precip_ts)
        kriging.setcontrols_ed(ed_gauges)
        # targets
        kriging.settargets(dem[:, :2])
        kriging.settargets_ed(smgrid_year)

        kriging.krige_values(vario,
                             method='external drift kriging',
                             min_stations=10,
                             positive_weights='no')
        # do not allow for negative values
        target_values = kriging.kriged_values
        # checking for error values (negative values)
        target_values[target_values < 0.] = 0.

        # control plot
        plt.scatter(dem[:, 0],
                    dem[:, 1],
                    c=target_values,
                    lw=0,
                    s=20,
                    marker='o')
        plt.scatter(xy[:, 0], xy[:, 1], c=precip_ts, lw=0, s=20, marker='s')
        plt.savefig('yearly_interpolation.png', dpi=600)
        # plt.show()
        plt.close()

        # Write interpolated data to hdf file
        hf = tables.open_file(fn_year, 'r+')
        hf.root.yearly_inter[:, its] = target_values
        hf.close()
Example #25
def createCarray(file, path, name, shape, atom=tables.FloatAtom()):
    file.create_carray(path, name, atom, shape)
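A brief usage sketch for the wrapper; the file and array names are illustrative, and the array is filled afterwards since a CArray is preallocated:

import tables

with tables.open_file('grid.h5', 'w') as h5f:
    createCarray(h5f, '/', 'weights', shape=(100, 100))
    h5f.root.weights[:] = 0.0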
Example #26
    def initialize_database(self, **kargs):
        """
        Initializes the EventDatabase.  Adds a group 'events' with
        table 'eventTable' and matrices 'raw_data', 'levels', and 'level_lengths'.

        :param kargs: Dictionary - includes:
                        -maxEventLength: Maximum number of datapoints for an event to be added.
        """
        if 'maxEventLength' in kargs:
            if kargs['maxEventLength'] > self.max_event_length:
                self.max_event_length = kargs['maxEventLength']
        if 'events' not in self.root:
            self.createGroup(self.root, 'events', 'Events')

        if not 'eventTable' in self.root.events:
            self.createTable(self.root.events, 'eventTable', _Event,
                             'Event parameters')
            self.event_row = None

        filters = tb.Filters(complib='blosc', complevel=4)
        shape = (0, self.max_event_length)
        a = tb.FloatAtom()
        b = tb.IntAtom()

        if not 'raw_data' in self.root.events:
            self.createEArray(self.root.events,
                              'raw_data',
                              a,
                              shape=shape,
                              title="Raw data points",
                              filters=filters)

        if not 'levels' in self.root.events:
            self.createEArray(self.root.events,
                              'levels',
                              a,
                              shape=shape,
                              title="Cusum levels",
                              filters=filters)

        if not 'level_lengths' in self.root.events:
            self.createEArray(self.root.events,
                              'level_lengths',
                              b,
                              shape=shape,
                              title="Lengths of the cusum levels",
                              filters=filters)

        # Create/init the debug group if needed.
        if 'debug' in kargs and kargs['debug']:
            if not 'debug' in self.root:
                self.createGroup(self.root, 'debug', 'Debug')
            debug_shape = (kargs['n_channels'], kargs['n_points'])
            if not 'data' in self.root.debug:
                self.createCArray(self.root.debug,
                                  'data',
                                  a,
                                  shape=debug_shape,
                                  title="Raw data",
                                  filters=filters)

            if not 'baseline' in self.root.debug:
                self.createCArray(self.root.debug,
                                  'baseline',
                                  a,
                                  shape=debug_shape,
                                  title="Baseline data",
                                  filters=filters)

            if not 'threshold_positive' in self.root.debug:
                self.createCArray(self.root.debug,
                                  'threshold_positive',
                                  a,
                                  shape=debug_shape,
                                  title="Raw data",
                                  filters=filters)

            if not 'threshold_negative' in self.root.debug:
                self.createCArray(self.root.debug,
                                  'threshold_negative',
                                  a,
                                  shape=debug_shape,
                                  title="Raw data",
                                  filters=filters)
Example #27
    def test04_vlarray(self):
        """Check dtype accessor for VLArray objects."""

        a = self.h5file.create_vlarray('/', 'array', tb.FloatAtom())
        self.assertEqual(a.dtype, a.atom.dtype)
Example #28
    def scan(self):
        # Output data structures
        scan_parameter_values = self.scan_parameters.PlsrDAC
        shape=(len(scan_parameter_values), self.max_data_index)
        atom = tb.FloatAtom()
        data_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='PlsrDACwaveforms', title='Waveforms from transient PlsrDAC calibration scan', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        shape=(self.max_data_index,)
        atom = tb.FloatAtom()
        time_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='Times', title='Time values', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        data_out.attrs.scan_parameter_values = scan_parameter_values
        data_out.attrs.enable_double_columns = self.enable_double_columns
        data_out.attrs.fit_ranges = self.fit_ranges
        data_out.attrs.trigger_level_offset = self.trigger_level_offset
        trigger_levels = []

        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(scan_parameter_values), term_width=80)
        progress_bar.start()

        for index, scan_parameter_value in enumerate(scan_parameter_values):
            if self.stop_run.is_set():
                break
            # Update PlsrDAC parameter
            self.set_scan_parameters(PlsrDAC=scan_parameter_value)  # set scan parameter
            self.write_global_register('PlsrDAC', scan_parameter_value)  # write to FE
            self.dut['Oscilloscope'].set_acquire_mode('SAMple')
            self.dut['Oscilloscope'].set_acquire_stop_after("RUNSTop")
            self.dut['Oscilloscope'].set_acquire_state("RUN")
            time.sleep(1.5)
            self.dut['Oscilloscope'].force_trigger()
            self.dut['Oscilloscope'].set_acquire_state("STOP")
            data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
            self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
            times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
            if len(data):
                trigger_level = (np.mean(voltages) - self.trigger_level_offset * 1e-3) / 2.0 + self.trigger_level_offset * 1e-3
            else:
                trigger_level = trigger_levels[-1]
            self.dut['Oscilloscope'].set_trigger_level(trigger_level)
            self.dut['Oscilloscope'].set_vertical_scale(min(self.vertical_scale, (np.mean(voltages) + 0.2 * np.mean(voltages)) / 10), channel=self.channel)
            #self.dut['Oscilloscope'].set_vertical_scale(0.05, channel=self.channel)

            if self.show_debug_plots:
                plt.clf()
                plt.grid()
                plt.plot(times * 1e9, voltages * 1e3, label='PlsrDAC Pulse')
                plt.axhline(y=trigger_level * 1e3, linewidth=2, linestyle="--", color='r', label='Trigger (%0.1f mV)' % (trigger_level * 1e3))
                plt.xlabel('Time [ns]')
                plt.ylabel('Voltage [mV]')
                plt.legend(loc=0)
                plt.show()

            # Set up data acquisition and start the scan loop
            self.dut['Oscilloscope'].set_acquire_mode('AVErage')  # average to reduce noise while keeping high bandwidth
            self.dut['Oscilloscope'].set_acquire_stop_after("SEQuence")
            self.dut['Oscilloscope'].set_acquire_state("RUN")
            time.sleep(1.5)
            super(PlsrDacTransientCalibrationAdvanced, self).scan()  # analog scan loop
            self.dut['Oscilloscope'].set_acquire_state("STOP")
            # get final number of data points
#             if not self.dut['Oscilloscope'].get_number_points():
#                 raise RuntimeError()
            data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
            self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
            times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
            data_out[index, :] = voltages[:]
            trigger_level = float(self.dut['Oscilloscope'].get_trigger_level())
            trigger_levels.append(trigger_level)
            progress_bar.update(index)

            if self.show_debug_plots:
                plt.clf()
                plt.ylim(0, 1500)
                plt.grid()
                plt.plot(times * 1e9, voltages * 1e3, label='PlsrDAC Pulse')
                plt.axhline(y=trigger_level * 1e3, linewidth=2, linestyle="--", color='r', label='Trigger (%0.1f mV)' % (trigger_level * 1e3))
                plt.xlabel('Time [ns]')
                plt.ylabel('Voltage [mV]')
                plt.legend(loc=0)
                plt.show()

            self.dut['Oscilloscope'].set_vertical_scale(self.vertical_scale, channel=self.channel)

        time_out[:] = times
        data_out.attrs.trigger_levels = trigger_levels
        progress_bar.finish()
Example #29
    def scan(self):
        # Output data structures
        scan_parameter_values = self.scan_parameters.PlsrDAC
        shape = (len(scan_parameter_values), self.data_index)
        atom = tb.FloatAtom()
        data_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='PlsrDACwaveforms', title='Waveforms from transient PlsrDAC calibration scan', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        shape = (self.data_index,)
        atom = tb.FloatAtom()
        time_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='Times', title='Time values', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        data_out.attrs.scan_parameter_values = scan_parameter_values
        data_out.attrs.enable_double_columns = self.enable_double_columns
        data_out.attrs.fit_ranges = self.fit_ranges
        data_out.attrs.trigger_level_offset = self.trigger_level_offset
        trigger_levels = []

        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(scan_parameter_values), term_width=80)
        progress_bar.start()

        for index, scan_parameter_value in enumerate(scan_parameter_values):
            if self.stop_run.is_set():
                break
            # Update PlsrDAC parameter
            self.set_scan_parameters(PlsrDAC=scan_parameter_value)  # set scan parameter
            self.write_global_register('PlsrDAC', scan_parameter_value)  # write to FE
            self.dut['Oscilloscope'].set_trigger_mode("AUTO")
            self.dut['Oscilloscope'].set_trigger_type("EDGe")
            self.dut['Oscilloscope'].set_acquire_mode('SAMple')
            self.dut['Oscilloscope'].set_acquire_stop_after("RUNSTop")
            self.dut['Oscilloscope'].set_acquire_state("RUN")
            time.sleep(1.5)
            self.dut['Oscilloscope'].force_trigger()
            self.dut['Oscilloscope'].set_acquire_state("STOP")
            data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
            self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
            times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
            if len(data):
                trigger_level = (np.mean(voltages) - self.trigger_level_offset * 1e-3) / 2.0 + self.trigger_level_offset * 1e-3
            else:
                trigger_level = trigger_levels[-1]
            self.dut['Oscilloscope'].set_trigger_level(trigger_level)
            self.dut['Oscilloscope'].set_vertical_scale(min(self.vertical_scale, (np.mean(voltages) + 0.2 * np.mean(voltages)) / 10), channel=self.channel)
#             self.dut['Oscilloscope'].set_vertical_scale(0.05, channel=self.channel)

            # Set up data acquisition and start the scan loop
            self.dut['Oscilloscope'].set_trigger_mode("NORMal")
            self.dut['Oscilloscope'].set_trigger_type("PULSe")
            self.dut['Oscilloscope'].set_acquire_mode('AVErage')  # average to reduce noise while keeping high bandwidth
            self.dut['Oscilloscope'].set_acquire_stop_after("SEQuence")
            self.dut['Oscilloscope'].set_acquire_state("RUN")
            time.sleep(1.5)
            super(PlsrDacTransientCalibration, self).scan()  # analog scan loop
            self.dut['Oscilloscope'].set_acquire_state("STOP")
            if self.dut['Oscilloscope'].get_number_waveforms() == 0:
                logging.warning("No acquisition taking place.")
            data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
            self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
            times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
            data_out[index, :] = voltages[:]
            trigger_level = float(self.dut['Oscilloscope'].get_trigger_level())
            trigger_levels.append(trigger_level)
            progress_bar.update(index)
            self.dut['Oscilloscope'].set_vertical_scale(self.vertical_scale, channel=self.channel)

        time_out[:] = times
        data_out.attrs.trigger_levels = trigger_levels
        progress_bar.finish()
Example #30
for cmph in cmph_covariates:
    print 'Subsetting for %s'%cmph
    lon_,lat_,data = map_utils.CRU_extract('.','%s'%cmph, zip=False)
    lon_.sort()
    lat_.sort()
    # data = map_utils.interp_geodata(lon_, lat_, data, lon[lon_min_i:lon_max_i], lat[lon_min_i:lon_max_i])
    data = map_utils.grid_convert(basemap.interp(map_utils.grid_convert(data,'y-x+','y+x+'), lon_, lat_, *np.meshgrid(lon[lon_min_i:lon_max_i],lat[lat_min_i:lat_max_i])),'y+x+','x+y+')
    for res in [5]:
        hf_out = tb.openFile(os.path.join('%ik-covariates'%res,cmph.lower()+'.hdf5'),'w')
        hf_out.createArray('/','lon',lon[lon_min_i:lon_max_i][::res])
        hf_out.createArray('/','lat',lat[lat_min_i:lat_max_i][::res])
        
        d = map_utils.grid_convert(data[::res,::res], 'x+y+','y-x+')
        
        hf_out.createCArray('/','data',atom=tb.FloatAtom(),shape=d.shape,filters=tb.Filters(complevel=1,complib='zlib'))
        hf_out.createCArray('/','mask',atom=tb.BoolAtom(),shape=d.shape,filters=tb.Filters(complevel=1,complib='zlib'))
        hf_out.root.data.attrs.view = 'y-x+'
        
        hf_out.root.data[:]=d
        hf_out.root.mask[:] = clipped_pete_mask
        
        hf_out.close()

glob = tb.openFile('Globcover.hdf5')
for c in glob_channels:
    subset_and_writeout(glob, 'globcover-channel-%i'%c, 3, glob_missing, lambda x:x==c)
glob.close()

# Reconcile the masks
print 'Finding the conservative mask'