Example #1
 def __call__(self, parser, args, values, option_string = None):
     """
     Processes argument values, automatically selecting the target
     address if applicable
     """
     from h5py import File as h5
     if len(values) == 1:
         in_path = values[0]
         with h5(in_path) as in_h5:
             if len(in_h5.keys()) == 1:
                 values = [in_path, list(in_h5.keys())[0]]
             else:
                 raise argparse.ArgumentTypeError(
                   "Unable to determine target address for " +
                   "'{0}', ".format(self.dest) +
                   "in file '{0}', ".format(in_path) +
                   "specify manually with second argument")
     elif len(values) == 2:
         in_path, in_address = values
         with h5(in_path) as in_h5:
             if in_address not in in_h5:
                 raise argparse.ArgumentTypeError(
                   "Target address '{0}' ".format(in_address) +
                   "not found in file '{0}'. ".format(in_path))
     else:
         raise argparse.ArgumentTypeError(
           "Expected {0} ".format(nargs) +
           "arguments for '{0}', ".format(self.dest) +
           "recieved {0}".format(len(values)))
     setattr(args, self.dest, values)
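A minimal sketch of how such an action can be wired up (the class name H5DatasetAction and the file/dataset names are illustrative, not from the source):

import argparse
import h5py
import numpy as np

class H5DatasetAction(argparse.Action):
    # Simplified form of the __call__ above: a lone filename is expanded
    # to [filename, only_key] when the file contains a single key.
    def __call__(self, parser, args, values, option_string=None):
        if len(values) == 1:
            with h5py.File(values[0], "r") as in_h5:
                keys = list(in_h5.keys())
                if len(keys) != 1:
                    raise argparse.ArgumentTypeError(
                      "specify target address as second argument")
                values = [values[0], keys[0]]
        setattr(args, self.dest, values)

with h5py.File("example.h5", "w") as f:   # single-dataset file
    f["coord"] = np.zeros(10)
parser = argparse.ArgumentParser()
parser.add_argument("-coord", nargs="+", action=H5DatasetAction)
print(parser.parse_args(["-coord", "example.h5"]).coord)
# ['example.h5', 'coord']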
Example #2
    def __init__(self, log, coord, output, preexisting_slice, incoming_slice,
                 attrs = {}, **kwargs):
        """
        """
        from h5py import File as h5
        from collections import OrderedDict

        self.log_path,   self.log_address   = log
        self.coord_path, self.coord_address = coord
        self.out_path                       = output

        with h5(self.log_path) as log_h5, h5(self.coord_path) as coord_h5:
            # Need to support omitting the beginning and end of trajectories
            coord_shape       = coord_h5[self.coord_address].shape
            self.n_molecule_1 = coord_shape[1]
            self.n_molecule_2 = coord_shape[2]
            self.volume       = np.mean(log_h5[self.log_address]["volume"])
        self.conc_molecule_1  = concentration(self.n_molecule_1, self.volume)
        self.conc_molecule_2  = concentration(self.n_molecule_2, self.volume)
        self.count            = np.zeros((coord_shape[0], self.n_molecule_1),
                                  np.int8)

        self.preexisting_slice = preexisting_slice
        self.incoming_slice    = incoming_slice

        # Prepare datasets
        self.datasets  = OrderedDict(
          pair_association = dict(
            attrs   = {},
            data    = np.zeros((self.n_molecule_1, self.n_molecule_2),
              dtype = [("pbound",       "f4"), ("pbound se",       "f4"),
                       ("mean fpt on",  "f4"), ("mean fpt on se",  "f4"),
                       ("mean fpt off", "f4"), ("mean fpt off se", "f4"),
                       ("kon",          "f4"), ("kon se",          "f4"),
                       ("koff",         "f4"), ("koff se",         "f4")])),
          overall_association = dict(
            attrs   = {},
            data    = np.zeros(1,
              dtype = [("pbound",       "f4"), ("pbound se",       "f4"),
                       ("mean fpt on",  "f4"), ("mean fpt on se",  "f4"),
                       ("mean fpt off", "f4"), ("mean fpt off se", "f4"),
                       ("kon",          "f4"), ("kon se",          "f4"),
                       ("koff",         "f4"), ("koff se",         "f4")])),
          events = dict(
            attrs   = {},
            data    = []))

        super(Association_Block_Accumulator, self).__init__(**kwargs)
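The structured dtype above makes each statistic addressable by field name; a standalone illustration with a reduced field list:

import numpy as np

data = np.zeros((2, 3), dtype=[("pbound", "f4"), ("pbound se", "f4")])
data["pbound"][0, 1] = 0.25     # bound probability for molecule pair (0, 1)
data["pbound se"][0, 1] = 0.01  # its standard error
print(data["pbound"])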
Example #3
    def get_preexisting_slice(self, force = False, **kwargs):
        """
        Determines slice to which preexisting data in outputs
        corresponds

        **Arguments:**
            :*force*: Disregard preexisting data even if present
        """
        from warnings import warn
        from h5py import File as h5

        out_path, out_address = self.outputs[1]
        with h5(out_path) as out_h5:
            if out_address in out_h5:
                if force:
                    del out_h5[out_address]
                    self.preexisting_slice = None
                else:
                    blocks = np.array(out_h5[out_address])
                    attrs  = dict(out_h5[out_address].attrs)
                    if (blocks["stop"][-1] - blocks["start"][-1] !=
                      attrs["block_size"]):
                        self.preexisting_slice = slice(blocks["start"][0],
                          blocks["stop"][-2], 1)
                    else:
                        self.preexisting_slice = slice(blocks["start"][0],
                          blocks["stop"][-1], 1)
            else:
                self.preexisting_slice = None
Example #4
def fit_ne_distribution(fname_inp, nbin=50):
    """
    - leer la coleccion de valores del nro de interacc esenciales
    - construir distribucion de dichos valores y fitear con gaussiana
    """
    from scipy.optimize import curve_fit
    # leemos la data de la simulacion anterior (la coleccion de valores
    # del nro de interacciones esenciales)
    with h5(fname_inp,'r') as f:
        ne = f['ne'][3:]  # obviamos sus primeros tres valores para 
                          # olvidarnos un poco de la configuracion original
                          # NOTE: la config original corresponde al `f['ne'][0]`.

    # build its frequency distribution
    # NOTE:
    #   - `hx_` are the edges of the binned domain
    #   - density=True makes it return a distribution with area=1.
    hc, hx_ = np.histogram(ne, bins=nbin, density=True)
    hx = 0.5*(hx_[:-1] + hx_[1:])  # `hx` are the bin-center values
    hx_mean = (hx*hc).sum()/hc.sum()

    # fit the frequency distribution to obtain its probability
    # density
    popt, pcov = curve_fit(
        f = gauss, 
        xdata = hx,
        ydata = hc,
        p0 = [1., hx_mean, 30.], # initial guesses
    )
    return popt, pcov
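`gauss` is not defined in this snippet; a conventional Gaussian model consistent with the p0 = [amplitude, mean, sigma] seeds would be (an assumption, not from the source):

import numpy as np

def gauss(x, A, mu, sigma):
    # A * exp(-(x - mu)^2 / (2 sigma^2))
    return A * np.exp(-0.5 * ((x - mu) / sigma) ** 2)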
Example #5
    def __init__(self, preexisting_slice, incoming_slice, grid, bandwidth,
        frames_per_block, outputs, attrs = {}, **kwargs):
        """
        Initializes accumulator

        **Arguments:**
            :*preexisting_slice*: Slice containing frame indices whose
                                  results were included in *outputs*
                                  before this invocation of program
            :*incoming_slice*:    Slice containing frame indices whose
                                  results are to be added to *outputs*
                                  during this invocation of program
            :*grid*:              Grid on which to calculate pdf
            :*bandwidth*:         Kernel bandwidth
            :*frames_per_block*:  Number of frames present in each
                                  incoming block
            :*outputs*:           Path to h5 file and address within h5
                                  file for each output dataset; list of
                                  tuples
            :*attrs*:             Attributes to add to dataset
        """
        from h5py import File as h5

        attrs = dict(attrs)  # copy; the mutable default must not be modified

        # Input
        self.preexisting_slice = preexisting_slice
        self.incoming_slice    = incoming_slice
        self.received_slices   = []
        
        # Action
        self.grid = np.squeeze(np.array(eval(grid)))
        self.frames_per_block = frames_per_block

        # Output
        self.outputs = outputs

        # Prepare dataset
        pdist = np.zeros(self.grid.size,
          dtype = [("center",      "f4"), ("kde",         "f8"),
                   ("probability", "f8"), ("free energy", "f4"),
                   ("pmf",         "f4")])
        pdist["center"]       = self.grid
        attrs["center units"] = "A"
        attrs["bandwidth"]    = bandwidth

        out_path, out_address = self.outputs[1]
        with h5(out_path) as out_h5:
            if out_address in out_h5:
                blocks = np.array(out_h5[out_address])
                blocks = list(blocks[blocks["stop"]
                           <= self.preexisting_slice.stop])
                for block in blocks:
                    pdist["kde"] += block["kde"]
            else:
                blocks = []
        self.datasets = {
          self.outputs[0]: dict(data = pdist,  attrs = attrs),
          self.outputs[1]: dict(data = blocks, attrs = dict(
            block_size = self.frames_per_block))}

        super(KDE_Block_Accumulator, self).__init__(**kwargs)
Example #6
    def load(self, data_name, **kws):
        """
        para leer la data de histogramas Auger
        """
        f5 = h5(self.fname_inp, 'r')
        ch_Eds = (3, 4, 5)
        # get the global-average histogram
        nEd = 50
        typic = np.zeros(nEd, dtype=np.float32)
        for i in range(nEd):
            Ed = i * 20. + 10.
            typic[i] = f5['mean/corr_%04dMeV' % Ed].value
        f5.close()

        t_utc, CRs = read_hsts_data(self.fname_inp, typic, ch_Eds)
        print " -------> variables read!"

        VARS = {}
        VARS['CRs.' + data_name] = {
            'value': CRs,
            'lims': [-1.0, 1.0],
            'label': 'Auger (muon band) [%]'
        }

        return {
            't_utc': t_utc,
            'VARS': VARS,
        }
Example #7
 def __str__(self):
     """
     Prepares string representation of output data
     """
     if hasattr(self, "datasets"):
         stateprob_ds = np.array(list(self.datasets.values())[0]["data"])
         titles = ""
     else:
         from h5py import File as h5
         out_path, out_address = self.outputs[0]
         with h5(out_path) as out_h5:
             if out_address in out_h5:
                 stateprob_ds = np.array(out_h5[out_address])
                 titles = "Dataset present at {0}[{1}]\n".format(
                   out_path, out_address)
             else:
                 return "Dataset {0}[{1}] does not exist".format(
                   out_path, out_address)
     values = ""
     for i, field in enumerate(stateprob_ds.dtype.names):
         if i % 2 == 0:
             titles += "{0:>12} {1:<10}  ".format(field, "(se)")
             values += "{0:>12.4f} ".format(float(stateprob_ds[field]))
         else:
             values += "{0:<12}".format("({0:<.4f})".format(
                         float(stateprob_ds[field])))
     return "{0}\n{1}".format(titles, values)
Example #8
    def get_final_slice(self, debug = False, **kwargs):
        """
        """
        from warnings import warn
        from h5py import File as h5

        if debug: print(self.inputs)

        in_starts   = []
        in_stops    = []
        for in_path, in_address in self.inputs:
            with h5(in_path) as in_h5:
                attrs = dict(in_h5[in_address].attrs)
                if "slice" in attrs:
                    in_starts += [eval(attrs["slice"]).start]
                    in_stops  += [eval(attrs["slice"]).stop]
                else:
#                    warn("'slice' not found in dataset " +
#                      "'{0}:{1}' attributes; ".format(in_path, in_address) +
#                      "assuming first dimension is time")
                    in_starts += [0]
                    in_stops  += [in_h5[in_address].shape[0]]
        in_start = np.max(in_starts)
        in_stop  = np.min(in_stops)
        self.final_slice = slice(in_start, in_stop, 1)
Example #9
    def __call__(self, **kwargs):
        """
        Runs analysis
        """
        from h5py import File as h5

        if self.incoming_slice is None:
            return

        # Load input data
        with h5(self.assign_path) as assign_h5:
            in_assign    = np.array(assign_h5[self.assign_address
                             + "assignment"])
            in_assign_at = dict(assign_h5[self.assign_address
                             + "assignment"].attrs)
        self.orig_states = eval(in_assign_at.pop("states").replace("inf",
                             "np.inf"))

        if  (self.orig_states[0][0] == "unbound"
        and  self.orig_states[1][0] == "bound"):
            out_assign  = np.array(np.sum(in_assign == 2, axis = 2), np.int8)+1
            self.states = [("unbound", self.orig_states[0][1])] + \
              [("{0} bound".format(i), self.orig_states[1][1]) 
              for i in range(1, np.max(out_assign + 1))]
        else:
            raise Exception("Unable to understand state descriptions, " +
              "necessary logic not yet implemented")
        out_assign_at = dict(
          slice  = in_assign_at.pop("slice"),
          states = str(self.states))
        self.datasets = {self.outputs[0]: dict(data = out_assign,
                          attrs = out_assign_at)}
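The np.sum(in_assign == 2, axis = 2) step counts, per frame and per molecule of type 1, how many type-2 partners are in state 2 ("bound"); adding 1 maps zero bound partners to state 1. In isolation:

import numpy as np

# 1 frame, 2 molecules of type 1, 3 of type 2; state 2 = "bound"
in_assign = np.array([[[2, 1, 2],
                       [1, 1, 1]]])
out_assign = np.array(np.sum(in_assign == 2, axis=2), np.int8) + 1
print(out_assign)  # [[3 1]]: two partners bound -> 3, none bound -> 1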
Example #10
def read_hsts_data(fname, typic, ch_Eds):
    """
    code adapted from ...ch_Eds_smoo2.py
    """
    f = h5(fname, 'r')

    # initial date
    datestr = f['date_ini'].value
    yyyy, mm, dd = map(int, datestr.split('-'))
    INI_DATE = datetime(yyyy, mm, dd)

    # final date
    datestr = f['date_end'].value
    yyyy, mm, dd = map(int, datestr.split('-'))
    END_DATE = datetime(yyyy, mm, dd)

    date = INI_DATE
    tt, rr = [], []
    ntot, nt = 0, 0
    while date < END_DATE:
        yyyy, mm, dd = date.year, date.month, date.day
        path = '%04d/%02d/%02d' % (yyyy, mm, dd)
        try:
            dummy = f[path]  # test if this path exists!
        except KeyError:
            date += timedelta(days=1)  # next day...
            continue

        ntanks = f['%s/tanks' % path][...]
        cc = ntanks > 150.
        ncc = cc.nonzero()[0].size

        if ncc > 1:  # more than one record has >150 tanks
            time = f['%s/t_utc' % path][...]  # utc secs
            cts, typ = np.zeros(96, dtype=np.float64), 0.0
            for i in ch_Eds:
                Ed = i * 20. + 10.
                cts += f['%s/cts_temp-corr_%04dMeV' % (path, Ed)][...]
                typ += typic[i]  # scalar

            cts_norm = cts / typ
            #aux  = np.nanmean(cts_norm[cc])
            tt += [time[cc]]
            rr += [cts_norm[cc]]
            ntot += 1  # files read OK
            nt += ncc  # total number of OK elements

        date += timedelta(days=1)  # next day...

    #--- converting tt, rr to 1D-numpy.arrays
    t, r = nans(nt), nans(nt)
    ini = 0
    for i in range(ntot):
        ni = len(tt[i])
        t[ini:ini + ni] = tt[i]
        r[ini:ini + ni] = rr[i]
        ini += ni

    f.close()
    return t, r
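`nans` is not defined here; a helper consistent with its use above, and with ff.nans(N_realiz, dtype=np.float32) in Example #22, would be (an assumption):

import numpy as np

def nans(n, dtype=np.float64):
    # return an array of the requested size, prefilled with NaN
    a = np.empty(n, dtype=dtype)
    a.fill(np.nan)
    return a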
Example #11
    def __init__(self, coord, states, output, **kwargs):
        """
        Initializes generator

        **Arguments:**
            :*coord*:  Coordinates used to make assignments
            :*states*: State definitions, passed to parse_states
            :*output*: Tuple including path to h5 file and address
                       within h5 file
            :*force*:  Run analysis even if no new data is present
        """
        from h5py import File as h5
        from MDclt import parse_states

        # Input
        # In the long term, it is probably appropriate to have some
        #   settings to control how multiple molecules are handled
        # Also necessary to handle multiple coordinate dimensions
        #   appropriately
        self.coord_path, self.coord_address = coord
        self.inputs  = [(self.coord_path, self.coord_address)]
        with h5(self.coord_path) as coord_h5:
            coord_shape           = coord_h5[self.coord_address].shape
        self.i = 0
        if len(coord_shape) > 1:
            self.n_molecule_1 = coord_shape[1]
        else:
            self.n_molecule_1 = 1
        self.j = 0
        if len(coord_shape) > 2:
            self.n_molecule_2 = coord_shape[2]
        else:
            self.n_molecule_2 = 1

        # Action
        self.frames_per_block = coord_shape[0] # Use whole trajectory
        self.states           = parse_states(states)

        # Output
        # str.rstrip strips a character set, not a suffix; remove the
        #   trailing "assignment" component explicitly
        if output[1].endswith("assignment"):
            output[1] = output[1][:-len("assignment")]
        self.outputs = [(output[0], os.path.normpath(output[1] + "//assignment"),
          coord_shape)]

        super(Assign_Block_Generator, self).__init__(inputs = self.inputs,
          outputs = self.outputs, **kwargs)

        # Does not yet support extension, must recalculate entire
        #   dataset
        if self.preexisting_slice != self.final_slice:
            self.incoming_slice = self.final_slice

        if self.incoming_slice is not None:
            self.current_start = self.incoming_slice.start
            self.current_stop  = self.incoming_slice.start + \
                                 self.frames_per_block
            self.final_stop    = self.final_slice.stop
Example #12
    def get_preexisting_slice(self, force = False, **kwargs):
        """
        Determines slice to which preexisting data in outputs
        corresponds

        NOTE: Should not initialize empty dataset here, as
           the dtype may be unknown until the analysis is
           started or completed

        **Arguments:**
            :*force*: Disregard preexisting data even if present
        """
        from warnings import warn
        from h5py import File as h5

        out_starts = []
        out_stops  = []
        for output in self.outputs:
            if len(output) == 2:
                out_path, out_address = output
            elif len(output) == 3:
                out_path, out_address, out_shape = output
            with h5(out_path) as out_h5:
                if out_address in out_h5:
                    if force:
                        del out_h5[out_address]
                        out_starts += [np.nan]
                        out_stops  += [np.nan]
                    else:
                        attrs = dict(out_h5[out_address].attrs)
                        if "slice" in attrs:
                            out_starts += [eval(attrs["slice"]).start]
                            out_stops  += [eval(attrs["slice"]).stop]
                        else:
#                            warn("'slice' not found in dataset " +
#                              "'{0}:{1}' attributes; ".format(out_path,
#                              out_address) +
#                              "assuming first dimension is time")
                            out_starts += [0]
                            out_stops  += [out_h5[out_address].shape[0]]
                else:
                    out_starts += [np.nan]
                    out_stops  += [np.nan]
        if force:
            self.preexisting_slice = None
        elif len(set(out_starts)) != 1 or len(set(out_stops)) != 1:
            raise Exception("Preexising output datasets correspond to " +
              "different slices, use '--force' to overwrite all")
        elif np.isnan(out_starts[0]) or np.isnan(out_stops[0]):
            self.preexisting_slice = None
        else:
            self.preexisting_slice = slice(out_starts[0], out_stops[0], 1)
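The "slice" attribute convention used here stores a slice as its repr and recovers it with eval; the round-trip in isolation:

s = slice(0, 5000, 1)
attrs = {"slice": str(s)}         # stored as "slice(0, 5000, 1)"
assert eval(attrs["slice"]) == s  # recovered intact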
Example #13
    def next(self):
        """
        Prepares and returns next Block of analysis
        """
        from h5py import File as h5

        if (self.incoming_slice is None
        or  self.current_start >= self.final_stop):
            raise StopIteration()
        else:
            # Determine slice indexes
            if self.n_molecule_1 >= 1 and self.n_molecule_2 >= 1:
                block_slice = (slice(self.current_start, self.current_stop, 1),
                                self.i, self.j)
                attrs       = {"slice": str(block_slice[0])}
            else:
                block_slice = slice(self.current_start, self.current_stop, 1)
                attrs       = {"slice": str(block_slice)}

            # Load primary data from these indexes
            #   NOTE: It is necessary to round to the scaleoffset in
            #   order to ensure that the same results are obtained for
            #   fresh and extended datasets
            with h5(self.coord_path) as coord_h5:
                scaleoffset = coord_h5[self.coord_address].scaleoffset
                block_coord = np.array(coord_h5[self.coord_address]
                                [block_slice])
                if scaleoffset is not None:
                    scaleoffset = int(str(scaleoffset)[0])
                    block_coord = np.round(block_coord, scaleoffset)

            # Iterate
            self.j     += 1
            if self.j  == self.n_molecule_2:
                self.i += 1
                self.j  = 0
            if self.i == self.n_molecule_1:
                self.current_start += self.frames_per_block
                self.current_stop   = min(self.current_start + 
                                        self.frames_per_block, self.final_stop)
                self.i  = 0
                self.j  = 0

            # Return new block
            return Assign_Block(
              coord   = block_coord,
              states  = self.states,
              slc     = block_slice,
              outputs = self.outputs,
              attrs   = attrs)
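The scaleoffset handling can be tried standalone: with h5py's scale-offset filter the stored floats are quantized to a fixed number of decimal places, and Dataset.scaleoffset reports that setting (None when the filter is absent). A minimal sketch with an illustrative file name:

import h5py
import numpy as np

with h5py.File("demo.h5", "w") as f:
    f.create_dataset("coord", data=np.random.rand(10, 3), scaleoffset=4)
with h5py.File("demo.h5", "r") as f:
    so = f["coord"].scaleoffset      # 4 here; None without the filter
    block = np.array(f["coord"])
    if so is not None:
        block = np.round(block, so)  # same values for fresh and extended runs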
Example #14
    def __init__(self,
                 log, coord,
                 bound, unbound,
                 output, **kwargs):
        """
        Initializes generator

        **Arguments:**
            :*log*:    Simulation log
            :*coord*:  Coordinates used to generate pmf
            :*output*: List including path to h5 file and address
                       within h5 file
            :*force*:  Run analysis even if no new data is present
        """
        from h5py import File as h5

        # Input
        self.log_path,   self.log_address   = log
        self.coord_path, self.coord_address = coord

        # Action
        self.bound   = bound
        self.unbound = unbound
        with h5(self.log_path) as log_h5, h5(self.coord_path) as coord_h5:
            coord_shape      = coord_h5[self.coord_address].shape
            self.volume      = np.mean(log_h5[self.log_address]["volume"])
        self.n_molecule_1    = coord_shape[1]
        self.n_molecule_2    = coord_shape[2]
        self.i               = 0
        self.j               = 0
        self.conc_single     = concentration(1,                 self.volume)
        self.conc_molecule_1 = concentration(self.n_molecule_1, self.volume)
        self.conc_molecule_2 = concentration(self.n_molecule_2, self.volume)

        super(Association_Block_Generator, self).__init__(
          inputs = [log, coord], output = [output, "overall_association"],
          **kwargs)
Example #15
    def get_dataset_format(self, **kwargs):
        """
        Determines format of dataset
        """
        from h5py import File as h5

        out_path, out_address = self.outputs[0]

        with h5(out_path) as out_h5:
            if out_address in out_h5:
                # If dataset already exists, extract current dtype
                self.dtype    = out_h5[out_address].dtype
                self.new_keys = list(self.dtype.names)
                self.raw_keys = []
                for key in self.new_keys:
                    self.raw_keys += [r for r, n, _ in self.fields if n == key]
                self.attrs    = dict(out_h5[out_address].attrs)
            else:
                # Otherwise, determine fields present in infile
                raw_keys = []
                breaking = False
                with open(self.infiles[0], "r") as infile:
                    raw_text = [line.strip() for line in infile.readlines()]
                for i in range(len(raw_text)):
                    if breaking:  break
                    if raw_text[i].startswith("NSTEP"):
                        while True:
                            if raw_text[i].startswith("----------"):
                                breaking = True
                                break
                            for j, field in enumerate(
                              raw_text[i].split("=")[:-1]):
                                if j == 0:
                                    raw_keys += [field.strip()]
                                else:
                                    raw_keys += [" ".join(field.split()[1:])]
                            i += 1

                # Determine appropriate dtype of new data
                self.raw_keys = ["TIME(PS)"]
                self.new_keys = ["time"]
                self.dtype    = [("time", "f4")]
                self.attrs    = {"time units": "ns"}
                for raw_key, new_key, units in self.fields[1:]:
                    if raw_key in raw_keys:
                        self.raw_keys  += [raw_key]
                        self.new_keys  += [new_key]
                        self.dtype     += [(new_key, "f4")]
                        if units is not None:
                            self.attrs[new_key + " units"] = units
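The "="-splitting above interleaves values with the names of the following fields; on a synthetic log line in the format the parser expects, it recovers the field names:

line = "NSTEP =      500   TIME(PS) =       1.000  TEMP(K) =   300.06"
raw_keys = []
for j, field in enumerate(line.split("=")[:-1]):
    if j == 0:
        raw_keys += [field.strip()]
    else:
        raw_keys += [" ".join(field.split()[1:])]
print(raw_keys)  # ['NSTEP', 'TIME(PS)', 'TEMP(K)']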
Example #16
    def next(self):
        """
        Prepares and returns next Block of analysis
        """
        from h5py import File as h5

        # No new trajectory to analyze
        if self.start_index == self.final_index:
            raise StopIteration()

        if self.i == self.n_molecule_1:
            raise StopIteration()
        else:
            # Determine slice indexes
            block_slice = (slice(self.start_index, self.final_index, 1),
                           self.i, self.j)

            # Load primary data from these indexes
            with h5(self.coord_path) as coord_h5:
                block_coord = np.array(coord_h5
                  [self.coord_address]
                  [self.start_index:self.final_index,self.i,self.j])
            block = Association_Block(
                     coord          = block_coord,
                     bound_cutoff   = self.bound,
                     unbound_cutoff = self.unbound,
                     conc_single    = self.conc_single,
                     slc            = block_slice)

            # Iterate
            self.j     += 1
            if self.j  == self.n_molecule_2:
                self.i += 1
                self.j  = 0

            # Return new block
            return block
Example #17
    def next(self):
        """
        Prepares and returns next Block of analysis
        """
        from h5py import File as h5

        if (self.incoming_slice is None
        or  self.current_start >= self.final_stop):
            raise StopIteration()
        else:
            # Determine slice indexes
            block_slice = slice(self.current_start, self.current_stop, 1)

            # Load primary data from these indexes
            #   NOTE: It is necessary to round to the scaleoffset in
            #   order to ensure that the same results are obtained for
            #   fresh and extended datasets
            with h5(self.coord_path) as coord_h5:
                scaleoffset = coord_h5[self.coord_address].scaleoffset
                block_coord = np.array(coord_h5[self.coord_address]
                                [block_slice])
                if scaleoffset is not None:
                    scaleoffset = int(str(scaleoffset)[0])
                    block_coord = np.round(block_coord, scaleoffset)

            # Iterate
            self.current_start += self.frames_per_block
            self.current_stop   = min(self.current_start+self.frames_per_block,
                                  self.final_stop)

            # Return new block
            return self.block_class(
              coord   = block_coord,
              outputs = self.outputs,
              slc     = block_slice,
              **self.block_kwargs)
Example #18
                    print " --> date: ", year, month, "; t: %4.4f" % rtime

                lat, lon = gg.latlons()
                cc  =  (lat<(Mlg_lat+dlat/2.)) & (lat>(Mlg_lat-dlat/2.))
                cc  &= (lon<(Mlg_lon+dlon/2.)) & (lon>(Mlg_lon-dlon/2.))
                val = gg['values']
                name = 'level_%04d' % level
                h[name][dname]   += [ val[cc].mean() ]
                t[name][dname]   += [ rtime ]

            gg = g.readline() # read next line

    # generate output file
    fname_out = '{odir}/test_{year:04d}.h5'.format(odir=dir_out, year=yyyy)
    print " -----> saving: " + fname_out
    f5 = h5(fname_out, 'w')
    for level in LEVELS:
        lname = 'level_%04d' % level
        for dname in h[lname].keys():
            h[lname][dname] = np.array(h[lname][dname])
            t[lname][dname] = np.array(t[lname][dname])
            path = '%s/%s' % (lname, dname)
            f5['%s/t'%path] = t[lname][dname]
            f5['%s/h'%path] = h[lname][dname]

    f5.close()

"""
fig = figure(1, figsize=(6,4))
ax  = fig.add_subplot(111)
Example #19
    def __call__(self, **kwargs):
        """
        Runs analysis
        """
        from h5py import File as h5

        if self.incoming_slice is None:
            return

        # Load input data
        with h5(self.assign_path) as assign_h5:
            assignments = np.array(assign_h5[self.assign_address
                            + "assignment"])
            attrs       = dict(assign_h5[self.assign_address
                            + "assignment"].attrs)
        n_frames    = assignments.shape[0]
        n_states    = np.max(assignments) + 1
        state_names = ["unassigned"] + [name for name, _ in eval(
                        attrs["states"].replace("inf", "np.inf"))]

        # Calculate state probabilities
        Pstate             = np.zeros(n_states, np.float64)
        Pstate_se          = np.zeros(n_states, np.float64)
        assignments_for_FP = np.zeros((n_frames, n_states), np.float64)
        for i in range(n_states):
            Pstate[i] = float(assignments[assignments == i].size) / float(
                          assignments.size)
            if len(assignments.shape) > 1:
                assignments_for_FP[:,i] = (np.sum(assignments == i, axis = 1)
                                            / float(assignments.shape[1]))
            else:
                assignments_for_FP[:,i] = (assignments == i)

        # If all frames are assigned, remove state 0 (unassigned)
        if Pstate[0] == 0.0:
            n_states          -= 1
            Pstate             = Pstate[1:]
            Pstate_se          = Pstate_se[1:]
            assignments_for_FP = assignments_for_FP[:,1:]
            state_names        = state_names[1:]

        # Calculate standard error
        fp_block_averager = FP_Block_Averager(
          dataset    = assignments_for_FP,
          fieldnames = state_names)
        fp_block_averager()
        Pstate_se[:] = fp_block_averager.exp_fit_parameters[0]

        # Organize data
        dtype = [field for state in
                  [[("P {0}".format(name),"f4"),("P {0} se".format(name),"f4")]
                  for name in state_names] for field in state]
        stateprobs = np.zeros(1, dtype)
        for i in range(n_states):
            stateprobs[0]["P {0}".format(state_names[i])]    = Pstate[i]
            stateprobs[0]["P {0} se".format(state_names[i])] = Pstate_se[i]
        stateprobs_at = dict(
          states = attrs["states"],
          slice  = str(self.final_slice))
        self.datasets = {self.outputs[0]: dict(data = stateprobs,
                          attrs = stateprobs_at)}
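The nested comprehension builds one ("P <name>", "f4"), ("P <name> se", "f4") pair per state and flattens the pairs into a single dtype; with two illustrative state names:

state_names = ["unbound", "bound"]  # illustrative
dtype = [field for state in
          [[("P {0}".format(name), "f4"), ("P {0} se".format(name), "f4")]
          for name in state_names] for field in state]
print(dtype)
# [('P unbound', 'f4'), ('P unbound se', 'f4'),
#  ('P bound', 'f4'), ('P bound se', 'f4')]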
Example #20
    def __call__(self, **kwargs):
        """
        Runs analysis
        """
        from h5py import File as h5

        if self.incoming_slice is None:
            return

        # Load input data
        with h5(self.pdist_path) as pdist_h5:
            pdist     = np.array(pdist_h5[self.pdist_address + "pdist"])
            pdist_at  = dict(pdist_h5[self.pdist_address + "pdist"].attrs)
            blocks    = np.array(pdist_h5[self.pdist_address + "blocks"])
            blocks_at = dict(pdist_h5[self.pdist_address + "blocks"].attrs)
        n_states    = len(self.states)
        state_names = list(self.states.keys())

        # Calculate state probabilities
        Pstate       = np.zeros(n_states, np.float64)
        Pstate_se    = np.zeros(n_states)
        state_slices = []
        for i, state_name in enumerate(self.states):
            dimensions = self.states[state_name]
            for d, dimension in enumerate(dimensions):
                inner, outer = dimensions[dimension]

                if "lower bound" in pdist.dtype.names:
                    min_index = np.abs(
                      pdist["lower bound"] - inner).argmin()
                    if np.isinf(outer):
                        max_index = pdist["upper bound"].size
                    else:
                        max_index = np.abs(
                          pdist["upper bound"] - outer).argmin() + 1
                    state_slice   = slice(min_index, max_index, 1)
                    state_slices += [state_slice]
                    Pstate[i] = float(
                                  np.nansum(pdist["count"][state_slice])) / \
                                float(
                                  np.nansum(pdist["count"]))
                else:
                    min_index = np.abs(
                      pdist["center"] - inner).argmin()
                    if np.isinf(outer):
                        max_index = pdist["center"].size
                    else:
                        max_index = np.abs(
                          pdist["center"] - outer).argmin() + 1
                    state_slice   = slice(min_index, max_index, 1)
                    state_slices += [state_slice]
                    Pstate[i] = float(
                                  np.nansum(pdist["kde"][state_slice])) / \
                                float(
                                  np.nansum(pdist["kde"]))

        # Calculate standard error
        fp_block_averager = PDist_FP_Block_Averager(
          dataset      = blocks[:-1],
          full_length  = blocks["stop"][-2],
          state_slices = state_slices,
          n_fields     = n_states,
          fieldnames   = state_names,
          factor       = blocks_at["block_size"])
        fp_block_averager()
        Pstate_se[:] = fp_block_averager.exp_fit_parameters[0]

        # Organize data
        dtype = [field for state in
                  [[("P {0}".format(name),"f4"),("P {0} se".format(name),"f4")]
                  for name in state_names] for field in state]
        stateprobs = np.zeros(1, dtype)
        for i in range(n_states):
            stateprobs[0]["P {0}".format(state_names[i])]    = Pstate[i]
            stateprobs[0]["P {0} se".format(state_names[i])] = Pstate_se[i]
        stateprobs_at = dict(
          states = str(self.states),
          slice  = str(self.final_slice))
        self.datasets = {self.outputs[0]: dict(data = stateprobs,
                          attrs = stateprobs_at)}
Example #21
                lat, lon = gg.latlons()
                cc = (lat < (Mlg_lat + dlat / 2.)) & (lat >
                                                      (Mlg_lat - dlat / 2.))
                cc &= (lon < (Mlg_lon + dlon / 2.)) & (lon >
                                                       (Mlg_lon - dlon / 2.))
                val = gg['values']
                name = 'level_%04d' % level
                h[name][dname] += [val[cc].mean()]
                t[name][dname] += [rtime]

            gg = g.readline()  # read next line

    # generate output file
    fname_out = '{odir}/test_{year:04d}.h5'.format(odir=dir_out, year=yyyy)
    print " -----> saving: " + fname_out
    f5 = h5(fname_out, 'w')
    for level in LEVELS:
        lname = 'level_%04d' % level
        for dname in h[lname].keys():
            h[lname][dname] = np.array(h[lname][dname])
            t[lname][dname] = np.array(t[lname][dname])
            path = '%s/%s' % (lname, dname)
            f5['%s/t' % path] = t[lname][dname]
            f5['%s/h' % path] = h[lname][dname]

    f5.close()
"""
fig = figure(1, figsize=(6,4))
ax  = fig.add_subplot(111)

ax.contourf(lon, lat, val)
Example #22
    help='HDF5 input',
)
pa = parser.parse_args()

# ------- Load the problem data ------------- #
fname_graph = pa.fname_inp_txt  #'../data/yeast_LIT.txt'
fname_ess = '../data/Essential_ORFs_paperHe.txt'

graph = igraph.Graph.Read_Ncol(fname_graph, directed=False)
graph.simplify(multiple=True, loops=False)  # remove duplicate edges

n_nodes = len(graph.vs)  # total number of nodes in the graph
n_edges = len(graph.es)  # total number of edges

#--- read the fits of the IBEP distribution
with h5(pa.fname_inp_h5, 'r') as f:
    #fit_A  = f['fit/A'].value
    fit_mu = f['fit/mu'].value  # mean value
    fit_sigma = f['fit/sigma'].value  # sigma of the Gaussian

#--- parameters of the real network
N_e = ff.count_essential_nodes(fname_graph, fname_ess)  # essential nodes
N_ie = ff.calc_ne(g=graph, fname_ess=fname_ess)  # essential interactions

nbad = 0
N_realiz = 10000  # number of realizations
beta = ff.nans(N_realiz, dtype=np.float32)
overlap = ff.nans(N_realiz, dtype=np.float32)
for ir in range(N_realiz):
    N_trials, N_overlap, n_e, n_ie = ff.beta_sorting(graph, N_e, N_ie, fit_mu,
                                                     fit_sigma)
Example #23
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
import funcs as ff
from h5py import File as h5
import argparse

#--- retrieve args
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
    '-inp', '--fname_inp',
    type=str,
    default='./LIT.h5',
    help='input filename of the network/graph.',
)
pa = parser.parse_args()


fname_inp = pa.fname_inp #'./test.h5'
popt, pcov = ff.fit_ne_distribution(fname_inp, nbin=50)

with h5(fname_inp, 'r+') as f:
    f['fit/A'] = popt[0]
    f['fit/mu'] = popt[1]
    f['fit/sigma'] = popt[2]

#EOF
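Reading the stored fit back (a sketch; [()] is h5py's scalar-dataset read):

from h5py import File as h5

with h5('./LIT.h5', 'r') as f:   # the file written above
    A = f['fit/A'][()]           # amplitude
    mu = f['fit/mu'][()]         # mean
    sigma = f['fit/sigma'][()]   # width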
Example #24
rranges = ( 
    slice(tau.min, tau.max, tau.delta()),
    slice(q.min, q.max, q.delta()),
    slice(off.min, off.max, off.delta()),
    slice(bp.min, bp.max, bp.delta()),
    slice(bo.min, bo.max, bo.delta()),
)
#--- start && run the fitter
data = np.array([t, fc, crs, b])
fit  = ff.fit_forbush(data, [tau_, q_, off_, bp_, bo_])
#fit.make_fit_brute(rranges)
#print fit.par

fit.par = {}
#--- output to HDF5
fo = h5(fname_out, 'r')
for pname in fo.keys():
    if pname=='grids':
        continue

    fit.par[pname] = fo[pname].value
    #fo[pname] = fit.par[pname]
fo.close()
print fit.par

#--- save the exploration grid
#fo['grids/tau'] = [tau.min, tau.max, tau.delta(), tau.n]
#fo['grids/q']   = [q.min, q.max, q.delta(), q.n]
#fo['grids/off'] = [off.min, off.max, off.delta(), off.n]
#fo['grids/bp']  = [bp.min, bp.max, bp.delta(), bp.n]
#fo['grids/bo']  = [bo.min, bo.max, bo.delta(), bo.n]
#------------------
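The tuple of slice objects matches the ranges format of scipy.optimize.brute, presumably the grid search behind the commented make_fit_brute call; a toy two-parameter illustration:

from scipy.optimize import brute

# minimize a two-parameter bowl on a coarse grid; each slice is
# (min, max, step), the same layout as rranges above
f = lambda p: (p[0] - 1.0) ** 2 + (p[1] + 0.5) ** 2
xmin = brute(f, (slice(-2, 2, 0.25), slice(-2, 2, 0.25)))
print(xmin)  # ~[ 1.  -0.5], polished by the default fmin finisher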
Example #25
    def receive_block(self, **kwargs):
        """
        Stores a block of data in an h5 file
        """
        from h5py import File as h5

        out_h5s = {}
        try:
            out_h5s = {out_path: h5(out_path) for out_path in
                        set([output[0] for output in self.outputs])}

            while True:

                block = yield

                if not hasattr(block, "datasets"):
                    continue

                for output, dataset in block.datasets.items():
                    if len(output) == 2:
                        out_path, out_address = output
                    elif len(output) == 3:
                        out_path, out_address, out_shape = output
                    out_h5 = out_h5s[out_path]

                    if "slc" in dataset:
                        out_slc = dataset["slc"]
                        if out_address in out_h5:
                            if out_h5[out_address].shape != out_shape:
                                out_h5[out_address].resize(size = out_shape)
                        else:
                            out_dtype  = dataset["data"].dtype
                            out_kwargs = dict(
                              chunks      = True,
                              compression = "gzip")
                            out_kwargs.update(
                              dataset.get("kwargs", {}))
                            out_kwargs["maxshape"] = (None,) + out_shape[1:]
                            out_h5.create_dataset(out_address,
                              data = np.empty(out_shape, out_dtype),
                              **out_kwargs)
                        out_h5[out_address][out_slc] = dataset["data"]
                        if isinstance(out_slc, slice):
                            print("Dataset stored at {0}[{1}][{2}:{3}]".format(
                              out_path, out_address, out_slc.start, out_slc.stop))
                        elif isinstance(out_slc, tuple):
                            message = "Dataset stored at {0}[{1}][".format(
                              out_path, out_address)
                            for out_slc_dim in out_slc:
                                if isinstance(out_slc_dim, slice):
                                    message += "{0}:{1},".format(
                                      out_slc_dim.start, out_slc_dim.stop)
                                elif isinstance(out_slc_dim, int):
                                    message += "{0},".format(out_slc_dim)
                                else:
                                    raise Exception(
                                      "output slice not understood")
                            message = message[:-1] + "]"
                            print(message)
                        else:
                            raise Exception("output slice not understood")
                    else:
                        if out_address in out_h5:
                            del out_h5[out_address]
                        out_h5[out_address] = dataset["data"]
                        print("Dataset stored at {0}[{1}]".format(
                          out_path, out_address))

                    if "attrs" in dataset:
                        for key, value in dataset["attrs"].items():
                            out_h5[out_address].attrs[key] = value
        except GeneratorExit:
            pass
        finally:
            for out_h5 in out_h5s.values():
                out_h5.flush()
                out_h5.close()
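receive_block is a generator-based coroutine (note block = yield), so it must be primed before blocks can be sent in. A minimal driving loop, assuming acc is an instance of the class above and blocks is an iterable of analysis blocks:

sink = acc.receive_block()
next(sink)            # prime: run up to the first `yield`
for block in blocks:
    sink.send(block)  # each send stores one block's datasets
sink.close()          # raises GeneratorExit inside; the h5 files are closed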
Example #26
ne_ = []
for i in range(n_rewire):
    ne = ff.calc_ne(graph)
    print(" i, n_essential: ", i, ne)
    #he += np.histogram(ne, bins=nbin, range=[1500.,1900.], normed=False)[0]
    ne_ += [ne]  # save all values
    graph.rewire()
    #graph.rewire(int(n_nodes/2)) # rewire as many times as half the
    # total number of nodes

try:
    # save the whole sequence of numbers of essential
    # interactions (IBEPs), so the histogram can later be built with
    # whatever number of bins looks best.
    from h5py import File as h5
    fo = h5(pa.fname_out, 'w')
    fo['ne'] = np.array(ne_, dtype=np.int32)
    fo['n_rewire'] = n_rewire
    fo.close()
except ImportError:
    #--- save a histogram as ASCII
    hc, hx_ = np.histogram(ne_, bins=nbin)
    hx = 0.5 * (hx_[:-1] + hx_[1:])  # bin centers (as in fit_ne_distribution)
    do = np.array([hx, hc]).T
    np.savetxt(pa.fname_out[:-3] + '.txt', do, fmt='%12.2f')

#--- make fig
fig = figure(1, figsize=(6, 4))
ax = fig.add_subplot(111)

#ax.plot(he, label='$P_E$')