Example #1
    def dump_data(self, directory, time_stamp=False):
        """Save the data in directory. The files are prefixed with
        config.timestamp in HH.MM.SS_ format if time_stamp is True."""
        path = directory
        tables = []
        # create the directory if it does not exist yet, then check writability
        if not os.access(path, os.F_OK):
            os.mkdir(path)
        if not os.access(path, os.W_OK):
            config.LOGGER.warning('data directory: ' + path +
                                  ' is not writable')
            return
        if time_stamp:
            prefix = config.timestamp.strftime('%H.%M.%S') + '_'
        else:
            prefix = ''
        for table_id in self.data.children():
            table = moose.Table(table_id)

            tables.append(table)
            file_name = prefix + table.name + '.plot'
            file_path = os.path.join(path, file_name)
            if table.stepMode == 3:
                ts = numpy.linspace(0, self.simtime, len(table))
                numpy.savetxt(file_path, numpy.column_stack((ts, numpy.asarray(table))))
            else:
                table.dumpFile(file_path)
            config.LOGGER.info('Dumped data in %s' % (file_path))
        return tables
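Example #1 is tied to MOOSE-specific objects (config, moose.Table, self.data), so it cannot run on its own. Below is a minimal, standalone sketch of the same save pattern (optional HH.MM.SS_ timestamp prefix, two-column time/value file via numpy.savetxt); the function name and arguments are invented for the sketch and are not part of the original class.

import os
from datetime import datetime

import numpy


def dump_array(directory, name, simtime, values, time_stamp=False):
    """Illustrative stand-in: save `values` as a two-column (time, value)
    .plot file, optionally prefixed with the current HH.MM.SS_ timestamp."""
    if not os.path.exists(directory):
        os.mkdir(directory)
    prefix = datetime.now().strftime('%H.%M.%S') + '_' if time_stamp else ''
    file_path = os.path.join(directory, prefix + name + '.plot')
    values = numpy.asarray(values)
    ts = numpy.linspace(0, simtime, len(values))
    numpy.savetxt(file_path, numpy.column_stack((ts, values)))
    return file_path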
Example #2
import numpy as np
from scipy import stats


def loop_h5_alx(SEQS, CDTS, TMPS, NUMS, h5file, ppty, tpostproc_group, alx_kwargs):
    arrayname_pattern = alx_kwargs["arraynamepattern"]

    for seq in SEQS:
        alx_dd = calc_alx_dd(seq)
        for cdt in CDTS:
            arrayname = arrayname_pattern.format(seq=seq, cdt=cdt)
            if arrayname in tpostproc_group:
                print("{0} ALREADY EXISTS".format(arrayname))
            else:
                denominator, xcoln, ycoln = alx_dd[ppty]  # denominator, x column name, y column name
                # pf is used to identify the tables in ogd
                pfpattern = alx_kwargs["pfpattern"]

                # collect tables in ogd
                tables = []
                for num in NUMS:
                    pf = pfpattern.format(seq=seq, cdt=cdt, num=num)
                    if pf in h5file.root.ogd:
                        tables.append(h5file.get_node(h5file.root.ogd, pf))
                    else:
                        print("{0} does not exist; skipping it".format(pf))

                # compare the x axes and, at the same time, get the minimum
                # common length of the data; both are handled by the
                # get_minimum_length helper (a sketch of it follows this example)
                min_len, xaxis_ref = get_minimum_length(tables, xcoln)

                # collect the values along the y axis
                ave_ds = []  # ave distances
                for table in tables:
                    distance = table.read(field=ycoln)[:min_len]
                    ave_ds.append(distance)

                ave_ds = np.array(ave_ds) / denominator
                alx_ave = np.average(ave_ds, axis=0)  # average over replicas (axis 0)
                # alx_std = np.std(ave_ds, axis=0)
                alx_std = stats.sem(ave_ds, axis=0)  # standard error of the mean instead of std

                # an earlier version built alx_index = np.array(alx_index_ref);
                # comparing the two arrays added needless complexity, so it was dropped

                # transpose to stay consistent with the tables in ogd
                alx_result = np.array([xaxis_ref, alx_ave, alx_std]).transpose()
                h5file.create_array(
                    tpostproc_group,
                    arrayname,
                    alx_result,
                    title=(
                        "average along the x axis over all replicas" "column 0, 1, 2 are x axis, ave, std, respectively"
                    ),
                )
            print "{0} IS DONE".format(arrayname)
    h5file.close()
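Example #2 calls a get_minimum_length helper that the listing does not include. The sketch below is only a guess at its behaviour, inferred from how it is used (a list of PyTables tables, an x column name, returning the shortest length plus a reference x axis); it is an assumption, not the original implementation.

import numpy as np


def get_minimum_length(tables, xcoln):
    """Hypothetical helper: return the shortest row count among the PyTables
    tables together with a reference x axis of that length, checking that the
    (truncated) x axes of all replicas agree."""
    min_len = min(table.nrows for table in tables)
    xaxis_ref = np.asarray(tables[0].read(field=xcoln)[:min_len])
    for table in tables[1:]:
        if not np.allclose(np.asarray(table.read(field=xcoln)[:min_len]), xaxis_ref):
            raise ValueError("x axes differ between replicas")
    return min_len, xaxis_ref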
Example #3
    def loadData(self, symbolList, start, end):
        """Load cached data for each vtSymbol ("symbol:exchange") and return
        the merged frame sorted by datetime."""
        tables = []
        for vtSymbol in symbolList:
            data = self.cache.get(vtSymbol, start, end)
            if len(data):
                data["vtSymbol"] = vtSymbol
                data["symbol"], data["exchange"] = vtSymbol.split(":", 1)
                tables.append(data)
        if len(tables):
            frame = pd.concat(tables,
                              ignore_index=True).sort_values("datetime")
            return frame
        else:
            return pd.DataFrame()
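Example #3 depends on a cache object that is not shown. The snippet below is a self-contained, runnable illustration of just the merge pattern it uses (tag each per-symbol frame, concat, sort by datetime); the symbol strings and sample rows are made up for the illustration.

import pandas as pd

frames = []
for vtSymbol, times in [("rb1905:SHFE", ["2019-01-02", "2019-01-01"]),
                        ("IF1906:CFFEX", ["2019-01-01"])]:
    data = pd.DataFrame({"datetime": pd.to_datetime(times),
                         "close": range(len(times))})
    data["vtSymbol"] = vtSymbol
    data["symbol"], data["exchange"] = vtSymbol.split(":", 1)
    frames.append(data)

merged = pd.concat(frames, ignore_index=True).sort_values("datetime")
print(merged[["datetime", "vtSymbol", "symbol", "exchange", "close"]])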
Example #4
def find_tables(h5file, table_names, group_name=''):
    tables = []
    for table_name in table_names:
        tables.append(find_table(h5file, table_name, group_name))
    return tables
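Example #4 delegates to a find_table helper that the listing does not include. One plausible implementation over the PyTables walk_nodes API is sketched below; it is an assumption, not the original code, and expects h5file to be an open tables.File.

def find_table(h5file, table_name, group_name=''):
    """Hypothetical helper: return the first Table named `table_name` under
    `group_name` (or anywhere in the file when `group_name` is empty)."""
    where = '/' + group_name if group_name else '/'
    for node in h5file.walk_nodes(where, classname='Table'):
        if node.name == table_name:
            return node
    raise LookupError('table {0!r} not found under {1!r}'.format(table_name, where))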