Example #1
def check_bbox_subset(response, req_lat_min, req_lat_max, req_lon_min, req_lon_max):
  """Asserts if the spatial extents of the data in the response are within the requested bbox of a spatial subset

  #####  check_bbox_subset is currently not in use; placeholder for the next round of regression test work

  Arguments:
      response {response.Response} -- the response to display
      req_lat_min -- The minimum latitude from the request bbox for a spatial subset
      req_lat_max -- The maximum latitude from the request bbox for a spatial subset
      req_lon_min -- The minimum longitude from the request bbox for a spatial subset
      req_lon_max -- The maximum longitude from the request bbox for a spatial subset
  """

  data = H5File(BytesIO(response.content), 'r')

  attr_data = data['lat'][:]

  print('Orig min and max: ', attr_data.min(), attr_data.max() )
  lat_min = (attr_data.min() + 180) % 360 - 180
  lat_max = (attr_data.max() + 180) % 360 - 180
  print(lat_min)
  print(lat_max)

  assert lat_max <= req_lat_max
  assert lat_min >= req_lat_min

  attr_data = data['lon'][:]
  lon_min = (attr_data.min() + 180) % 360 - 180
  lon_max = (attr_data.max() + 180) % 360 - 180
  print(lon_min)
  print(lon_max)

  assert lon_max <= req_lon_max
  assert lon_min >= req_lon_min
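
The wrap-around arithmetic above maps any angle into the interval [-180, 180). A minimal standalone sketch (not part of the original test code) of the same expression:

for angle in (350.0, 185.0, -185.0, 170.0):
    print(angle, '->', (angle + 180) % 360 - 180)
# 350.0 -> -10.0, 185.0 -> -175.0, -185.0 -> 175.0, 170.0 -> 170.0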
Example #2
    def _get_corrector(self):
        if self._dc is None:
            with H5File(self._directh5, 'r') as f:
                self._dc = DurationCorrector.from_kinetics_file(
                    f, self.istate, self.fstate, self.dtau, self.n_iters)

        return self._dc
Example #3
def save_parameters_to_file(bunch_params, folder_path, file_name):
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    file_path = os.path.join(folder_path, file_name + '.h5')
    with H5File(file_path, 'w') as h5_file:
        for param_name, param_data in bunch_params.items():
            h5_file.create_dataset(param_name, data=param_data)
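
An illustrative usage sketch (folder, file name, and array contents are hypothetical): since each item in bunch_params becomes one HDF5 dataset, it is expected to map dataset names to array-like data.

import numpy as np
bunch_params = {'x': np.zeros(100), 'px': np.ones(100)}
save_parameters_to_file(bunch_params, 'output/params', 'bunch_0001')
# -> creates output/params/bunch_0001.h5 with datasets 'x' and 'px'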
Example #4
    def calc_rate(self, i_iter=None, red=False, **kwargs):

        if i_iter is None:
            i_iter = self.n_iters

        dc = self._get_corrector() if red else None
        rate = None
        found = False
        with H5File(self._directh5, 'r') as f:
            for i in range(f['rate_evolution'].shape[0]):
                rate_evol = f['rate_evolution'][i, self.istate, self.fstate]
                start = rate_evol['iter_start']
                stop = rate_evol['iter_stop']

                if start <= i_iter < stop:
                    rate = rate_evol['expected']
                    found = True
                    break

            if not found:
                self.log.error(
                    "Can't find rate evolution data for iteration %d!" %
                    i_iter)

        if dc and rate is not None:
            iters = np.arange(i_iter)
            correction = dc.correction(iters)
            rate *= correction

        return rate
Example #5
def read_hipace_beam(file_path, plasma_dens):
    """Reads particle data from an HiPACE paricle file and returns it in the
    unis used by APtools.

    Parameters
    ----------
    file_path : str
        Path to the file with particle data

    plasma_dens : float
        Plasma density in units of cm^{-3} used to convert the beam data to
        non-normalized units

    Returns
    -------
    A tuple with 7 arrays containing the 6D phase space and charge of the
    particles.
    """
    s_d = plasma_skin_depth(plasma_dens)
    file_content = H5File(file_path)
    # sim parameters
    n_cells = file_content.attrs['NX']
    sim_size = (file_content.attrs['XMAX'] - file_content.attrs['XMIN'])
    cell_vol = np.prod(sim_size / n_cells)
    q_norm = cell_vol * plasma_dens * 1e6 * s_d**3 * ct.e
    # get data
    q = np.array(file_content.get('q')) * q_norm
    x = np.array(file_content.get('x2')) * s_d
    y = np.array(file_content.get('x3')) * s_d
    z = np.array(file_content.get('x1')) * s_d
    px = np.array(file_content.get('p2'))
    py = np.array(file_content.get('p3'))
    pz = np.array(file_content.get('p1'))
    return x, y, z, px, py, pz, q
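
A hedged call sketch (the file path and plasma density are placeholders). Following the conversions above, positions are scaled by the plasma skin depth, the charge is denormalized via the cell volume, and the momenta are returned as stored:

x, y, z, px, py, pz, q = read_hipace_beam('raw_beam_000000.h5', 1e18)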
Example #6
def read_osiris_beam(file_path, plasma_dens):
    """Reads particle data from an OSIRIS paricle file and returns it in the
    unis used by APtools.

    Parameters
    ----------
    file_path : str
        Path to the file with particle data

    plasma_dens : float
        Plasma density in units of cm^{-3} used to convert the beam data to
        non-normalized units

    Returns
    -------
    A tuple with 7 arrays containing the 6D phase space and charge of the
    particles.
    """
    s_d = plasma_skin_depth(plasma_dens)
    file_content = H5File(file_path)
    # get data
    q = np.array(file_content.get('q')) * ct.e
    x = np.array(file_content.get('x2')) * s_d
    y = np.array(file_content.get('x3')) * s_d
    z = np.array(file_content.get('x1')) * s_d
    px = np.array(file_content.get('p2'))
    py = np.array(file_content.get('p3'))
    pz = np.array(file_content.get('p1'))
    return x, y, z, px, py, pz, q
Example #7
 def save_cmap(self, name, fld_val, r_val, g_val, b_val, folder_path):
     if (fld_val.min() >= 0 and fld_val.max() <= 255
             and r_val.min() >= 0 and r_val.max() <= 255
             and g_val.min() >= 0 and g_val.max() <= 255
             and b_val.min() >= 0 and b_val.max() <= 255
             and len(fld_val) == len(r_val) == len(g_val) == len(b_val)
             and len(fld_val) <= self.max_len):
         file_path = self.create_file_path(name, folder_path)
         # Create H5 file
         file = H5File(file_path, "w")
         r_dataset = file.create_dataset("r", data=r_val)
         g_dataset = file.create_dataset("g", data=g_val)
         b_dataset = file.create_dataset("b", data=b_val)
         fld_dataset = file.create_dataset("field", data=fld_val)
         file.attrs["cmap_name"] = name
         file.close()
         # Add to available colormaps
         cmap = Colormap(file_path)
         if (os.path.normpath(folder_path) == self.cmaps_folder_path):
             self.default_cmaps.append(cmap)
         else:
             self.other_cmaps.append(cmap)
         return True
     else:
         return False
Example #8
    def load(cls, arg: Union[str, bytes, PurePath, IO[bytes]]) -> 'State':
        """Load a pickled state from either a path, a file, or blob of bytes."""
        from nlisim.config import SimulationConfig  # prevent circular imports

        if isinstance(arg, bytes):
            arg = BytesIO(arg)

        with H5File(arg, 'r') as hf:
            time = hf.attrs['time']
            grid = RectangularGrid.load(hf)

            with StringIO(hf.attrs['config']) as cf:
                config = SimulationConfig(cf)

            state = cls(time=time, grid=grid, config=config)

            for module in config.modules:
                group = hf.get(module.name)
                if group is None:
                    raise ValueError(
                        f'File contains no group for {module.name}')
                try:
                    module_state = module.StateClass.load_state(state, group)
                except Exception:
                    print(f'Error loading state for {module.name}')
                    raise

                state._extra[module.name] = module_state

        return state
Example #9
 def _OpenFile(self, timeStep):
     # The line below sets the attribute `_current_i` of openpmd_ts
     self.openpmd_ts._find_output(None, timeStep)
     # This finds the full path to the corresponding file
     fileName = self.openpmd_ts.h5_files[self.openpmd_ts._current_i]
     file_content = H5File(fileName, 'r')
     return file_content
Example #10
def main():
    """The main function."""
    args = parse_args()

    in_dir = args.input
    out_path = args.output
    num_seeds = len(list(in_dir.glob('*')))

    afl_showmap = which('afl-showmap')
    if not afl_showmap:
        raise Exception('Cannot find `afl-showmap`. Check PATH')

    with H5File(out_path, 'w') as h5f:
        for seed in tqdm(in_dir.iterdir(),
                         desc='Generating `afl-showmap` coverage',
                         total=num_seeds, unit='seeds'):
            cov, exec_time = run_showmap(afl_showmap, seed, **vars(args))
            if cov.size == 0:
                continue

            compression = 'gzip' if cov.size > 1 else None
            dset = h5f.create_dataset(str(seed.relative_to(in_dir)),
                                      data=cov, compression=compression)
            dset.attrs['time'] = exec_time
            dset.attrs['size'] = seed.stat().st_size
Example #11
    def __init__(self, directh5, istate, fstate, assignh5=None, **kwargs):
        n_iters = kwargs.pop("n_iters", None)
        ntpr = kwargs.pop("report_interval", 20)
        nstiter = kwargs.pop("n_steps_iter", 1000)

        if len(kwargs) > 0:
            for k in kwargs:
                print(k)
            raise ValueError("unparsed kwargs")

        dtau = float(ntpr) / nstiter

        with H5File(directh5, 'r') as f:
            state_labels = {}
            for i, raw_label in enumerate(f['state_labels']):
                label = raw_label.decode() if isinstance(raw_label,
                                                         bytes) else raw_label
                state_labels[label] = i
            if istate not in state_labels:
                raise ValueError(
                    f"istate not found: {istate}, available options are {list(state_labels.keys())}"
                )
            if fstate not in state_labels:
                raise ValueError(
                    f"istate not found: {fstate}, available options are {list(state_labels.keys())}"
                )
            istate = state_labels[istate]
            fstate = state_labels[fstate]
            cond_fluxes = f['conditional_fluxes'][slice(n_iters), istate,
                                                  fstate]

        if assignh5 is not None:
            with H5File(assignh5, 'r') as f:
                pops = f['labeled_populations'][slice(n_iters)]
                pops = pops.sum(axis=2)
        else:
            pops = None

        self._dc = None
        self._pops = pops
        self._cond_fluxes = cond_fluxes
        self._dtau = dtau
        self._directh5 = directh5
        self._assignh5 = assignh5
        self._istate = istate
        self._fstate = fstate
Example #12
    def from_filename(cls, filename):
        """Read the struct from a file given its path."""
        from h5py import File as H5File

        if not str(filename).endswith(".h5"):
            raise RuntimeError("Extension is not .h5")

        with H5File(str(filename)) as f:
            return cls.from_h5obj(f)
Example #13
 def _OpenFile(self, timeStep):
     fileName = self.dataName + "-"
     if self.speciesName != "":
         fileName += self.speciesName + "-"
     fileName += str(timeStep).zfill(6)
     ending = ".h5"
     file_path = self.location + "/" + fileName + ending
     file_content = H5File(file_path, 'r')
     return file_content
Example #14
 def WriteDataToFile(self, location, fileName):
     h5file = H5File(location + "/" + fileName + ".h5", "w")
     for key in self._wholeSimulationQuantities:
         dataSet = h5file.create_dataset(
             key,
             data=self._wholeSimulationQuantities[key].GetAllDataInISUnits())
         dataSet.attrs["Units"] = self._wholeSimulationQuantities[key].GetDataISUnits()
     h5file.close()
Example #15
    def _OpenFile(self, timeStep):
        if self.speciesName != "":
            fileName = 'density_' + self.speciesName + '_' + self.dataName
        else:
            fileName = 'field_' + self.dataName

        fileName += '_' + str(timeStep).zfill(6)
        ending = ".h5"
        file_path = self.location + "/" + fileName + ending
        file_content = H5File(file_path, 'r')
        return file_content
Example #16
    def __startup(self):
        with H5File(self.h5_file_path, 'r') as f:
            for main_group_key in f.keys():
                main_group = f[main_group_key]
                for group_key in main_group.keys():
                    group = main_group[group_key]
                    self.__parse_group_info(group)
                    if self.n_batches != 0 and (self.n_fft/self.batch_size) >= self.n_batches:
                        break

        self.target_group = self.groups[0]
Example #17
 def validate(self, obj, value):
     """Overwritten from parent to ensure that the string is path to a
     valid keras model.
     """
     super(KerasModelWeights, self).validate(obj, value)
     if value:
         with H5File(value, 'r') as f_in:
             if 'model_config' not in f_in.attrs:
                 raise TraitError(
                     '{} does not contain a valid keras model.'.format(
                         value))
     return value
Example #18
def save_bunch_to_file(bunch, folder_path, file_name):
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    file_path = os.path.join(folder_path, file_name + '.h5')
    with H5File(file_path, 'w') as h5_file:
        h5_file.create_dataset('x', data=bunch.x)
        h5_file.create_dataset('y', data=bunch.y)
        h5_file.create_dataset('xi', data=bunch.xi)
        h5_file.create_dataset('px', data=bunch.px)
        h5_file.create_dataset('py', data=bunch.py)
        h5_file.create_dataset('pz', data=bunch.pz)
        h5_file.create_dataset('q', data=bunch.q)
        h5_file.attrs['prop_dist'] = bunch.prop_distance
Example #19
def calc_avg_rate(directh5_path, istate, fstate, **kwargs):
    """
    Return the raw or RED-corrected rate constant with the confidence interval.

    -----------------
    Keyword Arguments
    -----------------
    n_iters: number of iterations to include (default: all)
    report_interval: report interval, ntpr (number of steps)
    n_steps_iter: duration of each iteration, nstiter (number of steps)
    red: if True, apply the RED duration correction
    callback: optional callable, invoked with the duration corrector

    """

    n_iters = kwargs.pop("n_iters", None)

    ntpr = kwargs.pop("report_interval", 20)
    nstiter = kwargs.pop("n_steps_iter", 1000)
    callback = kwargs.pop("callback", None)

    red = kwargs.pop("red", False)

    if len(kwargs) > 0:
        raise ValueError("unparsed kwargs")

    dtau = float(ntpr) / nstiter
    dc = None

    with H5File(directh5_path, 'r') as directh5:
        if n_iters is None:
            n_iters = directh5['rate_evolution'].shape[0]

        rate_evol = directh5['rate_evolution'][n_iters - 1, istate, fstate]
        rate = rate_evol['expected']

        if red:
            dc = DurationCorrector.from_kinetics_file(directh5, istate, fstate,
                                                      dtau, n_iters)

    if callback is not None:
        kw = {"correction": dc}
        callback(**kw)

    iters = np.arange(n_iters)

    correction = dc.correction(iters) if dc else 1.0

    rate *= correction

    return rate
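
An illustrative call (the file name and state indices are placeholders); passing red=True enables the DurationCorrector-based RED correction applied above:

rate = calc_avg_rate('direct.h5', 0, 1, red=True, n_iters=500)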
Example #20
def main():
    """The main function."""
    args = parse_args()

    in_hdf5 = args.input
    out_pdf = args.output

    print('Reading %s...' % in_hdf5)
    cov_data = {}
    with H5File(in_hdf5, 'r') as h5_file:
        for cov_file, cov in h5_file.items():
            df_cov = np.zeros(MAP_SIZE, dtype=np.uint8)
            if len(cov.shape) == 0:
                edge, count = cov[()]
                df_cov[edge] = count
            else:
                for edge, count in cov:
                    df_cov[edge] = count
            cov_data[cov_file] = list(df_cov)

    df = pd.DataFrame.from_dict(cov_data, orient='index')
    if len(df) <= 1:
        sys.stderr.write('Not enough seeds to perform PCA\n')
        sys.exit(1)
    x = StandardScaler().fit_transform(df)

    # Compute PCA
    # TODO determine the number of components
    print('Computing PCA...')
    pca = PCA(n_components=2)
    pca_scores = pd.DataFrame(pca.fit_transform(x),
                              columns=['PCA 1', 'PCA 2']).set_index(df.index)

    # Configure plot
    rc('pdf', fonttype=42)
    rc('ps', fonttype=42)
    plt.style.use('ggplot')

    # Plot PCA
    print('Plotting...')
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(pca_scores['PCA 1'], pca_scores['PCA 2'], marker='x', alpha=0.5)
    ax.set_xlabel('Component 1')
    ax.set_ylabel('Component 2')

    fig.savefig(out_pdf, bbox_inches='tight')
    print('%s coverage plotted at %s' % (in_hdf5, out_pdf))
Example #21
    def save(self, arg: Union[str, PurePath, IO[bytes]]) -> None:
        """Save the current state to the file system."""
        with H5File(arg, 'w') as hf:
            hf.attrs['time'] = self.time
            hf.attrs['config'] = str(
                self.config)  # TODO: save this in a different format
            self.grid.save(hf)

            for module in self.config.modules:
                module_state = cast('ModuleState', getattr(self, module.name))
                group = hf.create_group(module.name)
                try:
                    module_state.save_state(group)
                except Exception:
                    print(f'Error serializing {module.name}')
                    raise
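
Together with the load classmethod from Example #8, this gives a simple round trip. A sketch with an illustrative path:

state.save('checkpoint.h5')             # persist time, config, grid and module states
restored = State.load('checkpoint.h5')  # rebuild an equivalent State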
Example #22
def read_openpmd_beam(file_path, species_name):
    """Reads particle data from a h5 file following the openPMD standard and
    returns it in the unis used by APtools.

    Parameters
    ----------
    file_path : str
        Path to the file with particle data

    species_name : str
        Name of the particle species

    Returns
    -------
    A tuple with 7 arrays containing the 6D phase space and charge of the
    particles.
    """
    file_content = H5File(file_path, mode='r')
    # get base path in file
    iteration = list(file_content['/data'].keys())[0]
    base_path = '/data/{}'.format(iteration)
    # get path under which particle data is stored
    particles_path = file_content.attrs['particlesPath'].decode()
    # get species
    beam_species = file_content[join_infile_path(base_path, particles_path,
                                                 species_name)]
    # get data
    mass = beam_species['mass']
    charge = beam_species['charge']
    position = beam_species['position']
    position_off = beam_species['positionOffset']
    momentum = beam_species['momentum']
    m = mass.attrs['value'] * mass.attrs['unitSI']
    q = charge.attrs['value'] * charge.attrs['unitSI']
    x = (position['x'][:] * position['x'].attrs['unitSI'] +
         position_off['x'].attrs['value'] * position_off['x'].attrs['unitSI'])
    y = (position['y'][:] * position['y'].attrs['unitSI'] +
         position_off['y'].attrs['value'] * position_off['y'].attrs['unitSI'])
    z = (position['z'][:] * position['z'].attrs['unitSI'] +
         position_off['z'].attrs['value'] * position_off['z'].attrs['unitSI'])
    px = momentum['x'][:] * momentum['x'].attrs['unitSI'] / (m * ct.c)
    py = momentum['y'][:] * momentum['y'].attrs['unitSI'] / (m * ct.c)
    pz = momentum['z'][:] * momentum['z'].attrs['unitSI'] / (m * ct.c)
    w = beam_species['weighting'][:]
    q *= w
    return x, y, z, px, py, pz, q
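
An illustrative call (the file path and species name are hypothetical). As the conversions above show, positions come back in SI units via the unitSI attributes, momenta are normalized to m*c, and the charge is multiplied by the particle weighting:

x, y, z, px, py, pz, q = read_openpmd_beam('simulation_data.h5', 'electrons')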
Example #23
    def get(self, key):

        if self.db_type == "lmdb":
            env = self.env
            with env.begin(write=False) as txn:
                byteflow = txn.get(key.encode())
            f_input = BytesIO(byteflow)
        elif self.db_type == "pth":
            f_input = self.feat_file[key]
        elif self.db_type == "h5":
            f_input = H5File(self.db_path, "r")[key]
        else:
            f_input = join(self.db_path, key + self.ext)

        # load image
        feat = self.loader(f_input)

        return feat
Example #24
def save_output_hdf5(blobs,
                     proto_path,
                     model_path,
                     path_out,
                     path_names,
                     h5mode='a',
                     name_column=0,
                     gpu=0,
                     phase=None):
    import csv
    import caffe
    from h5py import File as H5File
    if phase is None:
        phase = caffe.TEST
    try:
        os.makedirs(os.path.dirname(path_out))
    except OSError:
        pass
    names = [row[name_column] for row in csv.reader(open(path_names, 'r'))]
    with H5File(path_out, h5mode) as h5d:
        if gpu < 0:
            caffe.set_mode_cpu()
        else:
            caffe.set_mode_gpu()
            caffe.set_device(gpu)
        net = caffe.Net(proto_path, model_path, phase)
        i = 0
        while True:
            ret = net.forward(blobs=blobs)
            for s in range(ret[blobs[0]].shape[0]):
                if len(names) == i:
                    return
                try:
                    h5d.create_group(names[i])
                except ValueError:
                    pass
                for b in blobs:
                    try:
                        h5d[names[i]][b] = ret[b][s].copy()
                    except (ValueError, RuntimeError):
                        del h5d[names[i]][b]
                        h5d[names[i]][b] = ret[b][s].copy()
                i += 1
Example #25
    def from_kinetics_file(directh5, istate, fstate, dtau, n_iters=None):
        iter_slice = slice(n_iters)

        if isinstance(directh5, H5File):
            dataset = directh5['durations'][iter_slice]
        else:
            with H5File(directh5, 'r') as directh5:
                dataset = directh5['durations'][iter_slice]

        torf = np.logical_and(dataset['istate'] == istate,
                              dataset['fstate'] == fstate)
        torf = np.logical_and(torf, dataset['weight'] > 0)

        durations = dataset['duration']
        weights = dataset['weight']

        weights[~torf] = 0.0  # mask off irrelevant flux

        return DurationCorrector(durations, weights, dtau)
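
Note the dual interface: the isinstance check lets from_kinetics_file accept either an already-open h5py File or a path. A sketch with placeholder arguments:

dc = DurationCorrector.from_kinetics_file('direct.h5', istate=0, fstate=1, dtau=0.02)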
Example #26
    def __data_generation(self):
        """Generates data containing in batch_size"""
        X = np.empty((self.batch_size, self.n_time_steps, self.fft_size), dtype=np.float64)
        Y = np.empty((self.batch_size, self.fft_size), dtype=np.float64)

        # Generate data
        with H5File(self.h5_file_path, 'r') as f:
            for idx in range(self.batch_size):
                # Store sample
                if not self.batch:
                    for idx_time in range(self.n_time_steps - 1):
                        self.batch.append(f[self.target_group.path + '/DIRTY/DB'][self.fft_idx, :, :self.fft_size])
                        self.fft_idx += 1
                self.batch.append(f[self.target_group.path + '/DIRTY/DB'][self.fft_idx, :, :self.fft_size])
                X[idx, :, :] = self.batch
                Y[idx, :] = f[self.target_group.path + '/CLEAN/DB'][self.fft_idx, 0, :self.fft_size]
                self.fft_idx += 1
                if self.fft_idx >= self.target_group.n_fft:
                    self.__increment_target_group()

        return X, Y
Example #27
 def save_opacity(self, name, field_values, opacity_values,
                  folder_path):
     if (field_values.min() >= 0 and field_values.max() <= 255
             and opacity_values.min() >= 0 and opacity_values.max() <= 1
             and len(field_values) == len(opacity_values)
             and len(opacity_values) <= self.max_len):
         file_path = self.create_file_path(name, folder_path)
         # Create H5 file
         file = H5File(file_path, "w")
         opacity_dataset = file.create_dataset("opacity",
                                               data=opacity_values)
         field_dataset = file.create_dataset("field", data=field_values)
         file.attrs["opacity_name"] = name
         file.close()
         # Add to available opacities
         opacity = Opacity(file_path)
         if (os.path.normpath(folder_path) == self.opacity_folder_path):
             self.default_opacities.append(opacity)
         else:
             self.other_opacities.append(opacity)
         return True
     else:
         return False
Example #28
 def get_file(self):
     file = H5File(self.file_path, "r")
     return file
Example #29
    def LoadOsirisData(self):
        """Osiris Loader"""
        keyFolderNames = ["DENSITY", "FLD", "PHA", "RAW"]
        mainFolders = os.listdir(self._dataLocation)
        for folder in mainFolders:
            subDir = self._dataLocation + "/" + folder
            if folder == keyFolderNames[0]:
                speciesNames = os.listdir(subDir)
                for species in speciesNames:
                    if os.path.isdir(os.path.join(subDir, species)):
                        self.AddSpecies(Species(species))
                        speciesFields = os.listdir(subDir + "/" + species)
                        for field in speciesFields:
                            if os.path.isdir(
                                    os.path.join(subDir + "/" + species,
                                                 field)):
                                fieldLocation = subDir + "/" + species + "/" + field
                                fieldName = field
                                timeSteps = self.GetTimeStepsInOsirisLocation(
                                    fieldLocation)
                                if timeSteps.size != 0:
                                    self.AddFieldToSpecies(
                                        species,
                                        FolderField(
                                            "Osiris", fieldName,
                                            self.GiveStandardNameForOsirisQuantity(fieldName),
                                            fieldLocation, timeSteps, species))
            elif folder == keyFolderNames[1]:
                domainFields = os.listdir(subDir)
                for field in domainFields:
                    if os.path.isdir(os.path.join(subDir, field)):
                        fieldLocation = subDir + "/" + field
                        fieldName = field
                        timeSteps = self.GetTimeStepsInOsirisLocation(
                            fieldLocation)
                        if timeSteps.size != 0:
                            self.AddDomainField(
                                FolderField(
                                    "Osiris", fieldName,
                                    self.GiveStandardNameForOsirisQuantity(fieldName),
                                    fieldLocation, timeSteps))

            elif folder == keyFolderNames[3]:
                subDir = self._dataLocation + "/" + folder
                speciesNames = os.listdir(subDir)
                for species in speciesNames:
                    if os.path.isdir(os.path.join(subDir, species)):
                        self.AddSpecies(Species(species))
                        dataSetLocation = subDir + "/" + species
                        timeSteps = self.GetTimeStepsInOsirisLocation(
                            dataSetLocation)
                        if timeSteps.size != 0:
                            file_path = dataSetLocation + "/" + "RAW-" + species + "-" + str(
                                timeSteps[0]).zfill(6) + ".h5"
                            file_content = H5File(file_path, 'r')
                            for dataSetName in list(file_content):
                                if dataSetName == "tag":
                                    self.AddRawDataTagsToSpecies(
                                        species,
                                        RawDataTags("Osiris", dataSetName,
                                                    dataSetLocation, timeSteps,
                                                    species, dataSetName))
                                else:
                                    self.AddRawDataToSpecies(
                                        species,
                                        FolderRawDataSet(
                                            "Osiris", dataSetName,
                                            self.GiveStandardNameForOsirisQuantity(dataSetName),
                                            dataSetLocation, timeSteps,
                                            species, dataSetName))
                            file_content.close()
Example #30
## get arguments
def parse_command_line():
    parser = OptionParser(description="Merge banksim .h5 files together, maximizing match for each injection over bank fragments as necessary. We assume that all banksims were given the identical HL-INJECTIONS file and disjoint bank fragments.")
    parser.add_option("-o", "--output", help="Write output to hdf5 output")
    parser.add_option("-v", "--verbose", default=False, action="store_true", help="Tell me everything you know.")

    opts, args = parser.parse_args()

    return opts, args

opts, args = parse_command_line()

if not args: sys.exit("Nothing to do")

# initialize output file
outfile = H5File(opts.output, "w")

# based on the first file...
with H5File(args[0], "r") as infile:
    # populate sims completely
    out_sims = outfile.create_dataset("/sim_inspiral", data=infile["sim_inspiral"], compression='gzip', compression_opts=1)
    outfile.flush()

    # copy process and process params table
    # FIXME: only takes metadata from first file!
    outfile.create_dataset("/process", data=infile["/process"])
    outfile.create_dataset("/process_params", data=infile["/process_params"])

    # but we'll have to build up sngls and the match map as we go
    out_map = np.zeros(shape=infile["/match_map"].shape,
                       dtype=infile["/match_map"].dtype)