Example #1
    def __attrs_post_init__(self):
        """ Initialise from MATLAB *.mat file. """
        self.debug('Grid initialising')
        self.debug('Received data handle: ', self.data_handle)

        self.debug('Initialising grid...')
        self.grid = self.data_handle['/data/grid']
        self.dim = self._initialise_field('dim').astype(int)
        self.dx = self._initialise_field('dx')
        self.x_min = self._initialise_field('min')
        self.x_max = self._initialise_field('max')

        ## Maximum index along each dimension
        self.index_min = numpy.zeros(self.dx.shape).flatten()
        ## TODO: Verify maximum index as len - 1
        self.index_max = numpy.divide(self.x_max - self.x_min, self.dx).astype(int) 
        self.N = self._initialise_field('N').astype(int)

        self.N_data = numpy.prod(self.N)

        ## TODO: Decide on a time complexity that is bearable
        # for a single desktop workstation
        if self.N_data > 20000:
            print('WARNING: Complexity is very high ({})'.format(self.N_data))

        ## Initialise using hdf5storage.read to parse the MATLAB cell array
        self.boundary = hdf5storage.read(path='/data/grid/bdry/', filename=self.data_path)

        ## Initialise using hdf5storage.read to parse the MATLAB cell array
        self.vs = hdf5storage.read(path='/data/grid/vs/', filename=self.data_path)

        if self.show_config:
            self.print_config()
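For readers unfamiliar with the pattern above, here is a minimal, self-contained sketch of reading individual fields of a MATLAB v7.3 grid struct with hdf5storage.read; the file name and field layout are illustrative assumptions, not part of the original class.

import numpy
import hdf5storage

data_path = 'grid.mat'   # hypothetical MATLAB v7.3 file containing a /data/grid struct
dx = numpy.atleast_1d(hdf5storage.read(path='/data/grid/dx', filename=data_path))
x_min = numpy.atleast_1d(hdf5storage.read(path='/data/grid/min', filename=data_path))
x_max = numpy.atleast_1d(hdf5storage.read(path='/data/grid/max', filename=data_path))
# Same index computation as in __attrs_post_init__ above.
index_max = numpy.divide(x_max - x_min, dx).astype(int)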
Example #2
def rdecog(fn):
  """ Read an ECoG FieldTrip structure from a MATLAB v7.3 *.mat file into a dict. """
  d = {}
  d['dat'] = hdf5storage.read(path='ecog/ftrip/trial', filename=fn)[0][0]
  d['nchan'] = d['dat'].shape[0]
  label = hdf5storage.read(path='ecog/ftrip/label', filename=fn)
  d['label'] = [l[0][0][0] for l in label]
  d['sampr'] = hdf5storage.read(path='ecog/ftrip/fsample', filename=fn)[0][0]  # sampling rate
  d['tt'] = hdf5storage.read(path='ecog/ftrip/time', filename=fn)[0][0][0, :]  # time axis
  return d
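A usage sketch, assuming an ECoG FieldTrip file 'ecog.mat' laid out with the ecog/ftrip/* paths read above:

d = rdecog('ecog.mat')
print(d['nchan'], 'channels sampled at', d['sampr'], 'Hz')
print('first label:', d['label'][0])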
Example #3
def test_multi_write():
    # Makes a random dict of random paths and variables (random number
    # of randomized paths with random numpy arrays as values).
    data = dict()
    for i in range(0, random.randint(min_dict_keys, max_dict_keys)):
        name = random_name()
        data[name] = random_numpy(
            random_numpy_shape(dict_value_subarray_dimensions,
                               max_dict_value_subarray_axis_length),
            dtype=random.choice(dtypes))

    # Write it and then read it back item by item.
    if os.path.exists(filename):
        os.remove(filename)
    try:
        hdf5storage.writes(mdict=data, filename=filename)
        out = dict()
        for p in data:
            out[p] = hdf5storage.read(path=p, filename=filename)
    except:
        raise
    finally:
        if os.path.exists(filename):
            os.remove(filename)

    # Compare data and out.
    assert_equal(out, data)
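The round trip above can also be sketched without the randomized helpers; hdf5storage.reads (plural) is assumed here to return the values for several paths in a single call:

import os
import tempfile

import numpy as np
import hdf5storage

mdict = {'/a': np.arange(5), '/b/c': np.eye(3)}
fd, fname = tempfile.mkstemp(suffix='.h5')
os.close(fd)
try:
    hdf5storage.writes(mdict=mdict, filename=fname)
    a, bc = hdf5storage.reads(paths=('/a', '/b/c'), filename=fname)
finally:
    os.remove(fname)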
def test_plugin_marshaller_SubList():
    mc = hdf5storage.MarshallerCollection(load_plugins=True,
                                          lazy_loading=True)
    options = hdf5storage.Options(store_python_metadata=True,
                                  matlab_compatible=False,
                                  marshaller_collection=mc)
    ell = [1, 2, 'b1', b'3991', True, None]
    data = example_hdf5storage_marshaller_plugin.SubList(ell)
    f = None
    name = '/a'
    try:
        f = tempfile.mkstemp()
        os.close(f[0])
        filename = f[1]
        hdf5storage.write(data, path=name, filename=filename,
                          options=options)
        out = hdf5storage.read(path=name, filename=filename,
                               options=options)
    except:
        raise
    finally:
        if f is not None:
            os.remove(f[1])
    assert_equal_nose(ell, list(out))
    assert_equal_nose(type(out),
                      example_hdf5storage_marshaller_plugin.SubList)
def main(params):
    net = getattr(resnet, 'resnet152')()
    num_ftrs = net.fc.in_features
    #    net.fc = torch.nn.Linear(num_ftrs, 1)
    net.load_state_dict(torch.load('./resnet152.pth'), False)
    my_resnet = myResnet(net)
    my_resnet.cuda()
    my_resnet.eval()
    if not os.path.exists('resnet152'):
        subprocess.call('mkdir resnet152', shell=True)

    train_set = hdf5storage.read(path='/img_path', filename='./val_set.h5')
    print(len(train_set))
    seed(123)  # make reproducible
    for i in range(8000 + 780, len(train_set)):
        outputs = []

        for j in range(8):
            I = skimage.io.imread('./' + train_set[i][j].replace('\\', '/'),
                                  as_gray=1)
            if len(I.shape) == 2:
                I = I[:, :, np.newaxis]
                I = np.concatenate((I, I, I), axis=2)
            I = I.astype('float32') / 255.0
            I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()
            I = preprocess(I)
            with torch.no_grad():
                tmp_fc, tmp_att = my_resnet(I, params['att_size'])
            outputs.append(tmp_fc.data.cpu().float().numpy())
        file_nm = os.path.join('./resnet152', 'v_video' + str(i + 1) + '.npy')
        np.save(file_nm, outputs)
        print(file_nm)
Example #6
def test_multi_write():
    # Makes a random dict of random paths and variables (random number
    # of randomized paths with random numpy arrays as values).
    data = dict()
    for i in range(0, random.randint(min_dict_keys, max_dict_keys)):
        name = random_name()
        data[name] = random_numpy(
            random_numpy_shape(dict_value_subarray_dimensions,
                               max_dict_value_subarray_axis_length),
            dtype=random.choice(dtypes))

    # Write it and then read it back item by item.
    fld = None
    try:
        fld = tempfile.mkstemp()
        os.close(fld[0])
        filename = fld[1]
        hdf5storage.writes(mdict=data, filename=filename)
        out = dict()
        for p in data:
            out[p] = hdf5storage.read(path=p, filename=filename)
    except:
        raise
    finally:
        if fld is not None:
            os.remove(fld[1])

    # Compare data and out.
    assert_equal(out, data)
def test_plugin_marshaller_SubList():
    mc = hdf5storage.MarshallerCollection(load_plugins=True,
                                          lazy_loading=True)
    options = hdf5storage.Options(store_python_metadata=True,
                                  matlab_compatible=False,
                                  marshaller_collection=mc)
    ell = [1, 2, 'b1', b'3991', True, None]
    data = example_hdf5storage_marshaller_plugin.SubList(ell)
    f = None
    name = '/a'
    try:
        f = tempfile.mkstemp()
        os.close(f[0])
        filename = f[1]
        hdf5storage.write(data, path=name, filename=filename,
                          options=options)
        out = hdf5storage.read(path=name, filename=filename,
                               options=options)
    except:
        raise
    finally:
        if f is not None:
            os.remove(f[1])
    assert_equal_nose(ell, list(out))
    assert_equal_nose(type(out),
                      example_hdf5storage_marshaller_plugin.SubList)
Example #8
def check_read_filters(filters):
    # Read out the filter arguments.
    filts = {"compression": "gzip", "shuffle": True, "fletcher32": True, "gzip_level": 7}
    for k, v in filters.items():
        filts[k] = v
    if filts["compression"] == "gzip":
        filts["compression_opts"] = filts["gzip_level"]
    del filts["gzip_level"]

    # Make some random data.
    dims = random.randint(1, 4)
    data = random_numpy(
        shape=random_numpy_shape(dims, max_array_axis_length), dtype=random.choice(tuple(set(dtypes) - set(["U"])))
    )
    # Make a random name.
    name = random_name()

    # Write the data to the proper file with the given name with the
    # provided filters and read it back. The file needs to be deleted
    # before and after to keep junk from building up.
    if os.path.exists(filename):
        os.remove(filename)
    try:
        with h5py.File(filename, 'w') as f:
            f.create_dataset(name, data=data, chunks=True, **filts)
        out = hdf5storage.read(path=name, filename=filename, matlab_compatible=False)
    except:
        raise
    finally:
        if os.path.exists(filename):
            os.remove(filename)

    # Compare
    assert_equal(out, data)
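check_read_filters is typically driven by a small sweep over filter settings, along the lines of the sketch below (the module-level filename, dtypes, and random_* helpers are assumed to be defined as in the surrounding test module):

def test_read_filtered_data():
    # Sweep compression algorithms and shuffle on/off.
    for compression in ('gzip', 'lzf'):
        for shuffle in (True, False):
            check_read_filters({'compression': compression, 'shuffle': shuffle})
    # Sweep gzip compression levels.
    for level in range(10):
        check_read_filters({'compression': 'gzip', 'gzip_level': level})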
Example #9
    def loadh5(cls, file_name, path='/', matlab_compatible=False, **kwargs):
        """Load from an HDF5 file

        :param file_name: file name
        :param path: path to read data from
        """
        # TODO how to preserve order?
        return hdf5storage.read(path, file_name,
                                marshaller_collection=cls.__mc(),
                                matlab_compatible=matlab_compatible)
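A usage sketch; the owning class name and file layout below are hypothetical:

# Hypothetical owning class and file name; path and matlab_compatible are
# forwarded to hdf5storage.read as shown above.
cfg = ConfigContainer.loadh5('run01.h5', path='/config', matlab_compatible=False)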
 def write_readback(self, data, name, options, read_options=None):
     # Randomly convert the name to other path types.
     path_choices = (str, bytes, pathlib.PurePath, pathlib.PurePosixPath,
                     pathlib.PureWindowsPath, pathlib.Path)
     name_type_w = random.choice(path_choices)
     name_type_r = random.choice(path_choices)
     # Name to write with.
     if name_type_w == bytes:
         name_w = name.encode('utf-8')
     elif name_type_w in (pathlib.PurePath, pathlib.PurePosixPath,
                          pathlib.PosixPath):
         name_w = name_type_w(name)
     elif name_type_w != str:
         name_w = name_type_w(name[posixpath.isabs(name):])
     else:
         name_w = name
     # Name to read with.
     if name_type_r == bytes:
         name_r = name.encode('utf-8')
     elif name_type_r in (pathlib.PurePath, pathlib.PurePosixPath,
                          pathlib.PosixPath):
         name_r = name_type_r(name)
     elif name_type_r != str:
         name_r = name_type_r(name[posixpath.isabs(name):])
     else:
         name_r = name
     # Write the data to the proper file with the given name, read it
     # back, and return the result. The file needs to be deleted
     # after to keep junk from building up. Different options can be
     # used for reading the data back.
     f = None
     try:
         f = tempfile.mkstemp()
         os.close(f[0])
         filename = f[1]
         hdf5storage.write(data,
                           path=name_w,
                           filename=filename,
                           options=options)
         out = hdf5storage.read(path=name_r,
                                filename=filename,
                                options=read_options)
     except:
         raise
     finally:
         if f is not None:
             os.remove(f[1])
     return out
Example #11
 def write_readback(self, data, name, options):
     # Write the data to the proper file with the given name, read it
     # back, and return the result. The file needs to be deleted
     # before and after to keep junk from building up.
     if os.path.exists(self.filename):
         os.remove(self.filename)
     try:
         hdf5storage.write(data, path=name, filename=self.filename,
                           options=options)
         out = hdf5storage.read(path=name, filename=self.filename,
                                options=options)
     except:
         raise
     finally:
         if os.path.exists(self.filename):
             os.remove(self.filename)
     return out
Example #12
def check_read_filters(filters):
    # Read out the filter arguments.
    filts = {
        'compression': 'gzip',
        'shuffle': True,
        'fletcher32': True,
        'gzip_level': 7
    }
    for k, v in filters.items():
        filts[k] = v
    if filts['compression'] == 'gzip':
        filts['compression_opts'] = filts['gzip_level']
    del filts['gzip_level']

    # Make some random data.
    dims = random.randint(1, 4)
    data = random_numpy(shape=random_numpy_shape(dims, max_array_axis_length),
                        dtype=random.choice(tuple(set(dtypes) - set(['U']))))
    # Make a random name.
    name = random_name()

    # Write the data to the proper file with the given name with the
    # provided filters and read it back. The file needs to be deleted
    # after to keep junk from building up.
    fld = None
    try:
        fld = tempfile.mkstemp()
        os.close(fld[0])
        filename = fld[1]
        with h5py.File(filename, 'w') as f:
            f.create_dataset(name, data=data, chunks=True, **filts)
        out = hdf5storage.read(path=name,
                               filename=filename,
                               matlab_compatible=False)
    except:
        raise
    finally:
        if fld is not None:
            os.remove(fld[1])

    # Compare
    assert_equal(out, data)
Example #13
 def write_readback(self, data, name, options, read_options=None):
     # Write the data to the proper file with the given name, read it
     # back, and return the result. The file needs to be deleted
     # after to keep junk from building up. Different options can be
     # used for reading the data back.
     f = None
     try:
         f = tempfile.mkstemp()
         os.close(f[0])
         filename = f[1]
         hdf5storage.write(data, path=name, filename=filename,
                           options=options)
         out = hdf5storage.read(path=name, filename=filename,
                                options=read_options)
     except:
         raise
     finally:
         if f is not None:
             os.remove(f[1])
     return out
Example #14
 def write_readback(self, data, name, options):
     # Write the data to the proper file with the given name, read it
     # back, and return the result. The file needs to be deleted
     # before and after to keep junk from building up.
     if os.path.exists(self.filename):
         os.remove(self.filename)
     try:
         hdf5storage.write(data,
                           path=name,
                           filename=self.filename,
                           options=options)
         out = hdf5storage.read(path=name,
                                filename=self.filename,
                                options=options)
     except:
         raise
     finally:
         if os.path.exists(self.filename):
             os.remove(self.filename)
     return out
Example #15
def check_read_filters(filters):
    # Read out the filter arguments.
    filts = {'compression': 'gzip',
             'shuffle': True,
             'fletcher32': True,
             'gzip_level': 7}
    for k, v in filters.items():
        filts[k] = v
    if filts['compression'] == 'gzip':
        filts['compression_opts'] = filts['gzip_level']
    del filts['gzip_level']
    
    # Make some random data.
    dims = random.randint(1, 4)
    data = random_numpy(shape=random_numpy_shape(dims,
                        max_array_axis_length),
                        dtype=random.choice(tuple(
                        set(dtypes) - set(['U']))))
    # Make a random name.
    name = random_name()

    # Write the data to the proper file with the given name with the
    # provided filters and read it back. The file needs to be deleted
    # after to keep junk from building up.
    fld = None
    try:
        fld = tempfile.mkstemp()
        os.close(fld[0])
        filename = fld[1]
        with h5py.File(filename, 'w') as f:
            f.create_dataset(name, data=data, chunks=True, **filts)
        out = hdf5storage.read(path=name, filename=filename,
                               matlab_compatible=False)
    except:
        raise
    finally:
        if fld is not None:
            os.remove(fld[1])

    # Compare
    assert_equal(out, data)
Example #16
def mat2obj(folder=".", filename="model2.mat"):
    f = h5.read(folder, filename, options=h5.Options.matlab_compatible)
    return MatObj(f)
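For comparison, a more explicit sketch of the same read using a constructed Options object rather than the h5.Options.matlab_compatible attribute; whether this matches the behaviour intended by the h5 alias above is an assumption:

import hdf5storage

# Read the whole file root of a v7.3 model2.mat with MATLAB-compatible options.
opts = hdf5storage.Options(matlab_compatible=True)
f = hdf5storage.read(path='/', filename='model2.mat', options=opts)
model = MatObj(f)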
Example #17
import os

import hdf5storage
import torch

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

datasetR = []
data_path = './'

# %%
'''Load Test Data'''
no_measurements = 100
#filename = data_path+'data/gt_graph_random_large_outliers_real.h5'
filename = data_path + 'data/gt_graph_random_large_outliers_test.h5'
for item in range(no_measurements):
    x = torch.tensor(hdf5storage.read(path='/data/' + str(item + 1) + '/x',
                                      filename=filename,
                                      options=None),
                     dtype=torch.float)
    xt = torch.tensor(hdf5storage.read(path='/data/' + str(item + 1) + '/xt',
                                       filename=filename,
                                       options=None),
                      dtype=torch.float)
    o = torch.tensor(hdf5storage.read(path='/data/' + str(item + 1) + '/o',
                                      filename=filename,
                                      options=None),
                     dtype=torch.float)
    edge_index = torch.tensor(hdf5storage.read(path='/data/' + str(item + 1) +
                                               '/edge_index',
                                               filename=filename,
                                               options=None),
                              dtype=torch.long)
    def __init__(self, filename):
        self._filename = filename

        # Get and check the file type.
        file_type = hdf5storage.read(path='/Type',
                                     filename=filename)[()].decode()
        if file_type != 'Acquisition HDF5':
            raise NotImplementedError('Unsupported file type.')

        # Get and check the version.
        self.Version = hdf5storage.read(path='/Version',
                                        filename=filename)[()].decode()
        self._supported_version = \
            _get_supported_version(self.Version)
        if self._supported_version is None:
            raise NotImplementedError('Unsupported Acquisition '
                                      + 'HDF5 version: '
                                      + self.Version)

        # If it is version 1.1.0 or newer, it will have the Software
        # field which we want to grab (set to None otherwise).
        if LooseVersion(self._supported_version) \
                >= LooseVersion('1.1.0'):
            self.Software = hdf5storage.read( \
                path='/Software', filename=filename)[()].decode()
        else:
            self.Software = None

        # Read the Info field and convert it to a dict from a structured
        # ndarray if it isn't a dict already.
        info = hdf5storage.read(path='/Info', filename=filename)
        if isinstance(info, dict):
            self.Info = info
        else:
            self.Info = dict()
            for field in info.dtype.names:
                self.Info[field] = info[field][0]

        # Convert string types to str from np.bytes_.
        for k, v in self.Info.items():
            if isinstance(v, np.bytes_):
                self.Info[k] = v.decode()

        # Grab and check the type and storage type.
        tp = hdf5storage.read(path='/Data/Type',
                              filename=filename)[()].decode()
        if tp not in _data_types:
            raise NotImplementedError('Unsupported data type: '
                                        + tp)
        self.Type = _data_types[tp]
        tp = hdf5storage.read(path='/Data/StorageType',
                              filename=filename)[()].decode()
        if tp not in _data_types:
            raise NotImplementedError('Unsupported storage type: '
                                        + tp)
        self.StorageType = _data_types[tp]

        # Check that /Data/Data is present.
        with h5py.File(filename, 'r') as f:
            if self.data_path not in f:
                raise NotImplementedError("Couldn't find the acquired "
                                          'data.')
Example #19
def run_testcase(casefile):

    data = hdf5storage.read(filename=casefile)
    if data['version'] == 'diff_v1':
        run_test_diff_v1(data)
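run_testcase relies on a read of the default path '/' returning every top-level variable as a dict; a minimal sketch of producing and consuming such a case file (the variable names besides 'version' are illustrative):

import numpy as np
import hdf5storage

# Write a tiny case file, then read the whole root back as a dict.
case = {'/version': 'diff_v1', '/values': np.arange(4.0)}
hdf5storage.writes(mdict=case, filename='case_diff_v1.mat', matlab_compatible=True)
data = hdf5storage.read(filename='case_diff_v1.mat')
assert data['version'] == 'diff_v1'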
Example #20
    model.load_state_dict(model_data['state_dict'], False)

    model.eval()

    class_names = []
    with open('class_names_list') as f:
        for row in f:
            class_names.append(row[:-1])

    if os.path.exists('tmpv11'):
        subprocess.call('rm -rf tmpv11', shell=True)
    if not os.path.exists('resnext101-64f'):
        subprocess.call('mkdir resnext101-64f', shell=True)

    val_set = hdf5storage.read(path='/img_path', filename='./val_set.h5')
    print(len(val_set))
    for i in range(len(val_set)):

        subprocess.call('mkdir tmpv11', shell=True)
        for j in range(8):
            img_path = './' + val_set[i][j].replace('\\', '/')

            subprocess.call('cp ' + img_path + ' tmpv11/' +
                            'image_{:05d}.jpg'.format(j + 1),
                            shell=True)

        result = classify_video('tmpv11', str(i), class_names, model, opt)
        outputs = []
        for kk in range(7):
            outputs.append(result['clips'][kk]['features'])
Example #21
def find_representative_locations(paths, param, tech):
    """
    This function reads the masked FLH raster and finds the coordinates and indices of the pixels for the user-defined quantiles for each region.
    It creates a shapefile containing the position of those points for each region, and two MAT files with their
    coordinates and indices.

    :param paths: Dictionary of dictionaries containing path values for FLH MAT files, region statistics, and output paths.
    :type paths: dict
    :param param: Dictionary of dictionaries containing the user-defined quantiles, FLH resolution, and spatial scope.
    :type param: dict
    :param tech: Technology under study.
    :type tech: str

    :return: The shapefile with the locations and the two MAT files for the coordinates and the indices are saved
             directly in the given paths, along with their corresponding metadata in JSON files.
    :rtype: None
    """
    ul.timecheck("Start")
    FLH_mask = hdf5storage.read("FLH_mask", paths[tech]["FLH_mask"])
    quantiles = param["quantiles"]
    res_desired = param["res_desired"]
    filter = pd.read_csv(paths[tech]["Region_Stats"],
                         sep=";",
                         decimal=",",
                         index_col=0).index
    if tech == "WindOff":
        Crd = param["Crd_offshore"]
        GeoRef = param["GeoRef_offshore"]
        # Select only indices in the report
        regions_shp = param["regions_sea"].loc[filter]
        nRegions = len(regions_shp)
        Crd_regions = param["Crd_regions_sea"]
    else:
        Crd = param["Crd_all"]
        GeoRef = param["GeoRef"]
        # Select only indices in the report
        regions_shp = param["regions_land"].loc[filter]
        nRegions = len(regions_shp)
        Crd_regions = param["Crd_regions_land"]
    Ind = ind_merra(Crd_regions, Crd, res_desired)
    reg_ind = np.zeros((nRegions, len(quantiles), 2))
    k = 0
    list_names = []
    list_quantiles = []
    for reg in filter:
        # A_region
        A_region = calc_region(regions_shp.loc[reg], Crd_regions[reg, :],
                               res_desired, GeoRef)

        FLH_reg = A_region * FLH_mask[Ind[reg, 2] - 1:Ind[reg, 0],
                                      Ind[reg, 3] - 1:Ind[reg, 1]]
        FLH_reg[FLH_reg == 0] = np.nan
        X = FLH_reg.flatten(order="F")
        I_old = np.argsort(X)

        # Skip this region if the intersection only yields NaN
        if np.isnan(X).all():
            continue

        q_rank = 0
        for q in quantiles:
            if tech == "WindOff":
                list_names.append(regions_shp["ISO_Ter1"].loc[reg])
            else:
                # list_names.append(regions_shp["NAME_SHORT"].loc[reg])
                list_names.append(regions_shp["GID_0"].loc[reg])

            list_quantiles.append("q" + str(q))
            if q == 100:
                I = I_old[(len(X) - 1) - sum(np.isnan(X).astype(int))]
            elif q == 0:
                I = I_old[0]
            else:
                I = I_old[int(
                    np.round(q / 100 *
                             (len(X) - 1 - sum(np.isnan(X).astype(int)))))]
            # Convert the indices to row-column indices
            I, J = ul.ind2sub(FLH_reg.shape, I)
            reg_ind[k,
                    q_rank, :] = np.array([I + Ind[reg, 2],
                                           J + Ind[reg, 3]]).astype(int)
            q_rank = q_rank + 1
        k = k + 1

    reg_ind = np.reshape(reg_ind, (-1, 2), "C").astype(int)

    reg_ind = (reg_ind[:, 0] - 1, reg_ind[:, 1] - 1)

    param[tech]["Ind_points"] = reg_ind
    param[tech]["Crd_points"] = ind2crd(reg_ind, Crd, res_desired)
    param[tech]["Crd_points"] = (param[tech]["Crd_points"][0],
                                 param[tech]["Crd_points"][1], list_names,
                                 list_quantiles)

    # Format point locations
    points = [(param[tech]["Crd_points"][1][i],
               param[tech]["Crd_points"][0][i])
              for i in range(0, len(param[tech]["Crd_points"][0]))]

    # Create shapefile
    schema = {
        "geometry": "Point",
        "properties": {
            "NAME_SHORT": "str",
            "quantile": "str"
        }
    }
    with fiona.open(paths[tech]["Locations"], "w", "ESRI Shapefile",
                    schema) as c:
        c.writerecords([{
            "geometry": mapping(Point(points[i])),
            "properties": {
                "NAME_SHORT": list_names[i],
                "quantile": list_quantiles[i]
            }
        } for i in range(0, len(points))])
    hdf5storage.writes({"Ind_points": param[tech]["Ind_points"]},
                       paths[tech]["Locations"][:-4] + "_Ind.mat",
                       store_python_metadata=True,
                       matlab_compatible=True)
    hdf5storage.writes({"Crd_points": param[tech]["Crd_points"]},
                       paths[tech]["Locations"][:-4] + "_Crd.mat",
                       store_python_metadata=True,
                       matlab_compatible=True)
    ul.create_json(
        paths[tech]["Locations"],
        param,
        [
            "author", "comment", tech, "region_name", "subregions_name",
            "quantiles"
        ],
        paths,
        ["subregions"],
    )
    print("files saved: " + paths[tech]["Locations"])
    ul.timecheck("End")
Example #22
def generate_time_series_for_representative_locations(paths, param, tech):
    """
    This function generates yearly capacity-factor time series for the technology of choice at the quantile locations
    generated in find_representative_locations.
    The time series are saved in CSV files.

    :param paths: Dictionary of dictionaries containing paths to coordinate and indices of the quantile locations.
    :type paths: dict
    :param param: Dictionary of dictionaries containing processing parameters.
    :type param: dict
    :param tech: Technology under study.
    :type tech: str

    :return: The CSV file with the time series for all subregions and quantiles is saved directly in the given path,
             along with the corresponding metadata in a JSON file.
    :rtype: None
    """
    ul.timecheck("Start")
    nproc = param["nproc"]
    CPU_limit = np.full((1, nproc), param["CPU_limit"])
    param[tech]["Crd_points"] = hdf5storage.read(
        "Crd_points", paths[tech]["Locations"][:-4] + "_Crd.mat")
    param[tech]["Ind_points"] = hdf5storage.read(
        "Ind_points", paths[tech]["Locations"][:-4] + "_Ind.mat")
    list_names = param[tech]["Crd_points"][2]
    list_quantiles = param[tech]["Crd_points"][3]
    m_high = param["m_high"]

    # Obtain weather and correction matrices
    param["Ind_nz"] = param[tech]["Ind_points"]
    merraData, rasterData = pl.get_merra_raster_data(paths, param, tech)

    if tech in ["OpenFieldPV", "RoofTopPV", "CSP"]:
        res_weather = param["res_weather"]
        Crd_all = param["Crd_all"]
        Ind = ind_merra(Crd_all, Crd_all, res_weather)[0]

        day_filter = np.nonzero(
            merraData["CLEARNESS"][Ind[2] - 1:Ind[0],
                                   Ind[3] - 1:Ind[1], :].sum(axis=(0, 1)))
        list_hours = np.arange(0, 8760)
        if nproc == 1:
            param["status_bar_limit"] = list_hours[-1]
            results = calc_TS_solar(list_hours[day_filter],
                                    [param, tech, rasterData, merraData])
        else:
            list_hours = np.array_split(list_hours[day_filter], nproc)
            param["status_bar_limit"] = list_hours[0][-1]
            results = mp.Pool(processes=nproc,
                              initializer=ul.limit_cpu,
                              initargs=CPU_limit).starmap(
                                  calc_TS_solar,
                                  it.product(
                                      list_hours,
                                      [[param, tech, rasterData, merraData]]))
        print("\n")

        # Collecting results
        TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
        if nproc > 1:
            for p in range(len(results)):
                TS = TS + results[p]
        else:
            TS = results

    elif tech in ["WindOff"]:
        list_hours = np.array_split(np.arange(0, 8760), nproc)
        param["status_bar_limit"] = list_hours[0][-1]
        results = mp.Pool(processes=nproc,
                          initializer=ul.limit_cpu,
                          initargs=CPU_limit).starmap(
                              calc_TS_windoff,
                              it.product(
                                  list_hours,
                                  [[param, tech, rasterData, merraData]]))
        print("\n")

        # Collecting results
        TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
        if nproc > 1:
            for p in range(len(results)):
                TS = TS + results[p]
        else:
            TS = results

    elif tech in ["WindOn"]:
        # initialisation of the array of point indices at MERRA resolution
        param[tech]["Ind_merra_points"] = hdf5storage.read(
            "Ind_points", paths[tech]["Locations"][:-4] + "_Ind.mat")
        # calculation of merra resolution indices for points
        for p in range(len(param[tech]["Ind_points"][0])):
            param[tech]["Ind_merra_points"][0][p] = (
                m_high - param[tech]["Ind_points"][0][p] - 1) / 200
            param[tech]["Ind_merra_points"][1][p] = (
                param[tech]["Ind_points"][1][p] + 1) / 250
        # read wind speed merra data and the box coordinates
        merraData = merraData["W50M"][::-1, :, :]
        b_xmin = hdf5storage.read("MERRA_XMIN", paths["MERRA_XMIN"])
        b_xmax = hdf5storage.read("MERRA_XMAX", paths["MERRA_XMAX"])
        b_ymin = hdf5storage.read("MERRA_YMIN", paths["MERRA_YMIN"])
        b_ymax = hdf5storage.read("MERRA_YMAX", paths["MERRA_YMAX"])
        # read global wind atlas data and coordinates
        with rasterio.open(paths["GWA_global"]) as src:
            GWA_array = src.read(1)
        # GWA_array = np.power(GWA_array, 3)
        GWA_array[np.isnan(GWA_array)] = 0
        x_gwa = hdf5storage.read("GWA_X", paths["GWA_X"])
        y_gwa = hdf5storage.read("GWA_Y", paths["GWA_Y"])

        TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
        list_points = np.arange(0, len(param[tech]["Ind_points"][0]))
        if nproc == 1:
            param["status_bar_limit"] = list_points[-1]
            results = calc_TS_windon(list_points, [
                param, tech, paths, rasterData, merraData, b_xmin, b_xmax,
                b_ymin, b_ymax, GWA_array, x_gwa, y_gwa
            ])
            TS = results
        else:
            list_points = np.array_split(list_points, nproc)
            param["status_bar_limit"] = list_points[0][-1]
            results = mp.Pool(
                processes=nproc, initializer=ul.limit_cpu,
                initargs=CPU_limit).starmap(
                    calc_TS_windon,
                    it.product(list_points, [[
                        param, tech, paths, rasterData, merraData, b_xmin,
                        b_xmax, b_ymin, b_ymax, GWA_array, x_gwa, y_gwa
                    ]]))
            for p in range(len(results)):
                TS = TS + results[p]
    print("\n")

    # Restructuring results
    tuples = list(zip(list_names, list_quantiles))
    if tech == "WindOff":
        column_names = pd.MultiIndex.from_tuples(
            tuples, names=["ISO_Ter1", "Quantile"])
    else:
        # column_names = pd.MultiIndex.from_tuples(tuples, names=["NAME_SHORT", "Quantile"])
        column_names = pd.MultiIndex.from_tuples(tuples,
                                                 names=["GID_0", "Quantile"])
    results = pd.DataFrame(TS.transpose(), columns=column_names)
    results.to_csv(paths[tech]["TS"], sep=";", decimal=",")
    ul.create_json(
        paths[tech]["TS"],
        param,
        [
            "author", "comment", tech, "quantiles", "region_name",
            "subregions_name", "year", "Crd_all"
        ],
        paths,
        [tech, "subregions"],
    )
    print("files saved: " + paths[tech]["TS"])
    ul.timecheck("End")
Example #23
def generate_time_series_for_specific_locations(paths, param, tech):
    """
    This function generates yearly capacity-factor time series for the technology of choice at user-defined locations.
    The time series are saved in CSV files.

    :param paths: Dictionary of dictionaries containing paths output desired locations.
    :type paths: dict
    :param param: Dictionary of dictionaries containing processing parameters, and user-defined locations.
    :type param: dict
    :param tech: Technology under study.
    :type tech: str

    :return: The CSV file with the time series for all subregions and quantiles is saved directly in the given path,
             along with the corresponding metadata in a JSON file.
    :rtype: None
    :raise Point locations not found: Raised when the dictionary containing the point names and locations is empty.
    :raise Points outside spatial scope: Raised when some points lie outside the spatial scope, so no input maps are available for the calculations.
    """
    ul.timecheck("Start")
    nproc = param["nproc"]
    CPU_limit = np.full((1, nproc), param["CPU_limit"])
    res_desired = param["res_desired"]
    Crd_all = param["Crd_all"]
    m_high = param["m_high"]

    # Read user defined locations dictionary
    if not param["useloc"]:
        warn(
            "Point locations not found: Please fill in the name and locations of the points in config.py prior to executing this function",
            UserWarning,
        )
        ul.timecheck("End")
        return
    points_df = pd.DataFrame.from_dict(param["useloc"],
                                       orient="index",
                                       columns=["lat", "lon"])

    # Filter points outside spatial scope
    lat_max, lon_max, lat_min, lon_min = param["spatial_scope"]
    # Points outside the scope bound
    out_scope_df = points_df.loc[(lat_min > points_df["lat"]) |
                                 (lat_max < points_df["lat"]) |
                                 (lon_min > points_df["lon"]) |
                                 (lon_max < points_df["lon"])].copy()
    if not out_scope_df.empty:
        out_points = list(out_scope_df.index)
        print(
            "WARNING: The following points are located outside of the spatial scope "
            + str(param["spatial_scope"]) + ": \n" + str(out_scope_df))
        warn("Points located outside spatial scope", UserWarning)
    # Points inside the scope bounds
    points_df = points_df.loc[(lat_min <= points_df["lat"])
                              & (lat_max >= points_df["lat"]) &
                              (lon_min <= points_df["lon"]) &
                              (lon_max >= points_df["lon"])].copy()
    if not points_df.empty:
        # Prepare input for calc_TS functions
        crd = (points_df["lat"].to_numpy(), points_df["lon"].to_numpy())
        ind = crd2ind(crd, Crd_all, res_desired)
        list_names = ["UD"] * len(crd[0])
        list_points = list(points_df.index)

        param[tech]["Crd_points"] = (crd[0], crd[1], list_names, list_points)
        param[tech]["Ind_points"] = crd2ind(crd, Crd_all, res_desired)

        # Obtain weather and correction matrices
        param["Ind_nz"] = param[tech]["Ind_points"]
        merraData, rasterData = pl.get_merra_raster_data(paths, param, tech)

        if tech in ["OpenFieldPV", "RoofTopPV", "CSP"]:
            # Set up day_filter
            res_weather = param["res_weather"]
            Ind = ind_merra(Crd_all, Crd_all, res_weather)[0]
            day_filter = np.nonzero(
                merraData["CLEARNESS"][Ind[2] - 1:Ind[0],
                                       Ind[3] - 1:Ind[1], :].sum(axis=(0, 1)))

            list_hours = np.arange(0, 8760)
            if nproc == 1:
                param["status_bar_limit"] = list_hours[-1]
                results = calc_TS_solar(list_hours[day_filter],
                                        [param, tech, rasterData, merraData])
            else:
                list_hours = np.array_split(list_hours[day_filter], nproc)
                param["status_bar_limit"] = list_hours[0][-1]
                results = mp.Pool(
                    processes=nproc,
                    initializer=ul.limit_cpu,
                    initargs=CPU_limit).starmap(
                        calc_TS_solar,
                        it.product(list_hours,
                                   [[param, tech, rasterData, merraData]]))
            print("\n")

            # Collecting results
            TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
            if nproc > 1:
                for p in range(len(results)):
                    TS = TS + results[p]
            else:
                TS = results

        elif tech in ["WindOff"]:
            list_hours = np.array_split(np.arange(0, 8760), nproc)
            param["status_bar_limit"] = list_hours[0][-1]
            results = mp.Pool(processes=nproc,
                              initializer=ul.limit_cpu,
                              initargs=CPU_limit).starmap(
                                  calc_TS_windoff,
                                  it.product(
                                      list_hours,
                                      [[param, tech, rasterData, merraData]]))
            print("\n")

            # Collecting results
            TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
            if nproc > 1:
                for p in range(len(results)):
                    TS = TS + results[p]
            else:
                TS = results

        elif tech in ["WindOn"]:
            # initialisation of the array of point indices at MERRA resolution
            param[tech]["Ind_merra_points"] = ind
            # calculation of merra resolution indices for points
            for p in range(len(param[tech]["Ind_points"][0])):
                param[tech]["Ind_merra_points"][0][p] = (m_high - ind[0][p] -
                                                         1) / 200
                param[tech]["Ind_merra_points"][1][p] = (ind[1][p] + 1) / 250
            # read wind speed merra data and the box coordinates
            merraData = merraData["W50M"][::-1, :, :]
            b_xmin = hdf5storage.read("MERRA_XMIN", paths["MERRA_XMIN"])
            b_xmax = hdf5storage.read("MERRA_XMAX", paths["MERRA_XMAX"])
            b_ymin = hdf5storage.read("MERRA_YMIN", paths["MERRA_YMIN"])
            b_ymax = hdf5storage.read("MERRA_YMAX", paths["MERRA_YMAX"])
            # read global wind atlas data and coordinates
            with rasterio.open(paths["GWA_global"]) as src:
                GWA_array = src.read(1)
            # GWA_array = np.power(GWA_array, 3)
            GWA_array[np.isnan(GWA_array)] = 0
            x_gwa = hdf5storage.read("GWA_X", paths["GWA_X"])
            y_gwa = hdf5storage.read("GWA_Y", paths["GWA_Y"])

            TS = np.zeros((len(param[tech]["Ind_points"][0]), 8760))
            list_locations = np.arange(0, len(param[tech]["Ind_points"][0]))
            if nproc == 1:
                param["status_bar_limit"] = list_points[-1]
                results = calc_TS_windon(list_locations, [
                    param, tech, paths, rasterData, merraData, b_xmin, b_xmax,
                    b_ymin, b_ymax, GWA_array, x_gwa, y_gwa
                ])
                TS = results
            else:
                list_locations = np.array_split(list_locations, nproc)
                param["status_bar_limit"] = list_locations[0][-1]
                results = mp.Pool(
                    processes=nproc,
                    initializer=ul.limit_cpu,
                    initargs=CPU_limit).starmap(
                        calc_TS_windon,
                        it.product(list_locations, [[
                            param, tech, paths, rasterData, merraData, b_xmin,
                            b_xmax, b_ymin, b_ymax, GWA_array, x_gwa, y_gwa
                        ]]))
                for p in range(len(results)):
                    TS = TS + results[p]

            # TS = calc_TS_windon(param, paths, tech, rasterData, merraData, b_xmin, b_xmax, b_ymin, b_ymax, GWA_array, x_gwa, y_gwa)

        print("\n")

        # Restructuring results
        results = pd.DataFrame(TS.transpose(),
                               columns=list_points).rename_axis("Points",
                                                                axis="columns")
        results.to_csv(paths[tech]["TS_discrete"], sep=";", decimal=",")
        ul.create_json(
            paths[tech]["TS_discrete"],
            param,
            [
                "author", "comment", tech, "useloc", "region_name",
                "subregions_name", "year", "Crd_all"
            ],
            paths,
            [tech, "subregions"],
        )
        print("files saved: " + paths[tech]["TS_discrete"])
    ul.timecheck("End")