Example #1
 def load_hdf5(self, indir):
     """
     Read data from an HDF5 file.

     :param indir: path to the ".h5" file to read from
     :type indir: str
     """
     self.metadata = h5w.load(indir, path='metadata/')
     self.source_properties = h5w.load(indir, path='source_properties/')
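
For symmetry, a hypothetical save counterpart could write both groups back in one call; the h5py_wrapper.save(file, dict, write_mode) signature below is the one used in Example #4, while the method name and grouping are assumptions:

import h5py_wrapper as h5w

def save_hdf5(self, outpath):
    # Hypothetical counterpart to load_hdf5: write both groups to one file.
    h5w.save(outpath,
             {'metadata': self.metadata,
              'source_properties': self.source_properties},
             write_mode='w')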
Example #2

    def test_save_creates_correct_output(self, tmpdir, empty_network):
        file = 'test.h5'
        keys = [
            'results', 'results_hash_dict', 'network_params', 'analysis_params'
        ]

        def _test_func(x):
            return x

        def test_function(network):
            return lmt.utils._cache(network, _test_func, {'x': 1}, 'test')

        empty_network.network_params['a'] = 1
        empty_network.analysis_params['a'] = 1
        test_function(empty_network)

        tmp_test = tmpdir.mkdir('tmp_test')
        with tmp_test.as_cwd():
            io.save_network(file, empty_network, overwrite=True)
            output = h5.load(file)
            for key in keys:
                assert key in output.keys()
            # check that dicts are not empty
            for sub_dict in output.values():
                assert bool(sub_dict)
            # check that all quantities have been converted
            check_dict_contains_no_quantity(output)
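
lmt.utils._cache itself is not shown here; judging only from the keys asserted above, a plausible minimal sketch (an assumption, not lmt's implementation) is:

def _cache(network, func, params, key):
    # Evaluate func once and record the result both under its result key
    # and in a hash dict alongside the parameters it was computed with.
    result = func(**params)
    network.results[key] = result
    network.results_hash_dict[key] = {'params': params, 'result': result}
    return result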
Example #3

 def test_save_adds_units_to_results(self, mocker, tmpdir, empty_network):
     empty_network.results['test'] = 1
     empty_network.result_units['test'] = 'millivolt'
     tmp_test = tmpdir.mkdir('tmp_test')
     with tmp_test.as_cwd():
         empty_network.save('test.h5')
         data = h5.load('test.h5')
         assert data['results']['test']['val'] == 1
         assert data['results']['test']['unit'] == 'millivolt'
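
Reversing this val/unit split on load is straightforward; a minimal sketch using pint, with the helper name being an assumption:

import pint

ureg = pint.UnitRegistry()

def add_units_back(results):
    # Rebuild pint quantities from the {'val': ..., 'unit': ...} entries
    # produced on save; leave every other entry untouched.
    restored = {}
    for key, entry in results.items():
        if isinstance(entry, dict) and set(entry) == {'val', 'unit'}:
            restored[key] = entry['val'] * ureg(entry['unit'])
        else:
            restored[key] = entry
    return restored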
Example #4
    def concatenate(self):

        master_data_filepath = os.path.abspath(
            os.path.join(self.directory, '..', '%s.dat' % self.directory))

        if len(self.children) > 0:
            # Sequentially open up children and insert their results into a
            # master dictionary
            dummy_dict = h5py_wrapper.load(self.children[0]['path'])
            master_dict = init_structure(self.total_tasks, dummy_dict)

            for child in self.children:
                child_data = h5py_wrapper.load(child['path'])
                master_dict = insert_data(master_dict, child_data, child['idx'])

            h5py_wrapper.save(master_data_filepath, master_dict, write_mode='w')
        else:
            # Still create a dummy .dat file to indicate that the job completed
            h5py_wrapper.save(master_data_filepath, {}, write_mode='w')
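
init_structure and insert_data are not defined in this snippet; a minimal sketch of what they might look like, assuming each child dict shares the same keys and each key collects one entry per task:

def init_structure(total_tasks, dummy_dict):
    # Pre-allocate one empty slot per task for every key seen in a sample
    # child result (assumed layout, not necessarily the original one).
    return {key: [None] * total_tasks for key in dummy_dict}

def insert_data(master_dict, child_data, idx):
    # Drop the child's values into the slots for the task it handled.
    for key, value in child_data.items():
        master_dict[key][idx] = value
    return master_dict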
Example #5
 def test_save_creates_correct_output(self, tmpdir, network_dict_quantity):
     file = 'test.h5'
     keys = ['results', 'results_hash_dict', 'network_params',
             'analysis_params']
     tmp_test = tmpdir.mkdir('tmp_test')
     with tmp_test.as_cwd():
         io.save_quantity_dict_to_h5(file, network_dict_quantity)
         output = h5.load(file)
         for key in keys:
             assert key in output.keys()
         # check that dicts are not empty
         for sub_dict in output.values():
             assert bool(sub_dict)
         # check that all quantities have been converted
         check_dict_contains_no_quantity(output)
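
check_dict_contains_no_quantity, used by several of these tests, is not shown; a minimal recursive sketch, assuming pint quantities are what must not survive the conversion:

import pint

def check_dict_contains_no_quantity(d):
    # Recurse through nested dicts and fail on any remaining pint quantity
    # (assumes quantities stem from pint's application registry).
    for value in d.values():
        if isinstance(value, dict):
            check_dict_contains_no_quantity(value)
        else:
            assert not isinstance(value, pint.Quantity)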
Example #6

# Imports assumed for this module-level snippet; ResultsManager is taken to
# be importable from the surrounding package.
import glob
import os
import pickle
import time

import numpy as np
import h5py_wrapper


def concatenate_children(comm, root_dir):

    # Assemble the list of subdirectories that need to be processed
    dirlist = []
    for root, dirs, files in os.walk(root_dir):
        for d in dirs:
            p = os.path.join(root, d)
            if 'node' in p:
                dirno = p.split('dir')[1].split('/')[0]
                nodeno = p.split('node')[1]
                if len(glob.glob('%s/master_%s_%s.dat' %
                                 (p, dirno, nodeno))) == 0:
                    dirlist.append(p)

    # Chunk the dirlist
    chunk_dirlist = np.array_split(dirlist, comm.size)
    rank_dirlist = chunk_dirlist[comm.rank]
    print(len(rank_dirlist))
    for i, p in enumerate(rank_dirlist):

        t0 = time.time()
        rmanager = ResultsManager.restore_from_directory(p)
        master_list = []
        dirno = p.split('dir')[1].split('/')[0]
        nodeno = p.split('node')[1]

        for child in rmanager.children:

            try:
                child_data = h5py_wrapper.load(child['path'])
                child_data['idx'] = child['idx']
                master_list.append(child_data)
            except Exception:
                continue

        # Pickle away
        with open('%s/master_%s_%s.dat' % (p, dirno, nodeno), 'wb') as f:
            f.write(pickle.dumps(master_list))
        print('Task %d/%d, %f s' % (i + 1, len(rank_dirlist), time.time() - t0))
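
A hypothetical driver for this function, assuming an mpi4py launch; the script name and argument handling are assumptions:

import sys
from mpi4py import MPI

if __name__ == '__main__':
    # e.g. mpiexec -n 4 python concatenate_driver.py /path/to/root_dir
    concatenate_children(MPI.COMM_WORLD, sys.argv[1])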
Example #7
 def test_save_creates_correct_output(self, tmpdir, mocker, network):
     file = 'test.h5'
     keys = ['results', 'results_hash_dict', 'network_params',
             'analysis_params']
     
     @lmt.Network._check_and_store(['test'], ['test_key'])
     def test_method(self, key):
         return 1 * ureg.ms
 
     mocker.patch.object(lmt.Network, 'mean_input', new=test_method)
     network.mean_input(np.array([1, 2, 3]) * ureg.ms)
     
     tmp_test = tmpdir.mkdir('tmp_test')
     with tmp_test.as_cwd():
         io.save_network(file, network)
         output = h5.load(file)
         for key in keys:
             assert key in output.keys()
         # check that dicts are not empty
         for sub_dict in output.values():
             assert bool(sub_dict)
         # check that all quantities have been converted
         check_dict_contains_no_quantity(output)
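
lmt.Network._check_and_store is patched rather than shown here; purely as an assumption (not lmt's actual code), a decorator in this style would explain why the mocked method's return value ends up in network.results:

from functools import wraps

def check_and_store(result_keys, analysis_keys=None):
    # Hypothetical sketch: record positional arguments as analysis params,
    # call the wrapped method once, and store its result under the first
    # result key.
    def decorator(method):
        @wraps(method)
        def wrapper(self, *args):
            for key, arg in zip(analysis_keys or [], args):
                self.analysis_params[key] = arg
            result = method(self, *args)
            self.results[result_keys[0]] = result
            return result
        return wrapper
    return decorator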
Example #8

def pytest_generate_tests(metafunc,
                          all_params=all_params,
                          results=results,
                          ids_all_regimes=ids_all_regimes):
    """
    Special pytest function defining parametrizations for certain fixtures.
    
    `pos_keys`:
    If a test requires all positive keys contained in the list of arguments of
    the tested function, the corresponding function test class needs to have a
    class attribute `func`, which is the tested function as a staticmethod.
    The pos keys are tested one after each other as a parametrization.
    
    `output_test_fixtures`:
    If a test requires input arguments and outputs in different regimes for
    comparison with the return values of the tested function, the corresponding
    function test class needs a class attribute `func`, the tested function as
    a staticmethod, and either a `output_key` attribute or a `output_keys`
    attribute. The different parameter regimes are then tested one after each
    other as a parametrization.
    """
    # check if requesting test class has class attribute func
    if hasattr(metafunc.cls, 'func'):
        func = metafunc.cls.func
    # if it does not, just return and don't parametrize
    else:
        return None

    if "pos_keys" in metafunc.fixturenames:
        pos_keys = get_required_keys(func, all_pos_keys)
        # define parametrization
        metafunc.parametrize("pos_keys", pos_keys)

    elif "output_test_fixtures" in metafunc.fixturenames:
        # list of input arguments for the tested function for each regime
        params = [
            get_required_params(func, dict(regime_results, **regime_params))
            for regime_params, regime_results in zip(all_params, results)
        ]
        # list of outputs for the tested function for each regime
        output = get_output_for_keys_of_metafunc(metafunc, results, params)
        fixtures = [
            dict(output=o, params=p)
            for o, p in zip(output, params)
        ]
        metafunc.parametrize("output_test_fixtures",
                             fixtures,
                             ids=ids_all_regimes)

    elif "output_fixtures_noise_driven" in metafunc.fixturenames:
        # list of input arguments for the tested function for each regime
        params = [get_required_params(func, dict(results[0], **all_params[0]))]
        # list of outputs for the tested function for each regime
        output = get_output_for_keys_of_metafunc(metafunc, results[0:1],
                                                 params)
        fixtures = [
            dict(output=o, params=p)
            for o, p in zip(output, params)
        ]
        metafunc.parametrize("output_fixtures_noise_driven",
                             fixtures,
                             ids=ids_all_regimes[0:1])

    elif "output_fixtures_mean_driven" in metafunc.fixturenames:
        # list of input arguments for the tested function for each regime
        params = [get_required_params(func, dict(results[1], **all_params[1]))]
        # list of outputs for the tested function for each regime
        output = get_output_for_keys_of_metafunc(metafunc, results[1:], params)
        fixtures = [
            dict(output=o, params=p)
            for o, p in zip(output, params)
        ]
        metafunc.parametrize("output_fixtures_mean_driven",
                             fixtures,
                             ids=ids_all_regimes[1:])

    elif "unit_fixtures" in metafunc.fixturenames:
        file = metafunc.cls.fixtures
        fixtures = h5.load(unit_fix_path + file)
        ids = sorted(fixtures.keys())
        fixture_list = [
            dict(output=fixtures[id]['output'], params=fixtures[id]['params'])
            for id in ids
        ]
        metafunc.parametrize("unit_fixtures", fixture_list, ids=ids)

    elif "unit_fixtures_fully_vectorized" in metafunc.fixturenames:
        file = metafunc.cls.fixtures
        fixtures = h5.load(unit_fix_path + file)
        ids = sorted(fixtures.keys())
        fixture_list = [
            dict(output=fixtures[id]['output'], params=fixtures[id]['params'])
            for id in ids if id == 'fully_vectorized'
        ]
        metafunc.parametrize("unit_fixtures_fully_vectorized",
                             fixture_list,
                             ids=['fully_vectorized'])
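
A hypothetical test class wired to this hook; some_function and the assertion are placeholders, not part of the tested package:

def some_function(a, b):
    # Stand-in for a real function from the tested package.
    return a + b

class TestSomeFunction:
    # pytest_generate_tests above looks for this attribute.
    func = staticmethod(some_function)
    output_key = 'some_result'

    def test_output(self, output_test_fixtures):
        params = output_test_fixtures['params']
        expected = output_test_fixtures['output']
        assert self.func(**params) == expected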
Example #9
    def parallel_concatenate(self, comm, root=0):

        if len(self.children) > 0:

            rank = comm.rank
            # Have the parallel workers split the children amongst themselves
            children_chunks = np.array_split(self.children, comm.size)
            rank_children = children_chunks[rank]

            # Just gather a raw list of dictionaries
            child_index_lookup_table = []
            dict_list = []

            bad_children = []

            for i, child in enumerate(rank_children):
                try:
                    child_data = h5py_wrapper.load(child['path'])
                except Exception:
                    bad_children.append(child)
                    continue
                dict_list.append(child_data)
                child_index_lookup_table.append(child['idx'])

            # Gather across ranks
            dict_list = comm.gather(dict_list, root=root)
            lookup_table = comm.gather(child_index_lookup_table, root=root)
            bad_children = comm.gather(bad_children, root=root)

            if rank == root:
                
                # Flatten the list(s)
                dict_list = [elem for sublist in dict_list for elem in sublist]
                lookup_table = np.array([elem for sublist in lookup_table for elem in sublist]).astype(int)
                bad_children = [elem for sublist in bad_children for elem in sublist]

                print(len(dict_list))

                # Follow the normal procedure from concatenate
                dummy_dict = dict_list[0]
                # Init the structure to the total number of tasks; won't necessarily fill all of them
                # because of the presence of bad children
                master_dict = init_structure(self.total_tasks, dummy_dict)

                for i, dict_ in enumerate(dict_list):
                    master_dict = insert_data(master_dict, dict_, lookup_table[i])

                # Save
                file_name = os.path.abspath(self.directory).split('/')[-1]
                print(file_name)
                master_data_filepath = os.path.abspath('..') + '/%s.dat' % file_name
                h5py_wrapper.save(master_data_filepath, master_dict, write_mode='w')
                return bad_children

        else:
            if comm.rank == root:
                # Still create a dummy .dat file to indicate that the job completed
                dummy_dict = {}
                file_name = os.path.abspath(self.directory).split('/')[-1]
                master_data_filepath = os.path.abspath('..') + '/%s.dat' % file_name
                h5py_wrapper.save(master_data_filepath, dummy_dict, write_mode='w')

                return []
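
A hypothetical call pattern, assuming a ResultsManager restored as in Example #6:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rmanager = ResultsManager.restore_from_directory('/path/to/results_dir')
bad_children = rmanager.parallel_concatenate(comm, root=0)
if comm.rank == 0 and bad_children:
    print('%d children could not be loaded' % len(bad_children))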