Code example #1
File: storage_test.py Project: nikhil-garg/pypet
    def test_loading_as_new(self):
        filename = make_temp_dir('asnew.h5')
        traj = Trajectory(name='TestPartial', filename=filename, add_time=True)

        traj.f_add_parameter('x', 3)
        traj.f_add_parameter('y', 2)

        traj.f_explore({'x': [12,3,44], 'y':[1,23,4]})

        traj.f_store()

        traj = load_trajectory(name=traj.v_name, filename=filename)

        with self.assertRaises(TypeError):
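            # a trajectory that is already stored on disk cannot be shrunk, hence the
            # TypeError; only after reloading with as_new=True below does f_shrink succeed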
            traj.f_shrink()

        traj = load_trajectory(name=traj.v_name, filename=filename, as_new=True,
                               new_name='TestTraj', add_time=False)

        self.assertTrue(traj.v_name == 'TestTraj')

        self.assertEqual(len(traj), 3)

        self.assertEqual(len(traj._explored_parameters), 2)

        traj.f_shrink()

        self.assertTrue(len(traj) == 1)
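For orientation, a minimal sketch of the load_trajectory call patterns these examples rely on; the file and trajectory names are placeholders, and only keyword arguments that appear in the examples are used:

from pypet import load_trajectory

traj = load_trajectory(name='MyTrajectory', filename='experiment.hdf5',
                       load_data=2)  # load by name, including parameter/result data
traj = load_trajectory(index=-1, filename='experiment.hdf5')  # or by position, -1 = last stored
copy = load_trajectory(name='MyTrajectory', filename='experiment.hdf5',
                       as_new=True, new_name='MyCopy', add_time=False)  # editable copy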
Code example #2
def load_trajectories(filename, brian2=True):

    trajectories = []
    idx = 0

    while True:

        try:
            if brian2:
                tr = load_trajectory(
                    index=idx,
                    filename=filename,
                    dynamic_imports=[Brian2MonitorResult, Brian2Parameter])
            else:
                tr = load_trajectory(index=idx, filename=filename)
        except ValueError:
            # no trajectory stored under this index -> everything has been loaded
            break

        trajectories.append(tr)
        idx += 1

    return trajectories
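A usage sketch, assuming all_runs.hdf5 (a placeholder name) holds one or more trajectories stored back to back:

trajs = load_trajectories('all_runs.hdf5', brian2=False)
print('Loaded %d trajectories' % len(trajs))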
Code example #3
File: storage_test.py Project: henribunting/pypet
    def test_loading_as_new(self):
        filename = make_temp_dir('asnew.h5')
        traj = Trajectory(name='TestPartial', filename=filename)

        traj.f_add_parameter('x', 3)
        traj.f_add_parameter('y', 2)

        traj.f_explore({'x': [12,3,44], 'y':[1,23,4]})

        traj.f_store()

        traj = load_trajectory(name=traj.v_name, filename=filename)

        with self.assertRaises(TypeError):
            traj.f_shrink()

        traj = load_trajectory(name=traj.v_name, filename=filename, as_new=True,
                               new_name='TestTraj', add_time=False)

        self.assertTrue(traj.v_name == 'TestTraj')

        self.assertTrue(len(traj) == 3)

        traj.f_shrink()

        self.assertTrue(len(traj) == 1)
Code example #4
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_read(self):
        the_reading_table = self.traj.results.shared_data.table
        self.assertTrue(the_reading_table is self.shared_table)
        the_reading_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_reading_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_reading_table.flush()

            for idx, row in enumerate(the_reading_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_reading_table = traj2.results.shared_data.table

        self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))

        second_reading_table.append([(21, 'aaa', 'bbb', 100)])

        self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))

        traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        third_reading_table = traj3.results.shared_data.table

        self.assertTrue(np.all(the_reading_table.read() == third_reading_table.read()))
Code example #5
File: shared_data_test.py Project: MehmetTimur/pypet
    def test_all_arrays(self):
        filename = make_temp_dir("hdf5arrays.hdf5")
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name

        npearray = np.ones((2, 10, 3), dtype=np.float64)
        thevlarray = np.array([compat.tobytes("j"), 22.2, compat.tobytes("gutter")])
        traj.f_store(only_init=True)
        res = traj.f_add_result(SharedResult, "arrays")
        res["carray"] = SharedCArray()
        res["carray"].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
        res["earray"] = SharedEArray()
        res["earray"].create_shared_data(obj=npearray)
        res["vlarray"] = SharedVLArray()
        res["vlarray"].create_shared_data(obj=thevlarray)
        res["array"] = SharedArray()
        res["array"].create_shared_data(data=npearray)

        traj.f_store()

        traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)

        toappned = [44, compat.tobytes("k")]
        with StorageContextManager(traj) as cm:
            a1 = traj.arrays.array
            a1[0, 0, 0] = 4.0

            a2 = traj.arrays.carray
            a2[0, 1] = 4

            a4 = traj.arrays.vlarray
            a4.append(toappned)

            a3 = traj.arrays.earray
            a3.append(np.zeros((1, 10, 3)))

            # cm.f_flush_storage()

        traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)

        with StorageContextManager(traj) as cm:
            a1 = traj.arrays.array
            self.assertTrue(a1[0, 0, 0] == 4.0)

            a2 = traj.arrays.carray
            self.assertTrue(a2[0, 1] == 4)

            a3 = traj.arrays.earray
            self.assertTrue(a3.read().shape == (3, 10, 3))

            a4 = traj.arrays.vlarray
            for idx, x in enumerate(a4):
                if idx == 0:
                    self.assertTrue(np.all(x == np.array(thevlarray)))
                elif idx == 1:
                    self.assertTrue(np.all(x == np.array(toappned)))
                else:
                    raise RuntimeError()
Code example #6
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_append(self):
        the_append_table = self.traj.results.shared_data.table
        self.assertTrue(the_append_table is self.shared_table)
        the_append_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_append_table.row
            for i in range(15):
                row['id'] = i * 2
                row['name'] = 'name %d' % i
                row['surname'] = '%d surname' % i
                row['weight'] = (i*0.5 + 50.0)
                row.append()
            the_append_table.flush()

            for idx, row in enumerate(the_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'], compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx*0.5+50.0)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_append_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'], compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx*0.5+50.0)

            second_append_table.append([(30, 'mehmet', 'timur', 65.5)])

            self.assertEqual(second_append_table.read(field='id')[-1], 30)
            self.assertEqual(second_append_table.read(field='name')[-1], compat.tobytes('mehmet'))
            self.assertEqual(second_append_table.read(field='surname')[-1], compat.tobytes('timur'))
            self.assertEqual(second_append_table.read(field='weight')[-1], 65.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        third_append_table = traj3.results.shared_data.table

        self.assertEqual((third_append_table.read(field='id')[-1]), 30)
        self.assertEqual((third_append_table.read(field='name')[-1]), compat.tobytes('mehmet'))
        self.assertEqual((third_append_table.read(field='surname')[-1]), compat.tobytes('timur'))
        self.assertEqual((third_append_table.read(field='weight')[-1]), 65.5)

        third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])

        self.assertEqual((third_append_table.read(field='id')[-1]), 33)
        self.assertEqual((third_append_table.read(field='name')[-1]), compat.tobytes('Harrison'))
        self.assertEqual((third_append_table.read(field='surname')[-1]), compat.tobytes('Ford'))
        self.assertEqual((third_append_table.read(field='weight')[-1]), 95.5)
Code example #7
    def test_table_getitem(self):
        the_getitem_table = self.traj.results.shared_data.table

        self.assertTrue(the_getitem_table is self.shared_table)

        the_getitem_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_getitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_getitem_table.flush()

            for idx, row in enumerate(the_getitem_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_getitem_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_getitem_table.iterrows()):
                self.assertTrue(
                    np.all(second_getitem_table.read()[idx] ==
                           second_getitem_table[idx]))

            second_getitem_table.append([(30, 'mehmet nevvaf', 'timur', 65.5)])

            for idx, row in enumerate(second_getitem_table.iterrows(-1)):
                self.assertEqual(row['id'], 30)
                self.assertEqual(row['name'], compat.tobytes('mehmet nevvaf'))
                self.assertEqual(row['surname'], compat.tobytes('timur'))
                self.assertEqual(row['weight'], 65.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        third_getitem_table = traj3.results.shared_data.table

        with StorageContextManager(traj3):
            for idx, row in enumerate(third_getitem_table.iterrows()):
                self.assertTrue(
                    np.all(third_getitem_table.read()[idx] ==
                           third_getitem_table[idx]))
Code example #8
    def test_table_setitem(self):
        the_setitem_table = self.traj.results.shared_data.table

        self.assertTrue(the_setitem_table is self.shared_table)

        the_setitem_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_setitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_setitem_table.flush()

            for idx, row in enumerate(the_setitem_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_setitem_table = traj2.results.shared_data.table

        second_setitem_table[0] = [(100, 'Mehmet Nevvaf', 'TIMUR', 75.5)]

        self.assertEqual(second_setitem_table.read(field='id')[0], 100)
        self.assertEqual(
            second_setitem_table.read(field='name')[0],
            compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(
            second_setitem_table.read(field='surname')[0],
            compat.tobytes('TIMUR'))
        self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        third_setitem_table = traj3.results.shared_data.table

        self.assertEqual(third_setitem_table.read(field='id')[0], 100)
        self.assertEqual(
            third_setitem_table.read(field='name')[0],
            compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(
            third_setitem_table.read(field='surname')[0],
            compat.tobytes('TIMUR'))
        self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)
Code example #9
File: storage_test.py Project: henribunting/pypet
    def test_max_depth_loading_and_storing(self):
        filename = make_temp_dir('newassignment.hdf5')
        traj = Trajectory(filename=filename, overwrite_file=True)

        traj.par = Parameter('d1.d2.d3.d4.d5', 55)
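        # max_depth=4 stores only the first four tree levels (parameters.d1.d2.d3),
        # so 'd4' and 'd5' never reach the file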
        traj.f_store(max_depth=4)

        traj = load_trajectory(index=-1, filename=filename)
        traj.f_load(load_data=2)
        self.assertTrue('d3' in traj)
        self.assertFalse('d4' in traj)

        traj = load_trajectory(index=-1, filename=filename, max_depth=3)
        self.assertTrue('d2' in traj)
        self.assertFalse('d3' in traj)

        traj.par.f_remove(recursive=True)
        traj.dpar = Parameter('d1.d2.d3.d4.d5', 123)

        traj.dpar.f_store_child('d1', recursive=True, max_depth=3)
        traj.dpar.f_remove_child('d1', recursive=True)

        self.assertTrue('d1' not in traj)
        traj.dpar.f_load_child('d1', recursive=True)

        self.assertTrue('d3' in traj)
        self.assertTrue('d4' not in traj)

        traj.dpar.f_remove_child('d1', recursive=True)
        self.assertTrue('d1' not in traj)
        traj.dpar.f_load_child('d1', recursive=True, max_depth=2)

        self.assertTrue('d2' in traj)
        self.assertTrue('d3' not in traj)

        traj.dpar = Parameter('l1.l2.l3.l4.l5', 123)
        traj.dpar.f_store(recursive=True, max_depth=0)
        self.assertFalse(traj.dpar.l1._stored)

        traj.dpar.f_store(recursive=True, max_depth=4)
        traj.dpar.f_remove()
        self.assertTrue('l1' not in traj)
        traj.dpar.f_load(recursive=True)
        self.assertTrue('l4' in traj)
        self.assertTrue('l5' not in traj)

        traj.dpar.f_remove()
        self.assertTrue('l1' not in traj)
        traj.dpar.f_load(recursive=True, max_depth=3)
        self.assertTrue('l3' in traj)
        self.assertTrue('l4' not in traj)
Code example #10
File: storage_test.py Project: nikhil-garg/pypet
    def test_max_depth_loading_and_storing(self):
        filename = make_temp_dir('newassignment.hdf5')
        traj = Trajectory(filename=filename, overwrite_file=True)

        traj.par.d1 = Parameter('d1.d2.d3.d4.d5', 55)
        traj.f_store(max_depth=4)

        traj = load_trajectory(index=-1, filename=filename)
        traj.f_load(load_data=2)
        self.assertTrue('d3' in traj)
        self.assertFalse('d4' in traj)

        traj = load_trajectory(index=-1, filename=filename, max_depth=3)
        self.assertTrue('d2' in traj)
        self.assertFalse('d3' in traj)

        traj.par.f_remove(recursive=True)
        traj.dpar.d1 = Parameter('d1.d2.d3.d4.d5', 123)

        traj.dpar.f_store_child('d1', recursive=True, max_depth=3)
        traj.dpar.f_remove_child('d1', recursive=True)

        self.assertTrue('d1' not in traj)
        traj.dpar.f_load_child('d1', recursive=True)

        self.assertTrue('d3' in traj)
        self.assertTrue('d4' not in traj)

        traj.dpar.f_remove_child('d1', recursive=True)
        self.assertTrue('d1' not in traj)
        traj.dpar.f_load_child('d1', recursive=True, max_depth=2)

        self.assertTrue('d2' in traj)
        self.assertTrue('d3' not in traj)

        traj.dpar.l1 = Parameter('l1.l2.l3.l4.l5', 123)
        traj.dpar.f_store(recursive=True, max_depth=0)
        self.assertFalse(traj.dpar.l1._stored)

        traj.dpar.f_store(recursive=True, max_depth=4)
        traj.dpar.f_remove()
        self.assertTrue('l1' not in traj)
        traj.dpar.f_load(recursive=True)
        self.assertTrue('l4' in traj)
        self.assertTrue('l5' not in traj)

        traj.dpar.f_remove()
        self.assertTrue('l1' not in traj)
        traj.dpar.f_load(recursive=True, max_depth=3)
        self.assertTrue('l3' in traj)
        self.assertTrue('l4' not in traj)
Code example #11
    def test_hdf5_store_load_monitorresult(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name, filename=file_name, log_config=get_log_config(),
                            dynamic_imports=[Brian2MonitorResult], add_time=False, storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_result = Brian2MonitorResult
        traj.f_add_result('brian2.single.millivolts_single_a', 10*mvolt, comment='single value a')

        traj.f_add_result('brian2.single.millivolts_single_c', 11*mvolt, comment='single value b')

        traj.f_add_result('brian2.array.millivolts_array_a', [11, 12]*mvolt, comment='array')
        traj.f_add_result('mV1', 42.0*mV)
        # results can hold much more than a single data item:
        traj.f_add_result('ampere1', 1*mA, 44, test=300*mV, test2=[1,2,3],
                          test3=np.array([1,2,3])*mA, comment='Result keeping track of many things')
        traj.f_add_result('integer', 16)
        traj.f_add_result('kHz05', 0.5*kHz)
        traj.f_add_result('nested_array', np.array([[6.,7.,8.],[9.,10.,11.]]) * ms)
        traj.f_add_result('b2a', np.array([1., 2.]) * mV)


        traj.f_store()

        traj2 = load_trajectory(filename=file_name, name=traj_name, dynamic_imports=[Brian2MonitorResult], load_data=2)

        #traj._logger.error('traj :'+str(traj))
        #traj._logger.error('traj2:'+str(traj2))
        self.compare_trajectories(traj, traj2)
Code example #12
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_where(self):
        the_where_table = self.traj.results.shared_data.table

        self.assertTrue(the_where_table is self.shared_table)

        the_where_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_where_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            the_where_table.flush()

            for idx, row in enumerate(the_where_table.iterrows()):
                self.assertEqual(row['id'], idx)

            self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_where_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            result = second_where_table.where('(id == 2)&(name == b"mehmet 2")&(surname ==b"Timur")&(weight == 67.5)')
            there = False
            for row in result:
                there = True
            self.assertTrue(there)
Code example #13
    def test_array_setitem(self):
        the_setitem_array = np.zeros((50, 50))

        first_setitem_array = self.traj.results.shared_data.array

        first_setitem_array.create_shared_data(obj=the_setitem_array)

        first_setitem_array[2, 2] = 10

        self.assertEqual(first_setitem_array[2, 2], 10)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_setitem_array = traj2.results.shared_data.array

        self.assertEqual(second_setitem_array[2, 2], 10)

        second_setitem_array[3, 3] = 17

        self.assertEqual(second_setitem_array[3, 3], 17)
Code example #14
def import_traj(folder_path,
                file_name,
                order_face=None,
                traj_name='explore_perf'):
    print("importing data...")
    traj = pypet.load_trajectory(traj_name,
                                 filename=os.path.join(folder_path,
                                                       file_name + '.hdf5'),
                                 force=True)
    traj.v_auto_load = True

    perc_correct = np.array([])
    perc_correct_all = []
    stat_diff = []
    ok_runs = []
    for run in traj.f_iter_runs():
        perc_correct = np.append(perc_correct,
                                 np.mean(traj.results[run].test_perf))
        perc_correct_all.append(traj.results[run].test_perf)
        stat_diff.append(traj.results[run].stat_diff)
        ok_runs.append(int(run[4:]))

    param_traj = traj.f_get_explored_parameters()
    param = {}
    for k in param_traj:
        if k[11:] != 'name':
            xplr_values = np.array(param_traj[k].f_get_range())[ok_runs]
            if len(np.unique(xplr_values)) > 1:
                param[k[11:]] = xplr_values

    return perc_correct, np.array(perc_correct_all), np.array(stat_diff), param
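A hypothetical call, assuming results/perf_data.hdf5 (placeholder path) was produced by an exploration that stored test_perf and stat_diff for every run, as the function body expects:

perc_correct, perc_correct_all, stat_diff, param = import_traj('results', 'perf_data')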
Code example #15
File: shared_data_test.py Project: femtotrader/pypet
    def test_array_iter(self):

        the_iterrows_array = np.random.randint(0, 100, (100, 100))

        first_iterrows_array = self.traj.results.shared_data.array

        first_iterrows_array.create_shared_data(obj=the_iterrows_array)

        with StorageContextManager(self.traj):
            for idx, row in enumerate(first_iterrows_array):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

        self.assertTrue(np.all(the_iterrows_array == first_iterrows_array.read()))

        for idx, row in enumerate(the_iterrows_array):
            self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_iterrows_array = traj2.results.shared_data.array

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_array):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

        self.assertTrue(np.all(the_iterrows_array == second_iterrows_array.read()))

        for idx, row in enumerate(second_iterrows_array):
            self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
Code example #16
    def test_hdf5_store_load_parameter(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(os.path.join('brian2', 'tests', 'hdf5', 'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name, filename=file_name, log_config=get_log_config(),
                            dynamic_imports=[Brian2Parameter], add_time=False, storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_parameter = Brian2Parameter
        traj.f_add_parameter('brian2.single.millivolts', 10*mvolt, comment='single value')

        #traj.f_add_parameter('brian2.array.millivolts', [11, 12]*mvolt, comment='array')
        #traj.f_add_parameter('mV1', 42.0*mV)
        #traj.f_add_parameter('ampere1', 1*mA)
        #traj.f_add_parameter('integer', 16)
        #traj.f_add_parameter('kHz05', 0.5*kHz)
        #traj.f_add_parameter('nested_array', np.array([[6.,7.,8.],[9.,10.,11.]]) * ms)
        #traj.f_add_parameter('b2a', np.array([1., 2.]) * mV)

        # We also need to check if explorations work with hdf5 store!
        #explore_dict = {'ampere1': [1*mA, 2*mA, 3*mA],
        #                'integer': [42,43,44],
        #                'b2a': [np.array([1., 2.]) * mV, np.array([1., 4.]) * mV,
        #                       np.array([1., 2.]) * mV]}
        #traj.f_explore(explore_dict)

        traj.f_store()

        traj2 = load_trajectory(filename=file_name, name=traj_name, dynamic_imports=[Brian2Parameter],
                                load_data=2)
        self.compare_trajectories(traj, traj2)
Code example #17
    def test_table_iterrows(self):
        the_iterrows_table = self.traj.results.shared_data.table
        self.assertTrue(the_iterrows_table is self.shared_table)
        the_iterrows_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_iterrows_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_iterrows_table.flush()

            for idx, row in enumerate(the_iterrows_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_iterrows_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_table.iterrows()):
                self.assertEqual(row['id'], idx)
Code example #18
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_getitem(self):
        the_getitem_table = self.traj.results.shared_data.table

        self.assertTrue(the_getitem_table is self.shared_table)

        the_getitem_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_getitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_getitem_table.flush()

            for idx, row in enumerate(the_getitem_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_getitem_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_getitem_table.iterrows()):
                self.assertTrue(np.all(second_getitem_table.read()[idx] == second_getitem_table[idx]))

            second_getitem_table.append([(30, 'mehmet nevvaf', 'timur', 65.5)])

            for idx, row in enumerate(second_getitem_table.iterrows(-1)):
                self.assertEqual(row['id'], 30)
                self.assertEqual(row['name'], compat.tobytes('mehmet nevvaf'))
                self.assertEqual(row['surname'], compat.tobytes('timur'))
                self.assertEqual(row['weight'], 65.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        third_getitem_table = traj3.results.shared_data.table

        with StorageContextManager(traj3):
            for idx, row in enumerate(third_getitem_table.iterrows()):
                self.assertTrue(np.all(third_getitem_table.read()[idx] == third_getitem_table[idx]))
Code example #19
File: shared_data_test.py Project: MehmetTimur/pypet
    def test_storing_and_manipulating(self):
        filename = make_temp_dir("hdf5manipulation.hdf5")
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name

        thedata = np.zeros((1000, 1000))
        res = traj.f_add_result(SharedResult, "shared")
        myarray = SharedArray("array", res, trajectory=traj, add_to_parent=True)
        mytable = SharedTable("t1", res, trajectory=traj, add_to_parent=True)
        mytable2 = SharedTable("t2", res, trajectory=traj, add_to_parent=True)
        mytable3 = SharedTable("t3", res, trajectory=traj, add_to_parent=True)

        traj.f_store(only_init=True)
        myarray.create_shared_data(data=thedata)
        mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"), "huhu": np.ones(3)})
        mytable2.create_shared_data(description={"ha": pt.StringCol(2, pos=0), "haha": pt.FloatCol(pos=1)})
        mytable3.create_shared_data(description={"ha": pt.StringCol(2, pos=0), "haha": pt.FloatCol(pos=1)})

        traj.f_store()

        newrow = {"ha": "hu", "haha": 4.0}

        with self.assertRaises(TypeError):
            row = traj.shared.t2.row

        with StorageContextManager(traj) as cm:
            row = traj.shared.t2.row
            for irun in range(11):
                for key, val in newrow.items():
                    row[key] = val
                row.append()
            traj.shared.t3.flush()

        data = myarray.read()
        arr = myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

        with StorageContextManager(traj) as cm:
            myarray[2, 2] = 10
            data = myarray.read()
            self.assertTrue(data[2, 2] == 10)

        self.assertTrue(data[2, 2] == 10)
        self.assertFalse(traj.v_storage_service.is_open)

        traj = load_trajectory(name=trajname, filename=filename)

        traj.f_load(load_data=2)

        traj.shared.t2.traj = traj
        traj.shared.t1.traj = traj
        traj.shared.array.traj = traj

        self.assertTrue(traj.shared.t2.nrows == 11, "%s != 11" % str(traj.shared.t2.nrows))
        self.assertTrue(traj.shared.t2[0]["ha"] == compat.tobytes("hu"), traj.shared.t2[0]["ha"])
        self.assertTrue(traj.shared.t2[1]["ha"] == compat.tobytes("hu"), traj.shared.t2[1]["ha"])
        self.assertTrue("huhu" in traj.shared.t1.colnames)
        self.assertTrue(traj.shared.array[2, 2] == 10)
Code example #20
    def test_loading_and_storing_empty_containers(self):
        filename = make_temp_dir('empty_containers.hdf5')
        traj = Trajectory(filename=filename, add_time=True)

        # traj.f_add_parameter('empty.dict', {})
        # traj.f_add_parameter('empty.list', [])
        traj.f_add_parameter(ArrayParameter, 'empty.tuple', ())
        traj.f_add_parameter(ArrayParameter, 'empty.array',
                             np.array([], dtype=float))

        spsparse_csc = spsp.csc_matrix((2, 10))
        spsparse_csr = spsp.csr_matrix((6660, 660))
        spsparse_bsr = spsp.bsr_matrix((3330, 2220))
        spsparse_dia = spsp.dia_matrix((1230, 1230))

        traj.f_add_parameter(SparseParameter, 'empty.csc', spsparse_csc)
        traj.f_add_parameter(SparseParameter, 'empty.csr', spsparse_csr)
        traj.f_add_parameter(SparseParameter, 'empty.bsr', spsparse_bsr)
        traj.f_add_parameter(SparseParameter, 'empty.dia', spsparse_dia)

        traj.f_add_result(SparseResult,
                          'empty.all',
                          dict={},
                          list=[],
                          series=pd.Series(),
                          frame=pd.DataFrame(),
                          panel=pd.Panel(),
                          **traj.par.f_to_dict(short_names=True,
                                               fast_access=True))

        traj.f_store()

        newtraj = load_trajectory(index=-1, filename=filename)

        newtraj.f_load(load_data=2)

        epg = newtraj.par.empty
        self.assertTrue(type(epg.tuple) is tuple)
        self.assertTrue(len(epg.tuple) == 0)

        self.assertTrue(type(epg.array) is np.ndarray)
        self.assertTrue(epg.array.size == 0)

        self.assertTrue(spsp.isspmatrix_csr(epg.csr))
        self.assertTrue(epg.csr.size == 0)

        self.assertTrue(spsp.isspmatrix_csc(epg.csc))
        self.assertTrue(epg.csc.size == 0)

        self.assertTrue(spsp.isspmatrix_bsr(epg.bsr))
        self.assertTrue(epg.bsr.size == 0)

        self.assertTrue(spsp.isspmatrix_dia(epg.dia))
        self.assertTrue(epg.dia.size == 0)

        self.compare_trajectories(traj, newtraj)
Code example #21
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_setitem(self):
        the_setitem_table = self.traj.results.shared_data.table

        self.assertTrue(the_setitem_table is self.shared_table)

        the_setitem_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_setitem_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i * 1.5
                row.append()
            the_setitem_table.flush()

            for idx, row in enumerate(the_setitem_table.iterrows()):
                self.assertEqual(row['id'], idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_setitem_table = traj2.results.shared_data.table

        second_setitem_table[0] = [(100, 'Mehmet Nevvaf', 'TIMUR', 75.5)]

        self.assertEqual(second_setitem_table.read(field='id')[0], 100)
        self.assertEqual(second_setitem_table.read(field='name')[0], compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(second_setitem_table.read(field='surname')[0], compat.tobytes('TIMUR'))
        self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        third_setitem_table = traj3.results.shared_data.table

        self.assertEqual(third_setitem_table.read(field='id')[0], 100)
        self.assertEqual(third_setitem_table.read(field='name')[0], compat.tobytes('Mehmet Nevvaf'))
        self.assertEqual(third_setitem_table.read(field='surname')[0], compat.tobytes('TIMUR'))
        self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)
Code example #22
    def test_compacting(self):
        filename = make_temp_dir('hdf5compacting.hdf5')
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name
        traj.v_storage_service.complevel = 7

        first_row = {'ha': compat.tobytes('hi'), 'haha': np.zeros((3, 3))}

        traj.f_store(only_init=True)

        traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
        traj.f_add_result('Mine.Too.HomeBoy', 42, comment='Don`t cry for me!')

        res = traj.f_add_result(SharedResult, 'myres')

        res['myres'] = SharedTable()

        res['myres'].create_shared_data(first_row=first_row)

        with StorageContextManager(traj):
            traj.myres
            for irun in range(10000):
                row = traj.myres.row
                for key in first_row:
                    row[key] = first_row[key]
                row.append()
        traj.f_store()
        del traj
        traj = load_trajectory(name=trajname, filename=filename, load_all=2)
        with StorageContextManager(traj) as cm:
            tb = traj.myres.get_data_node()
            ptcompat.remove_rows(tb, 1000, 10000)

            cm.flush_store()
            self.assertTrue(traj.myres.nrows == 1001)

        traj.f_delete_item(traj.My, recursive=True)
        traj.f_delete_item(traj.Mine, recursive=True)

        size = os.path.getsize(filename)
        get_root_logger().info('Filesize is %s' % str(size))
        name_wo_ext, ext = os.path.splitext(filename)
        backup_file_name = name_wo_ext + '_backup' + ext
        code = compact_hdf5_file(filename, keep_backup=True)
        if code != 0:
            raise RuntimeError('ptrepack fail')
        backup_size = os.path.getsize(backup_file_name)
        self.assertTrue(backup_size == size)
        new_size = os.path.getsize(filename)
        get_root_logger().info('New filesize is %s' % str(new_size))
        self.assertTrue(new_size < size,
                        "%s > %s" % (str(new_size), str(size)))
Code example #23
File: storage_test.py Project: henribunting/pypet
    def test_loading_and_storing_empty_containers(self):
        filename = make_temp_dir('empty_containers.hdf5')
        traj = Trajectory(filename=filename)

        # traj.f_add_parameter('empty.dict', {})
        # traj.f_add_parameter('empty.list', [])
        traj.f_add_parameter(ArrayParameter, 'empty.tuple', ())
        traj.f_add_parameter(ArrayParameter, 'empty.array', np.array([], dtype=float))

        spsparse_csc = spsp.csc_matrix((2,10))
        spsparse_csr = spsp.csr_matrix((6660,660))
        spsparse_bsr = spsp.bsr_matrix((3330,2220))
        spsparse_dia = spsp.dia_matrix((1230,1230))

        traj.f_add_parameter(SparseParameter, 'empty.csc', spsparse_csc)
        traj.f_add_parameter(SparseParameter, 'empty.csr', spsparse_csr)
        traj.f_add_parameter(SparseParameter, 'empty.bsr', spsparse_bsr)
        traj.f_add_parameter(SparseParameter, 'empty.dia', spsparse_dia)

        traj.f_add_result(SparseResult, 'empty.all', dict={}, list=[],
                          series = pd.Series(),
                          frame = pd.DataFrame(),
                          panel = pd.Panel(),
                          **traj.par.f_to_dict(short_names=True, fast_access=True))

        traj.f_store()

        newtraj = load_trajectory(index=-1, filename=filename)

        newtraj.f_load(load_data=2)

        epg = newtraj.par.empty
        self.assertTrue(type(epg.tuple) is tuple)
        self.assertTrue(len(epg.tuple) == 0)

        self.assertTrue(type(epg.array) is np.ndarray)
        self.assertTrue(epg.array.size == 0)

        self.assertTrue(spsp.isspmatrix_csr(epg.csr))
        self.assertTrue(epg.csr.size == 0)

        self.assertTrue(spsp.isspmatrix_csc(epg.csc))
        self.assertTrue(epg.csc.size == 0)

        self.assertTrue(spsp.isspmatrix_bsr(epg.bsr))
        self.assertTrue(epg.bsr.size == 0)

        self.assertTrue(spsp.isspmatrix_dia(epg.dia))
        self.assertTrue(epg.dia.size == 0)

        self.compare_trajectories(traj, newtraj)
Code example #24
File: shared_data_test.py Project: MehmetTimur/pypet
    def test_compacting(self):
        filename = make_temp_dir("hdf5compacting.hdf5")
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name
        traj.v_storage_service.complevel = 7

        first_row = {"ha": compat.tobytes("hi"), "haha": np.zeros((3, 3))}

        traj.f_store(only_init=True)

        res1 = traj.f_add_result("My.Tree.Will.Be.Deleted", 42)
        res2 = traj.f_add_result("Mine.Too.HomeBoy", 42, comment="Don`t cry for me!")

        res = traj.f_add_result(SharedResult, "myres")

        res["myres"] = SharedTable()

        res["myres"].create_shared_data(first_row=first_row)

        with StorageContextManager(traj):
            tab = traj.myres
            for irun in range(10000):
                row = traj.myres.row
                for key in first_row:
                    row[key] = first_row[key]
                row.append()
        traj.f_store()
        del traj
        traj = load_trajectory(name=trajname, filename=filename, load_all=2)
        with StorageContextManager(traj) as cm:
            tb = traj.myres.get_data_node()
            ptcompat.remove_rows(tb, 1000, 10000)

            cm.f_flush_store()
            self.assertTrue(traj.myres.nrows == 1001)

        traj.f_delete_item(traj.My, recursive=True)
        traj.f_delete_item(traj.Mine, recursive=True)

        size = os.path.getsize(filename)
        get_root_logger().info("Filesize is %s" % str(size))
        name_wo_ext, ext = os.path.splitext(filename)
        backup_file_name = name_wo_ext + "_backup" + ext
        code = compact_hdf5_file(filename, keep_backup=True)
        if code != 0:
            raise RuntimeError("ptrepack fail")
        backup_size = os.path.getsize(backup_file_name)
        self.assertTrue(backup_size == size)
        new_size = os.path.getsize(filename)
        get_root_logger().info("New filesize is %s" % str(new_size))
        self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
Code example #25
File: shared_data_test.py Project: femtotrader/pypet
    def test_table_flush(self):
        the_flush_table = self.traj.results.shared_data.table

        self.assertTrue(the_flush_table is self.shared_table)

        the_flush_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_flush_table.row
            for i in range(10):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            the_flush_table.flush()

            for idx, row in enumerate(the_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'], compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5+idx)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_flush_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'], compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5+idx)

            row = second_flush_table.row
            for i in range(10, 11):
                row['id'] = i
                row['name'] = 'mehmet %d' % i
                row['surname'] = 'Timur'
                row['weight'] = 65.5 + i
                row.append()
            second_flush_table.flush()

            for idx, row in enumerate(second_flush_table.iterrows()):
                self.assertEqual(row['id'], idx)
                self.assertEqual(row['name'], compat.tobytes('mehmet %d' % idx))
                self.assertEqual(row['surname'], compat.tobytes('Timur'))
                self.assertEqual(row['weight'], 65.5+idx)
Code example #26
    def test_storing_and_loading_groups(self):
        filename = make_temp_dir('grpgrp.hdf5')
        traj = Trajectory(name='traj', add_time=True, filename=filename)
        res = traj.f_add_result('aaa.bbb.ccc.iii', 42, 43, comment=7777 * '6')
        traj.ccc.v_annotations['gg'] = 4
        res = traj.f_add_result('aaa.ddd.eee.jjj', 42, 43, comment=777 * '6')
        traj.ccc.v_annotations['j'] = 'osajdsojds'
        traj.f_store(only_init=True)
        traj.f_store_item('aaa', recursive=True)
        newtraj = load_trajectory(traj.v_name, filename=filename, load_all=2)

        self.compare_trajectories(traj, newtraj)

        traj.iii.f_set(55)

        self.assertFalse(results_equal(traj.iii, newtraj.iii))

        traj.aaa.f_store(recursive=True, store_data=3)

        newtraj.bbb.f_load(recursive=True, load_data=3)

        self.compare_trajectories(traj, newtraj)

        traj.ccc.v_annotations['gg'] = 5
        traj.f_load(load_data=3)
        self.assertTrue(traj.ccc.v_annotations['gg'] == 4)
        traj.ccc.v_annotations['gg'] = 5
        traj.f_store(store_data=3)
        newtraj.f_load(load_data=2)
        self.assertTrue(newtraj.ccc.v_annotations['gg'] == 4)
        newtraj.f_load(load_data=3)
        self.assertTrue(newtraj.ccc.v_annotations['gg'] == 5)

        traj.ccc.f_add_link('link', res)
        traj.f_store_item(traj.ccc, store_data=3, with_links=False)

        newtraj.f_load(load_data=3)
        self.assertTrue('link' not in newtraj.ccc)

        traj.f_store_item(traj.ccc,
                          store_data=3,
                          with_links=True,
                          recursive=True)

        newtraj.f_load_item(newtraj.ccc, with_links=False, recursive=True)
        self.assertTrue('link' not in newtraj.ccc)

        newtraj.f_load_item(newtraj.ccc, recursive=True)
        self.assertTrue('link' in newtraj.ccc)
Code example #27
File: storage_test.py Project: nikhil-garg/pypet
    def test_no_run_information_loading(self):
        filename = make_temp_dir('testnoruninfo.hdf5')
        traj = Trajectory(name='TestDelete',
                          filename=filename,
                          add_time=True)

        length = 100000
        traj.par.x = Parameter('', 42)
        traj.f_explore({'x': range(length)})

        traj.f_store()

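        # with_run_information=False skips the per-run metadata: len(traj) still reports
        # all 100000 runs, but _run_information holds just a single entry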
        traj = load_trajectory(index=-1, filename=filename, with_run_information=False)
        self.assertEqual(len(traj), length)
        self.assertEqual(len(traj._run_information), 1)
Code example #28
File: storage_test.py Project: henribunting/pypet
    def test_no_run_information_loading(self):
        filename = make_temp_dir('testnoruninfo.hdf5')
        traj = Trajectory(name='TestDelete',
                          filename=filename)

        length = 100000
        traj.v_lazy_adding = True
        traj.par.x = 42
        traj.f_explore({'x': range(length)})

        traj.f_store()

        traj = load_trajectory(index=-1, filename=filename, with_run_information=False)
        self.assertEqual(len(traj), length)
        self.assertEqual(len(traj._run_information), 1)
Code example #29
    def test_hdf5_store_load_result(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Result],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_result = Brian2Result
        traj.f_add_result('brian2.single.millivolts_single_a',
                          10 * mvolt,
                          comment='single value a')
        traj.f_add_result('brian2.single.millivolts_single_c',
                          11 * mvolt,
                          comment='single value b')

        traj.f_add_result('brian2.array.millivolts_array_a', [11, 12] * mvolt,
                          comment='array')
        traj.f_add_result('mV1', 42.0 * mV)
        # results can hold much more than a single data item:
        traj.f_add_result('ampere1',
                          1 * mA,
                          44,
                          test=300 * mV,
                          test2=[1, 2, 3],
                          test3=np.array([1, 2, 3]) * mA,
                          comment='Result keeping track of many things')
        traj.f_add_result('integer', 16)
        traj.f_add_result('kHz05', 0.5 * kHz)
        traj.f_add_result('nested_array',
                          np.array([[6., 7., 8.], [9., 10., 11.]]) * ms)
        traj.f_add_result('b2a', np.array([1., 2.]) * mV)

        traj.f_add_result('nounit',
                          Quantity(np.array([[6., 7., 8.], [9., 10., 11.]])))

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Result],
                                load_data=2)

        self.compare_trajectories(traj, traj2)
Code example #30
File: shared_data_test.py Project: femtotrader/pypet
    def test_array_getitem(self):
        the_getitem_array = np.array(range(100))

        first_getitem_array = self.traj.results.shared_data.array

        first_getitem_array.create_shared_data(obj=the_getitem_array)

        for k in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[k], first_getitem_array[k])

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        for j in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[j], traj2.results.shared_data.array[j])
Code example #31
File: storage_test.py Project: henribunting/pypet
    def test_storing_and_loading_groups(self):
        filename = make_temp_dir('grpgrp.hdf5')
        traj = Trajectory(name='traj', add_time=True, filename=filename)
        res=traj.f_add_result('aaa.bbb.ccc.iii', 42, 43, comment=7777 * '6')
        traj.ccc.v_annotations['gg']=4
        res=traj.f_add_result('aaa.ddd.eee.jjj', 42, 43, comment=777 * '6')
        traj.ccc.v_annotations['j'] = 'osajdsojds'
        traj.f_store(only_init=True)
        traj.f_store_item('aaa', recursive=True)
        newtraj = load_trajectory(traj.v_name, filename=filename, load_all=2)

        self.compare_trajectories(traj, newtraj)

        traj.iii.f_set(55)

        self.assertFalse(results_equal(traj.iii, newtraj.iii))

        traj.aaa.f_store(recursive=True, store_data=3)

        newtraj.bbb.f_load(recursive=True, load_data=3)

        self.compare_trajectories(traj, newtraj)

        traj.ccc.v_annotations['gg'] = 5
        traj.f_load(load_data=3)
        self.assertTrue(traj.ccc.v_annotations['gg'] == 4)
        traj.ccc.v_annotations['gg'] = 5
        traj.f_store(store_data=3)
        newtraj.f_load(load_data=2)
        self.assertTrue(newtraj.ccc.v_annotations['gg'] == 4)
        newtraj.f_load(load_data=3)
        self.assertTrue(newtraj.ccc.v_annotations['gg'] == 5)

        traj.ccc.f_add_link('link', res)
        traj.f_store_item(traj.ccc, store_data=3, with_links=False)

        newtraj.f_load(load_data=3)
        self.assertTrue('link' not in newtraj.ccc)

        traj.f_store_item(traj.ccc, store_data=3, with_links=True, recursive=True)

        newtraj.f_load_item(newtraj.ccc, with_links=False, recursive=True)
        self.assertTrue('link' not in newtraj.ccc)

        newtraj.f_load_item(newtraj.ccc, recursive=True)
        self.assertTrue('link' in newtraj.ccc)
Code example #32
File: shared_data_test.py Project: femtotrader/pypet
    def test_array_read(self):
        the_reading_array = np.ones((100, 100)) * 4

        first_reading_array = self.traj.results.shared_data.array

        self.assertTrue(first_reading_array is self.shared_array)

        first_reading_array.create_shared_data(obj=the_reading_array)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_reading_array = traj2.shared_data.array.read()

        self.assertTrue(np.all(the_reading_array == second_reading_array),
                        '%s != %s' % (str(the_reading_array), str(second_reading_array)))
Code example #33
File: shared_data_test.py Project: femtotrader/pypet
    def test_array_len(self):
        the_len_array = np.ones((100, 100))

        first_len_array = self.traj.results.shared_data.array

        self.assertTrue(first_len_array is self.shared_array)

        first_len_array.create_shared_data(obj=the_len_array)

        self.assertEqual(len(first_len_array), 100)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_len_array = traj2.results.shared_data.array

        self.assertEqual(len(second_len_array), 100)
Code example #34
File: shared_data_test.py Project: femtotrader/pypet
    def test_array_getenum(self):
        the_getenum_array = np.array(range(100))

        first_getenum_array = self.traj.results.shared_data.array

        first_getenum_array.create_shared_data(obj=the_getenum_array)

        with self.assertRaises(TypeError):
            first_getenum_array.get_enum()

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_enum_array = traj2.results.shared_data.array

        with self.assertRaises(TypeError):
            second_enum_array.get_enum()
Code example #35
    def test_array_getitem(self):
        the_getitem_array = np.array(range(100))

        first_getitem_array = self.traj.results.shared_data.array

        first_getitem_array.create_shared_data(obj=the_getitem_array)

        for k in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[k], first_getitem_array[k])

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        for j in range(len(the_getitem_array)):
            self.assertEqual(the_getitem_array[j],
                             traj2.results.shared_data.array[j])
Code example #36
    def test_array_getenum(self):
        the_getenum_array = np.array(range(100))

        first_getenum_array = self.traj.results.shared_data.array

        first_getenum_array.create_shared_data(obj=the_getenum_array)

        with self.assertRaises(TypeError):
            first_getenum_array.get_enum()

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_enum_array = traj2.results.shared_data.array

        with self.assertRaises(TypeError):
            second_enum_array.get_enum()
Code example #37
    def test_array_read(self):
        the_reading_array = np.ones((100, 100)) * 4

        first_reading_array = self.traj.results.shared_data.array

        self.assertTrue(first_reading_array is self.shared_array)

        first_reading_array.create_shared_data(obj=the_reading_array)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_reading_array = traj2.shared_data.array.read()

        self.assertTrue(
            np.all(the_reading_array == second_reading_array),
            '%s != %s' % (str(the_reading_array), str(second_reading_array)))
Code example #38
    def test_array_len(self):
        the_len_array = np.ones((100, 100))

        first_len_array = self.traj.results.shared_data.array

        self.assertTrue(first_len_array is self.shared_array)

        first_len_array.create_shared_data(obj=the_len_array)

        self.assertEqual(len(first_len_array), 100)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_len_array = traj2.results.shared_data.array

        self.assertEqual(len(second_len_array), 100)
Code example #39
    def test_hdf5_store_load_parameter(self):
        traj_name = make_trajectory_name(self)
        file_name = make_temp_dir(
            os.path.join('brian2', 'tests', 'hdf5',
                         'test_%s.hdf5' % traj_name))
        env = Environment(trajectory=traj_name,
                          filename=file_name,
                          log_config=get_log_config(),
                          dynamic_imports=[Brian2Parameter],
                          add_time=False,
                          storage_service=HDF5StorageService)
        traj = env.v_trajectory
        traj.v_standard_parameter = Brian2Parameter
        traj.f_add_parameter('brian2.single.millivolts',
                             10 * mvolt,
                             comment='single value')

        #traj.f_add_parameter('brian2.array.millivolts', [11, 12]*mvolt, comment='array')
        #traj.f_add_parameter('mV1', 42.0*mV)
        #traj.f_add_parameter('ampere1', 1*mA)
        #traj.f_add_parameter('integer', 16)
        #traj.f_add_parameter('kHz05', 0.5*kHz)
        #traj.f_add_parameter('nested_array', np.array([[6.,7.,8.],[9.,10.,11.]]) * ms)
        #traj.f_add_parameter('b2a', np.array([1., 2.]) * mV)

        # We also need to check if explorations work with hdf5 store!
        #explore_dict = {'ampere1': [1*mA, 2*mA, 3*mA],
        #                'integer': [42,43,44],
        #                'b2a': [np.array([1., 2.]) * mV, np.array([1., 4.]) * mV,
        #                       np.array([1., 2.]) * mV]}
        #traj.f_explore(explore_dict)

        traj.f_store()

        traj2 = load_trajectory(filename=file_name,
                                name=traj_name,
                                dynamic_imports=[Brian2Parameter],
                                load_data=2)
        self.compare_trajectories(traj, traj2)
コード例 #40
0
ファイル: shared_data_test.py プロジェクト: femtotrader/pypet
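    # Writes via __setitem__ are persisted to the HDF5 file, so a value set before storing is
    # still there after reloading, and the reloaded array can be modified in place as well.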
    def test_array_setitem(self):
        the_setitem_array = np.zeros((50, 50))

        first_setitem_array = self.traj.results.shared_data.array

        first_setitem_array.create_shared_data(obj=the_setitem_array)

        first_setitem_array[2, 2] = 10

        self.assertEqual(first_setitem_array[2, 2], 10)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)

        second_setitem_array = traj2.results.shared_data.array

        self.assertEqual(second_setitem_array[2, 2], 10)

        second_setitem_array[3, 3] = 17

        self.assertEqual(second_setitem_array[3, 3], 17)
コード例 #41
0
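    # Iterates over the rows of the shared array inside a StorageContextManager (keeping the
    # HDF5 file open) and compares them to the source numpy array, before and after a reload.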
    def test_array_iterrows(self):
        the_iterrows_array = np.random.randint(0, 100, (100, 100))

        first_iterrows_array = self.traj.results.shared_data.array

        first_iterrows_array.create_shared_data(obj=the_iterrows_array)

        with StorageContextManager(self.traj):
            for idx, row in enumerate(first_iterrows_array.iterrows()):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_iterrows_array = traj2.results.shared_data.array

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_iterrows_array.iterrows()):
                self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
コード例 #42
0
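# Loads a stored exploration, collects per-run test performance and statistics, and returns the
# explored parameter ranges restricted to the runs found in the file (ok_runs).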
def import_traj(folder_path, file_name, order_face=None, traj_name='explore_perf'):
    print("importing data...")
    traj = pypet.load_trajectory(traj_name, filename=os.path.join(folder_path, file_name + '.hdf5'), force=True)
    traj.v_auto_load = True

    perc_correct = np.array([])
    perc_correct_all = []
    stat_diff = []
    ok_runs = []
    for run in traj.f_iter_runs():
        perc_correct = np.append(perc_correct, np.mean(traj.results[run].test_perf))
        perc_correct_all.append(traj.results[run].test_perf)
        stat_diff.append(traj.results[run].stat_diff)
        ok_runs.append(int(run[4:]))

    param_traj = traj.f_get_explored_parameters()
    param = {}
    for k in param_traj:
        if k[11:] != 'name':
            xplr_values = np.array(param_traj[k].f_get_range())[ok_runs]
            if len(np.unique(xplr_values)) > 1:
                param[k[11:]] = xplr_values

    return perc_correct, np.array(perc_correct_all), np.array(stat_diff), param
コード例 #43
0
ファイル: storage_test.py プロジェクト: henribunting/pypet
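    # Exercises natural-naming assignment: lazy adding of parameters and results, group type
    # restrictions (the TypeError cases), links, and reloading with a custom group class passed
    # through dynamic_imports.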
    def test_new_assignment_method(self):
        filename = make_temp_dir('newassignment.hdf5')
        traj = Trajectory(filename=filename)

        traj.v_lazy_adding = True
        comment = 'A number'
        traj.par.x = 44, comment

        self.assertTrue(traj.f_get('x').v_comment == comment)

        traj.par.iamgroup = a_new_group

        self.assertTrue(isinstance(traj.iamgroup, ParameterGroup))

        traj.v_lazy_adding = False
        traj.x = 45
        self.assertTrue(traj.par.f_get('x').f_get() == 45)

        self.assertTrue(isinstance(traj.f_get('x'), Parameter))

        traj.f = Parameter('lll', 444, 'lll')

        self.assertTrue(traj.f_get('f').v_name == 'f')

        traj.v_lazy_adding = True
        traj.res.k = 22, 'Hi'
        self.assertTrue(isinstance(traj.f_get('k'), Result))
        self.assertTrue(traj.f_get('k')[1] == 'Hi')

        with self.assertRaises(AttributeError):
            traj.res.k = 33, 'adsd'

        conf = traj.conf
        with self.assertRaises(AttributeError):
            conf = traj.conf.jjjj
        traj.f_set_properties(fast_access=True)


        traj.crun = 43, 'JJJ'
        self.assertTrue(traj.run_A[0] == 43)

        with self.assertRaises(AttributeError):
            traj.f_set_properties(j=7)

        with self.assertRaises(AttributeError):
            traj.f_set_properties(depth=7)

        traj.hui = (('444', 'kkkk',), 'l')



        self.assertTrue(traj.f_get('hui')[1] == 'l')

        with self.assertRaises(AttributeError):
            traj.hui = ('445', 'kkkk',)

        traj.f_get('hui').f_set(('445', 'kkkk',))

        self.assertTrue(traj.f_get('hui')[1] == 'l')

        self.assertTrue(traj.hui[0] == ('445', 'kkkk',))

        traj.f_add_link('klkikju', traj.par) # for shizzle


        traj.meee = Result('h', 43, hui = 3213, comment='du')

        self.assertTrue(traj.meee.h.h == 43)

        with self.assertRaises(TypeError):
            traj.par.mu = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.mu = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.mu = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.mu = NNGroupNode('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.mu = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.mu = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.mu = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.mu = ResultGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.mu = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.mu = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.mu = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.mu = ConfigGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.mu = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.mu = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.mu = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.mu = DerivedParameterGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.dpar.mu = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.mu = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.mu = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.mu = ParameterGroup('jj', comment='mi')

        traj.par.mu = ParameterGroup('jj', comment='mi')
        traj.res.mus = ResultGroup('jj', comment='mi')
        traj.mu = NNGroupNode('jj')
        cg = ConfigGroup('a.g')
        traj.conf.a = cg

        self.assertTrue(traj.f_get('conf.a.a.g', shortcuts=False) is cg)

        dg = DerivedParameterGroup('ttt')
        traj.dpar.ttt = dg

        self.assertTrue(traj.f_get('dpar.ttt', shortcuts=False) is dg)

        traj.mylink = traj.par

        self.assertTrue(traj.mylink is traj.par)

        traj.vvv = NNGroupNode('', comment='kkk')

        self.assertTrue(traj.vvv.v_full_name == 'vvv')

        self.assertTrue(traj.par.mu.v_name == 'mu')

        traj.rrr = MyParamGroup('ff')

        traj.par.g = MyParamGroup('')

        pg = traj.f_add_parameter_group(comment='gg', full_name='me')
        self.assertTrue(traj.par.me is pg)

        traj.f_store()

        traj = load_trajectory(index=-1, filename=filename, dynamic_imports=MyParamGroup)

        self.assertTrue(isinstance(traj.rrr, NNGroupNode))
        self.assertTrue(isinstance(traj.rrr.ff, MyParamGroup))
        self.assertTrue(isinstance(traj.par.g, MyParamGroup))

        traj.par = Parameter('hiho', 42, comment='you')
        traj.par = Parameter('g1.g2.g3.g4.g5', 43)

        self.assertTrue(traj.hiho == 42)
        self.assertTrue(isinstance(traj.par.g1, ParameterGroup ))
        self.assertTrue(isinstance(traj.par.g3, ParameterGroup ))
        self.assertTrue(traj.g3.g5 == 43)
コード例 #44
0
ファイル: batch_lib.py プロジェクト: bagjohn/larvaworld-1
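# Batch-run helper: first tries to resume or expand an existing trajectory; otherwise builds a
# fresh Environment (optionally multiprocessing with a pool and frozen input), fills in the
# configuration and exploration space, runs the simulations and optionally post-processes them.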
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               config=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    # print(locals())
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)
    plot_path = os.path.join(dir_path, f'{batch_id}.pdf')
    data_path = os.path.join(dir_path, f'{batch_id}.csv')
    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if os.path.exists(parent_dir_path) and os.path.exists(
            dir_path) and overwrite == False:
        build_new = False
        try:
            print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except:
            try:
                print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj)
                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # try:
        print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive; we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        traj = env.traj
        print('Created novel environment')
        fly_params = sim_config['fly_params']
        env_params = sim_config['env_params']
        sim_params = sim_config['sim_params']
        if all(v is not None for v in [sim_params, env_params, fly_params]):
            traj = load_default_configuration(traj,
                                              sim_params=sim_params,
                                              env_params=env_params,
                                              fly_params=fly_params)
        elif params is not None:
            for p in params:
                traj.f_apar(p, 0.0)
        if config is not None:
            for k, v in config.items():
                traj.f_aconf(k, v)
        traj.f_aconf('parent_dir_path',
                     parent_dir_path,
                     comment='The parent directory')
        traj.f_aconf('dir_path',
                     dir_path,
                     comment='The directory path for saving data')
        traj.f_aconf('plot_path',
                     plot_path,
                     comment='The file path for saving plot')
        traj.f_aconf('data_path',
                     data_path,
                     comment='The file path for saving data')
        traj.f_aconf('dataset_path',
                     f'{dir_path}/{batch_id}',
                     comment='The directory path for saving datasets')
        traj.f_explore(space)
        # except:
        #     raise ValueError(f'Failed to perform batch run {batch_id}')

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            common_folder=batch_id,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        return final_process_method(env.traj)
コード例 #45
0
ファイル: storage_test.py プロジェクト: nikhil-garg/pypet
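    # Variant of the assignment test without lazy adding: leaf nodes are created by assigning
    # Parameter/Result instances directly, while the group type restrictions and the reload via
    # dynamic_imports=MyParamGroup behave as before.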
    def test_new_assignment_method(self):
        filename = make_temp_dir('newassignment.hdf5')
        traj = Trajectory(filename=filename, add_time=True)

        comment = 'A number'
        traj.par.x = Parameter('', 44, comment)

        self.assertTrue(traj.f_get('x').v_comment == comment)

        traj.x = 45
        self.assertTrue(traj.par.f_get('x').f_get() == 45)

        self.assertTrue(isinstance(traj.f_get('x'), Parameter))

        with self.assertRaises(AttributeError):
            traj.f = Parameter('lll', 444, 'lll')


        traj.f = Parameter('', 444, 'lll')

        self.assertTrue(traj.f_get('f').v_name == 'f')


        conf = traj.conf
        with self.assertRaises(AttributeError):
            conf = traj.conf.jjjj
        traj.f_set_properties(fast_access=True)


        traj.crun = Result('', k=43, m='JJJ')
        self.assertTrue(traj.run_A['k'] == 43)

        with self.assertRaises(AttributeError):
            traj.f_set_properties(j=7)

        with self.assertRaises(AttributeError):
            traj.f_set_properties(depth=7)

        traj.hui = Result('hui', ('444', 'kkkk',), 'l')



        self.assertTrue(traj.f_get('hui')[1] == 'l')


        traj.f_get('hui').f_set(('445', 'kkkk',))

        self.assertTrue(traj.f_get('hui')[1] == 'l')

        self.assertTrue(traj.hui[0] == ('445', 'kkkk',))

        traj.f_add_link('klkikju', traj.par) # for shizzle


        traj.meee = Result('meee.h', 43, hui = 3213, comment='du')

        self.assertTrue(traj.meee.h.h == 43)

        with self.assertRaises(TypeError):
            traj.par.jj = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.jj = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.jj = NNGroupNode('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.jj = NNGroupNode('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.jj = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.jj = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.jj = ResultGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.jj = ResultGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.jj = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.dpar.jj = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.jj = ConfigGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.jj = ConfigGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.par.jj = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.jj = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.jj = DerivedParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.jj = DerivedParameterGroup('jj', comment='mi')

        with self.assertRaises(TypeError):
            traj.dpar.jj = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.res.jj = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.conf.jj = ParameterGroup('jj', comment='mi')
        with self.assertRaises(TypeError):
            traj.jj = ParameterGroup('jj', comment='mi')

        traj.par.jj = ParameterGroup('jj', comment='mi')
        traj.res.jj = ResultGroup('jj', comment='mi')
        traj.jj = NNGroupNode('jj')
        cg = ConfigGroup('a.g')
        traj.conf.a = cg

        self.assertTrue(traj.f_get('conf.a.g', shortcuts=False) is cg)

        dg = DerivedParameterGroup('ttt')
        traj.dpar.ttt = dg

        self.assertTrue(traj.f_get('dpar.ttt', shortcuts=False) is dg)

        traj.mylink = traj.par

        self.assertTrue(traj.mylink is traj.par)

        traj.vvv = NNGroupNode('', comment='kkk')

        self.assertTrue(traj.vvv.v_full_name == 'vvv')

        self.assertTrue(traj.par.jj.v_name == 'jj')

        traj.ff = MyParamGroup('ff')

        traj.par.g = MyParamGroup('')

        pg = traj.f_add_parameter_group(comment='gg', full_name='me')
        self.assertTrue(traj.par.me is pg)

        traj.f_store()

        traj = load_trajectory(index=-1, filename=filename, dynamic_imports=MyParamGroup)

        self.assertTrue(isinstance(traj.ff, MyParamGroup))
        self.assertTrue(isinstance(traj.par.g, MyParamGroup))

        traj.par.hiho = Parameter('hiho', 42, comment='you')
        traj.par.g1 = Parameter('g1.g2.g3.g4.g5', 43)

        self.assertTrue(traj.hiho == 42)
        self.assertTrue(isinstance(traj.par.g1, ParameterGroup ))
        self.assertTrue(isinstance(traj.par.g3, ParameterGroup ))
        self.assertTrue(traj.g3.g5 == 43)
コード例 #46
0
ファイル: profiling.py プロジェクト: MehmetTimur/pypet
def test_load():
    newtraj = load_trajectory(index=-1, filename=filename, load_data=1)
コード例 #47
0
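    # Fills the shared table row by row, verifies the contents after a reload, then appends further
    # rows through a second and a third freshly loaded trajectory to confirm that appends persist.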
    def test_table_append(self):
        the_append_table = self.traj.results.shared_data.table
        self.assertTrue(the_append_table is self.shared_table)
        the_append_table.create_shared_data(description=MyTable)

        with StorageContextManager(self.traj):
            row = the_append_table.row
            for i in range(15):
                row['id'] = i * 2
                row['name'] = 'name %d' % i
                row['surname'] = '%d surname' % i
                row['weight'] = (i * 0.5 + 50.0)
                row.append()
            the_append_table.flush()

            for idx, row in enumerate(the_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'], compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'],
                                 compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx * 0.5 + 50.0)

        self.traj.f_store()

        traj2 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        second_append_table = traj2.results.shared_data.table

        with StorageContextManager(traj2):
            for idx, row in enumerate(second_append_table.iterrows()):
                self.assertEqual(row['id'], idx * 2)
                self.assertEqual(row['name'], compat.tobytes('name %d' % idx))
                self.assertEqual(row['surname'],
                                 compat.tobytes('%d surname' % idx))
                self.assertEqual(row['weight'], idx * 0.5 + 50.0)

            second_append_table.append([(30, 'mehmet', 'timur', 65.5)])

            self.assertEqual(second_append_table.read(field='id')[-1], 30)
            self.assertEqual(
                second_append_table.read(field='name')[-1],
                compat.tobytes('mehmet'))
            self.assertEqual(
                second_append_table.read(field='surname')[-1],
                compat.tobytes('timur'))
            self.assertEqual(
                second_append_table.read(field='weight')[-1], 65.5)

        traj2.f_store()

        traj3 = load_trajectory(name=self.traj.v_name,
                                filename=self.filename,
                                load_all=2,
                                dynamic_imports=SharedResult)

        third_append_table = traj3.results.shared_data.table

        self.assertEqual((third_append_table.read(field='id')[-1]), 30)
        self.assertEqual((third_append_table.read(field='name')[-1]),
                         compat.tobytes('mehmet'))
        self.assertEqual((third_append_table.read(field='surname')[-1]),
                         compat.tobytes('timur'))
        self.assertEqual((third_append_table.read(field='weight')[-1]), 65.5)

        third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])

        self.assertEqual((third_append_table.read(field='id')[-1]), 33)
        self.assertEqual((third_append_table.read(field='name')[-1]),
                         compat.tobytes('Harrison'))
        self.assertEqual((third_append_table.read(field='surname')[-1]),
                         compat.tobytes('Ford'))
        self.assertEqual((third_append_table.read(field='weight')[-1]), 95.5)
コード例 #48
0
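    # Converts results back and forth: make_ordinary_result turns shared arrays/tables/frames into
    # plain in-memory results and make_shared_result does the reverse, with reloads in between to
    # confirm the data survives each conversion.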
    def test_conversions(self):
        filename = make_temp_dir('hdf5manipulation.hdf5')
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)

        trajname = traj.v_name
        traj.v_standard_result = SharedResult

        traj.f_store(only_init=True)

        traj.f_add_result('shared_data')

        thedata = np.zeros((1000, 1000))
        myarray = SharedArray('array', traj.shared_data, trajectory=traj)
        traj.shared_data['array'] = myarray
        mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
        traj.shared_data['t1'] = mytable
        dadict = {
            'hi': [1, 2, 3, 4, 5],
            'shu': ['bi', 'du', 'da', 'ha', 'hui']
        }
        dadict2 = {'answer': [42]}
        res = traj.f_add_result('shared.dfs')
        res['df'] = SharedPandasFrame()
        res['df'].create_shared_data(data=pd.DataFrame(dadict),
                                     trajectory=traj)
        frame = SharedPandasFrame('df1',
                                  traj.f_get('shared.dfs'),
                                  trajectory=traj,
                                  add_to_parent=True)
        frame.create_shared_data(data=pd.DataFrame(dadict2), )
        res['df1'] = frame

        traj.f_add_result('mylist', [1, 2, 3])
        traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
        traj.f_add_result('my.myarray', np.zeros((50, 50)))
        traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
        traj.f_add_result('my.mytable', ObjectTable(data=dadict2))

        myarray.create_shared_data(data=thedata)
        mytable.create_shared_data(first_row={
            'hi': compat.tobytes('hi'),
            'huhu': np.ones(3)
        })

        traj.f_store()

        data = myarray.read()
        myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

        with StorageContextManager(traj):
            myarray[2, 2] = 10
            data = myarray.read()
            self.assertTrue(data[2, 2] == 10)

        self.assertTrue(data[2, 2] == 10)
        self.assertFalse(traj.v_storage_service.is_open)

        traj = load_trajectory(name=trajname,
                               filename=filename,
                               load_all=2,
                               dynamic_imports=SharedResult)

        make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
        array = traj.shared_data.array
        self.assertTrue(isinstance(array, np.ndarray))
        thedata[2, 2] = 10
        self.assertTrue(np.all(array == thedata))

        make_ordinary_result(
            traj.shared_data,
            't1',
            trajectory=traj,
        )
        t1 = traj.shared_data.t1
        self.assertTrue(isinstance(t1, ObjectTable))
        self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))

        dfs = traj.shared.dfs
        make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
        theframe = dfs.f_get('df')
        self.assertTrue(isinstance(dfs, Result))
        self.assertTrue(isinstance(theframe, pd.DataFrame))
        self.assertTrue(theframe['hi'][0] == 1)

        listres = traj.f_get('mylist')
        listres = make_shared_result(listres, 0, trajectory=traj)
        with StorageContextManager(traj):
            self.assertTrue(listres[0][2] == 3)
            listres[0][0] = 4

        self.assertTrue(listres[0][0] == 4)
        listres = make_ordinary_result(listres, 0, trajectory=traj)
        traj = load_trajectory(name=trajname,
                               filename=filename,
                               load_all=2,
                               dynamic_imports=SharedResult)
        mylist = traj.mylist
        self.assertTrue(isinstance(listres, Result))
        self.assertTrue(mylist[0] == 4)
        self.assertTrue(isinstance(mylist, list))

        mytuple = traj.mytuple

        with self.assertRaises(AttributeError):
            mytuple = make_shared_result(mytuple,
                                         'mylist',
                                         traj,
                                         new_class=SharedArray)

        mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
        self.assertTrue(mytuple.k[1] == 2)

        mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
        self.assertTrue(isinstance(mytuple.k, tuple))
        self.assertTrue(mytuple.k[2] == 3)

        myframe = traj.myframe
        myframe = make_shared_result(myframe, 'data', traj)

        theframe = myframe.data.read()
        self.assertTrue(theframe['answer'][0] == 42)

        myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
        traj.f_load_item(myframe)
        self.assertTrue(myframe.data['answer'][0] == 42)

        mytable = traj.f_get('mytable')
        mytable = make_shared_result(mytable, 0, traj)

        self.assertTrue(isinstance(mytable[0], SharedTable))
        rows = mytable.mytable.read()

        self.assertTrue(rows[0][0] == 42)

        mytable = make_ordinary_result(mytable, 0, trajectory=traj)

        self.assertTrue(isinstance(mytable, Result))
        self.assertTrue(mytable[0]['answer'][0] == 42)
コード例 #49
0
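    # Covers the PyTables array flavours behind shared results (Array, CArray, EArray, VLArray):
    # each one is modified after a reload, and a second reload checks the mutations persisted.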
    def test_all_arrays(self):
        filename = make_temp_dir('hdf5arrays.hdf5')
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name

        npearray = np.ones((2, 10, 3), dtype=np.float64)  # np.float alias was removed in newer NumPy
        thevlarray = np.array(
            [compat.tobytes('j'), 22.2,
             compat.tobytes('gutter')])
        traj.f_store(only_init=True)
        res = traj.f_add_result(SharedResult, 'arrays')
        res['carray'] = SharedCArray()
        res['carray'].create_shared_data(shape=(10, 10),
                                         atom=pt.atom.FloatAtom())
        res['earray'] = SharedEArray()
        res['earray'].create_shared_data(obj=npearray)
        res['vlarray'] = SharedVLArray()
        res['vlarray'].create_shared_data(obj=thevlarray)
        res['array'] = SharedArray()
        res['array'].create_shared_data(data=npearray)

        traj.f_store()

        traj = load_trajectory(name=trajname,
                               filename=filename,
                               load_all=2,
                               dynamic_imports=SharedResult)

        toappend = [44, compat.tobytes('k')]
        with StorageContextManager(traj):
            a1 = traj.arrays.array
            a1[0, 0, 0] = 4.0

            a2 = traj.arrays.carray
            a2[0, 1] = 4

            a4 = traj.arrays.vlarray
            a4.append(toappend)

            a3 = traj.arrays.earray
            a3.append(np.zeros((1, 10, 3)))

        traj = load_trajectory(name=trajname,
                               filename=filename,
                               load_all=2,
                               dynamic_imports=SharedResult)

        with StorageContextManager(traj):
            a1 = traj.arrays.array
            self.assertTrue(a1[0, 0, 0] == 4.0)

            a2 = traj.arrays.carray
            self.assertTrue(a2[0, 1] == 4)

            a3 = traj.arrays.earray
            self.assertTrue(a3.read().shape == (3, 10, 3))

            a4 = traj.arrays.vlarray
            for idx, x in enumerate(a4):
                if idx == 0:
                    self.assertTrue(np.all(x == np.array(thevlarray)))
                elif idx == 1:
                    self.assertTrue(np.all(x == np.array(toappend)))
                else:
                    raise RuntimeError()
コード例 #50
0
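# Loads the 'explore_perf' trajectory, averages per-run test performance, prints the best explored
# parameter combination, and saves a pairwise scatter plot of performance for each parameter pair.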
def plot_results(folder_path=''):
    if folder_path == '':
        folder_path = '/Users/raphaelholca/Dropbox/hebbian_net/output/test_pypet_0/'
        # folder_path = '/Users/raphaelholca/Mountpoint/hebbianRL/output/proba_two_lin/'

    traj_name = 'explore_perf'
    traj = pypet.load_trajectory(traj_name, filename=os.path.join(folder_path, 'explore_perf.hdf5'), force=True)
    traj.v_auto_load = True

    perf_all = []
    for run in traj.f_iter_runs():
        perf_all.append(traj.results[run].test_perf)
    perf_all = np.array(perf_all)
    perf = np.mean(perf_all, 1)

    param_traj = traj.f_get_explored_parameters()
    param = {}
    for k in param_traj:
        if k[11:] != 'name':
            xplr_values = np.array(param_traj[k].f_get_range())
            param[k[11:]] = xplr_values

    arg_best = np.argmax(perf)

    best_param = {}

    print('best parameters:')
    print('================')
    for k in param.keys():
        best_param[k] = param[k][arg_best]
        print(k + ' : ' + str(param[k][arg_best]) + '\t\t' + str(np.round(np.unique(param[k]), 3)))
    print("\nbest performance: " + str(np.round(np.max(perf) * 100, 2)) + "\n")

    keys = list(param.keys())  # list() so the keys can be indexed and sliced below
    for ik in range(len(keys)):
        if len(keys) == 1: ik = -1
        for k in keys[ik + 1:]:
            others = keys[:]
            if len(keys) > 1:
                others.remove(keys[ik])
                others.remove(k)

            mask = np.ones_like(param[k], dtype=bool)
            if len(param) > 2:
                for o in others:
                    mask = np.logical_and(mask, param[o] == best_param[o])
            pX = param[keys[ik]][mask]
            pY = param[k][mask]
            rC = np.hstack(perf)[mask]

            if True:  # True: non-linear representation of results; False: linear representation
                ipX = np.zeros(len(pX))
                ipY = np.zeros(len(pY))
                for i in range(len(pX)):
                    ipX[i] = np.argwhere(pX[i] == np.sort(np.unique(pX)))
                    ipY[i] = np.argwhere(pY[i] == np.sort(np.unique(pY)))
            else:
                ipX = np.copy(pX)
                ipY = np.copy(pY)

            fig = plt.figure()
            fig.patch.set_facecolor('white')

            plt.scatter(ipX, ipY, c=rC, cmap='CMRmap', vmin=np.min(perf) - 0.1, vmax=np.max(perf), s=1000, marker='s')
            # plt.scatter(param[keys[ik]][arg_best], param[k][arg_best], c='r', s=50, marker='x')
            for i in range(len(pX)):
                if pX[i] == param[keys[ik]][arg_best] and pY[i] == param[k][arg_best]:
                    plt.text(ipX[i], ipY[i], str(np.round(rC[i] * 100, 1)), horizontalalignment='center', verticalalignment='center', weight='bold', bbox=dict(facecolor='red', alpha=0.5))
                else:
                    plt.text(ipX[i], ipY[i], str(np.round(rC[i] * 100, 1)), horizontalalignment='center', verticalalignment='center')
            plt.xticks(ipX, pX)
            plt.yticks(ipY, pY)
            plt.xlabel(keys[ik], fontsize=25)
            plt.ylabel(k, fontsize=25)
            plt.tick_params(axis='both', which='major', labelsize=18)
            plt.tight_layout()
            plt.savefig(os.path.join(folder_path, keys[ik] + '_' + k + '.pdf'))

            plt.close(fig)

    name_best = ''
    for k in sorted(param.keys()):
        name_best += '_'
        name_best += k
        name_best += str(best_param[k]).replace('.', ',')

    return name_best
コード例 #51
0
def test_load():
    newtraj = load_trajectory(index=-1, filename=filename, load_data=1)
コード例 #52
0
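# Variant of the batch-run helper that delegates trajectory setup to prepare_traj/config_traj and
# applies an optional optimization configuration when loading or building the trajectory.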
def _batch_run(dir='unnamed',
               batch_id='template',
               space=None,
               save_data_in_hdf5=False,
               single_method=single_run,
               process_method=null_processing,
               post_process_method=None,
               final_process_method=None,
               multiprocessing=True,
               resumable=True,
               overwrite=False,
               sim_config=None,
               params=None,
               optimization=None,
               post_kwargs={},
               run_kwargs={}):
    saved_args = locals()
    traj_name = f'{batch_id}_traj'
    parent_dir_path = f'{paths.BatchRunFolder}/{dir}'
    dir_path = os.path.join(parent_dir_path, batch_id)

    filename = f'{dir_path}/{batch_id}.hdf5'
    build_new = True
    if os.path.exists(parent_dir_path) and os.path.exists(
            dir_path) and overwrite == False:
        build_new = False
        try:
            # print('Trying to resume existing trajectory')
            env = Environment(continuable=True)
            env.resume(trajectory_name=traj_name, resume_folder=dir_path)
            print('Resumed existing trajectory')
            build_new = False
        except:
            try:
                # print('Trying to load existing trajectory')
                traj = load_trajectory(filename=filename,
                                       name=traj_name,
                                       load_all=0)
                env = Environment(trajectory=traj, multiproc=True, ncores=4)

                traj = config_traj(traj, optimization)

                traj.f_load(index=None, load_parameters=2, load_results=0)
                traj.f_expand(space)
                print('Loaded existing trajectory')
                build_new = False
            except:
                print(
                    'Neither resuming nor expanding the existing trajectory worked'
                )

    if build_new:
        if multiprocessing:
            multiproc = True
            resumable = False
            wrap_mode = pypetconstants.WRAP_MODE_QUEUE
        else:
            multiproc = False
            resumable = True
            wrap_mode = pypetconstants.WRAP_MODE_LOCK
        # print('Trying to create novel environment')
        env = Environment(
            trajectory=traj_name,
            filename=filename,
            file_title=batch_id,
            comment=f'{batch_id} batch run!',
            large_overview_tables=True,
            overwrite_file=True,
            resumable=False,
            resume_folder=dir_path,
            multiproc=multiproc,
            ncores=4,
            use_pool=True,  # Our runs are inexpensive; we can get rid of overhead by using a pool
            freeze_input=True,  # We can avoid some overhead by freezing the input to the pool
            wrap_mode=wrap_mode,
            graceful_exit=True)
        print('Created novel environment')
        traj = prepare_traj(env.traj, sim_config, params, batch_id,
                            parent_dir_path, dir_path)
        traj = config_traj(traj, optimization)
        traj.f_explore(space)

    if post_process_method is not None:
        env.add_postprocessing(post_process_method, **post_kwargs)
    env.run(single_method,
            process_method,
            save_data_in_hdf5=save_data_in_hdf5,
            save_to=dir_path,
            **run_kwargs)
    env.disable_logging()
    print('Batch run complete')
    if final_process_method is not None:
        results = final_process_method(env.traj)
        # print(results)
        return results
コード例 #53
0
ファイル: shared_data_test.py プロジェクト: MehmetTimur/pypet
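    # Another copy of the shared/ordinary conversion round trip (cf. the test above); the data
    # survives each conversion and reload in the same way.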
    def test_conversions(self):
        filename = make_temp_dir("hdf5manipulation.hdf5")
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)

        trajname = traj.v_name
        traj.v_standard_result = SharedResult

        traj.f_store(only_init=True)

        traj.f_add_result("shared_data")

        thedata = np.zeros((1000, 1000))
        myarray = SharedArray("array", traj.shared_data, trajectory=traj)
        traj.shared_data["array"] = myarray
        mytable = SharedTable("t1", traj.shared_data, trajectory=traj)
        traj.shared_data["t1"] = mytable
        # mytable2 = SharedTableResult('h.t2', trajectory=traj)
        # mytable3 = SharedTableResult('jjj.t3', trajectory=traj)
        dadict = {"hi": [1, 2, 3, 4, 5], "shu": ["bi", "du", "da", "ha", "hui"]}
        dadict2 = {"answer": [42]}
        res = traj.f_add_result("shared.dfs")
        res["df"] = SharedPandasFrame()
        res["df"].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
        frame = SharedPandasFrame("df1", traj.f_get("shared.dfs"), trajectory=traj)
        frame.create_shared_data(data=pd.DataFrame(dadict2))
        res["df1"] = frame

        traj.f_add_result("mylist", [1, 2, 3])
        traj.f_add_result("my.mytuple", k=(1, 2, 3), wa=42)
        traj.f_add_result("my.myarray", np.zeros((50, 50)))
        traj.f_add_result("my.myframe", data=pd.DataFrame(dadict2))
        traj.f_add_result("my.mytable", ObjectTable(data=dadict2))

        myarray.create_shared_data(data=thedata)
        mytable.create_shared_data(first_row={"hi": compat.tobytes("hi"), "huhu": np.ones(3)})

        traj.f_store()

        data = myarray.read()
        arr = myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

        with StorageContextManager(traj) as cm:
            myarray[2, 2] = 10
            data = myarray.read()
            self.assertTrue(data[2, 2] == 10)

        self.assertTrue(data[2, 2] == 10)
        self.assertFalse(traj.v_storage_service.is_open)

        traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)

        make_ordinary_result(traj.shared_data, "array", trajectory=traj)
        array = traj.shared_data.array
        self.assertTrue(isinstance(array, np.ndarray))
        thedata[2, 2] = 10
        self.assertTrue(np.all(array == thedata))

        make_ordinary_result(traj.shared_data, "t1", trajectory=traj)
        t1 = traj.shared_data.t1
        self.assertTrue(isinstance(t1, ObjectTable))  #
        self.assertTrue(np.all(t1["huhu"][0] == np.ones(3)))

        dfs = traj.shared.dfs
        make_ordinary_result(traj.shared.dfs, "df", trajectory=traj)
        theframe = dfs.f_get("df")
        self.assertTrue(isinstance(dfs, Result))
        self.assertTrue(isinstance(theframe, pd.DataFrame))
        self.assertTrue(theframe["hi"][0] == 1)

        listres = traj.f_get("mylist")
        listres = make_shared_result(listres, 0, trajectory=traj)
        with StorageContextManager(traj) as cm:
            self.assertTrue(listres[0][2] == 3)
            listres[0][0] = 4

        self.assertTrue(listres[0][0] == 4)
        listres = make_ordinary_result(listres, 0, trajectory=traj)
        traj = load_trajectory(name=trajname, filename=filename, load_all=2, dynamic_imports=SharedResult)
        mylist = traj.mylist
        self.assertTrue(isinstance(listres, Result))
        self.assertTrue(mylist[0] == 4)
        self.assertTrue(isinstance(mylist, list))

        mytuple = traj.mytuple

        with self.assertRaises(AttributeError):
            mytuple = make_shared_result(mytuple, "mylist", traj, new_class=SharedArray)

        mytuple = make_shared_result(mytuple, "k", traj, new_class=SharedArray)
        self.assertTrue(mytuple.k[1] == 2)

        mytuple = make_ordinary_result(mytuple, "k", trajectory=traj)
        self.assertTrue(isinstance(mytuple.k, tuple))
        self.assertTrue(mytuple.k[2] == 3)

        myframe = traj.myframe
        myframe = make_shared_result(myframe, "data", traj)

        theframe = myframe.data.read()
        self.assertTrue(theframe["answer"][0] == 42)

        myframe = make_ordinary_result(myframe, "data", trajectory=traj)
        traj.f_load_item(myframe)
        self.assertTrue(myframe.data["answer"][0] == 42)

        mytable = traj.f_get("mytable")
        mytable = make_shared_result(mytable, 0, traj)

        self.assertTrue(isinstance(mytable[0], SharedTable))
        rows = mytable.mytable.read()

        self.assertTrue(rows[0][0] == 42)

        mytable = make_ordinary_result(mytable, 0, trajectory=traj)

        self.assertTrue(isinstance(mytable, Result))
        self.assertTrue(mytable[0]["answer"][0] == 42)
コード例 #54
0
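    # Creates shared arrays and tables under one result, appends table rows inside a storage
    # context, mutates the array, then reloads the trajectory and verifies rows and values persisted.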
    def test_storing_and_manipulating(self):
        filename = make_temp_dir('hdf5manipulation.hdf5')
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)
        trajname = traj.v_name

        thedata = np.zeros((1000, 1000))
        res = traj.f_add_result(SharedResult, 'shared')
        myarray = SharedArray('array',
                              res,
                              trajectory=traj,
                              add_to_parent=True)
        mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True)
        mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True)
        mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True)

        traj.f_store(only_init=True)
        myarray.create_shared_data(data=thedata)
        mytable.create_shared_data(first_row={
            'hi': compat.tobytes('hi'),
            'huhu': np.ones(3)
        })
        mytable2.create_shared_data(description={
            'ha': pt.StringCol(2, pos=0),
            'haha': pt.FloatCol(pos=1)
        })
        mytable3.create_shared_data(description={
            'ha': pt.StringCol(2, pos=0),
            'haha': pt.FloatCol(pos=1)
        })

        traj.f_store()

        newrow = {'ha': 'hu', 'haha': 4.0}

        with self.assertRaises(TypeError):
            traj.shared.t2.row

        with StorageContextManager(traj) as cm:
            row = traj.shared.t2.row
            for irun in range(11):
                for key, val in newrow.items():
                    row[key] = val
                row.append()
            traj.shared.t3.flush()

        data = myarray.read()
        myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

        with StorageContextManager(traj):
            myarray[2, 2] = 10
            data = myarray.read()
            self.assertTrue(data[2, 2] == 10)

        self.assertTrue(data[2, 2] == 10)
        self.assertFalse(traj.v_storage_service.is_open)

        traj = load_trajectory(name=trajname, filename=filename)

        traj.f_load(load_data=2)

        traj.shared.t2.traj = traj
        traj.shared.t1.traj = traj
        traj.shared.array.traj = traj

        self.assertTrue(traj.shared.t2.nrows == 11,
                        '%s != 11' % str(traj.shared.t2.nrows))
        self.assertTrue(traj.shared.t2[0]['ha'] == compat.tobytes('hu'),
                        traj.shared.t2[0]['ha'])
        self.assertTrue(traj.shared.t2[1]['ha'] == compat.tobytes('hu'),
                        traj.shared.t2[1]['ha'])
        self.assertTrue('huhu' in traj.shared.t1.colnames)
        self.assertTrue(traj.shared.array[2, 2] == 10)
コード例 #55
0
def plot_results(folder_path=''):
    if folder_path == '':
        folder_path = '/Users/raphaelholca/Dropbox/hebbian_net/output/test_pypet_0/'
        # folder_path = '/Users/raphaelholca/Mountpoint/hebbianRL/output/proba_two_lin/'

    traj_name = 'explore_perf'
    traj = pypet.load_trajectory(traj_name,
                                 filename=os.path.join(folder_path,
                                                       'explore_perf.hdf5'),
                                 force=True)
    traj.v_auto_load = True

    perf_all = []
    for run in traj.f_iter_runs():
        perf_all.append(traj.results[run].test_perf)
    perf_all = np.array(perf_all)
    perf = np.mean(perf_all, 1)

    param_traj = traj.f_get_explored_parameters()
    param = {}
    for k in param_traj:
        if k[11:] != 'name':
            xplr_values = np.array(param_traj[k].f_get_range())
            param[k[11:]] = xplr_values

    arg_best = np.argmax(perf)

    best_param = {}

    print('best parameters:')
    print('================')
    for k in param.keys():
        best_param[k] = param[k][arg_best]
        print(k + ' : ' + str(param[k][arg_best]) + '\t\t' +
              str(np.round(np.unique(param[k]), 3)))
    print("\nbest performance: " + str(np.round(np.max(perf) * 100, 2)) + "\n")

    keys = list(param.keys())  # list() so the keys can be indexed and sliced below
    for ik in range(len(keys)):
        if len(keys) == 1: ik = -1
        for k in keys[ik + 1:]:
            others = keys[:]
            if len(keys) > 1:
                others.remove(keys[ik])
                others.remove(k)

            mask = np.ones_like(param[k], dtype=bool)
            if len(param) > 2:
                for o in others:
                    mask = np.logical_and(mask, param[o] == best_param[o])
            pX = param[keys[ik]][mask]
            pY = param[k][mask]
            rC = np.hstack(perf)[mask]

            if True:  #True: non-linear representation of results; False: linear representation
                ipX = np.zeros(len(pX))
                ipY = np.zeros(len(pY))
                for i in range(len(pX)):
                    ipX[i] = np.argwhere(pX[i] == np.sort(np.unique(pX)))
                    ipY[i] = np.argwhere(pY[i] == np.sort(np.unique(pY)))
            else:
                ipX = np.copy(pX)
                ipY = np.copy(pY)

            fig = plt.figure()
            fig.patch.set_facecolor('white')

            plt.scatter(ipX,
                        ipY,
                        c=rC,
                        cmap='CMRmap',
                        vmin=np.min(perf) - 0.1,
                        vmax=np.max(perf),
                        s=1000,
                        marker='s')
            # plt.scatter(param[keys[ik]][arg_best], param[k][arg_best], c='r', s=50, marker='x')
            for i in range(len(pX)):
                if pX[i] == param[
                        keys[ik]][arg_best] and pY[i] == param[k][arg_best]:
                    plt.text(ipX[i],
                             ipY[i],
                             str(np.round(rC[i] * 100, 1)),
                             horizontalalignment='center',
                             verticalalignment='center',
                             weight='bold',
                             bbox=dict(facecolor='red', alpha=0.5))
                else:
                    plt.text(ipX[i],
                             ipY[i],
                             str(np.round(rC[i] * 100, 1)),
                             horizontalalignment='center',
                             verticalalignment='center')
            plt.xticks(ipX, pX)
            plt.yticks(ipY, pY)
            plt.xlabel(keys[ik], fontsize=25)
            plt.ylabel(k, fontsize=25)
            plt.tick_params(axis='both', which='major', labelsize=18)
            plt.tight_layout()
            plt.savefig(os.path.join(folder_path, keys[ik] + '_' + k + '.pdf'))

            plt.close(fig)

    name_best = ''
    for k in sorted(param.keys()):
        name_best += '_'
        name_best += k
        name_best += str(best_param[k]).replace('.', ',')

    return name_best
コード例 #56
0
ファイル: shared_data_test.py プロジェクト: femtotrader/pypet
    def test_conversions(self):
        filename = make_temp_dir('hdf5manipulation.hdf5')
        traj = Trajectory(name=make_trajectory_name(self), filename=filename)

        trajname = traj.v_name
        traj.v_standard_result = SharedResult

        traj.f_store(only_init=True)

        traj.f_add_result('shared_data')

        thedata = np.zeros((1000, 1000))
        myarray = SharedArray('array', traj.shared_data, trajectory=traj)
        traj.shared_data['array'] = myarray
        mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
        traj.shared_data['t1'] = mytable
        dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
        dadict2 = {'answer': [42]}
        res = traj.f_add_result('shared.dfs')
        res['df'] = SharedPandasFrame()
        res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
        frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'), trajectory=traj,
                                  add_to_parent=True)
        frame.create_shared_data(data=pd.DataFrame(dadict2),)
        res['df1'] = frame

        traj.f_add_result('mylist', [1, 2, 3])
        traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
        traj.f_add_result('my.myarray', np.zeros((50, 50)))
        traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
        traj.f_add_result('my.mytable', ObjectTable(data=dadict2))

        myarray.create_shared_data(data=thedata)
        mytable.create_shared_data(first_row={'hi': compat.tobytes('hi'), 'huhu': np.ones(3)})

        traj.f_store()

        data = myarray.read()
        myarray.get_data_node()
        self.assertTrue(np.all(data == thedata))

        with StorageContextManager(traj):
            myarray[2, 2] = 10
            data = myarray.read()
            self.assertTrue(data[2, 2] == 10)

        self.assertTrue(data[2, 2] == 10)
        self.assertFalse(traj.v_storage_service.is_open)

        traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                               dynamic_imports=SharedResult)

        make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
        array = traj.shared_data.array
        self.assertTrue(isinstance(array, np.ndarray))
        thedata[2, 2] = 10
        self.assertTrue(np.all(array == thedata))

        make_ordinary_result(traj.shared_data, 't1', trajectory=traj,)
        t1 = traj.shared_data.t1
        self.assertTrue(isinstance(t1, ObjectTable))
        self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))

        dfs = traj.shared.dfs
        make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
        theframe = dfs.f_get('df')
        self.assertTrue(isinstance(dfs, Result))
        self.assertTrue(isinstance(theframe, pd.DataFrame))
        self.assertTrue(theframe['hi'][0] == 1)

        listres = traj.f_get('mylist')
        listres = make_shared_result(listres, 0, trajectory=traj)
        with StorageContextManager(traj):
            self.assertTrue(listres[0][2] == 3)
            listres[0][0] = 4

        self.assertTrue(listres[0][0] == 4)
        listres = make_ordinary_result(listres, 0, trajectory=traj)
        traj = load_trajectory(name=trajname, filename=filename, load_all=2,
                               dynamic_imports=SharedResult)
        mylist = traj.mylist
        self.assertTrue(isinstance(listres, Result))
        self.assertTrue(mylist[0] == 4)
        self.assertTrue(isinstance(mylist, list))

        mytuple = traj.mytuple

        with self.assertRaises(AttributeError):
            mytuple = make_shared_result(mytuple, 'mylist', traj, new_class=SharedArray)

        mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
        self.assertTrue(mytuple.k[1] == 2)

        mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
        self.assertTrue(isinstance(mytuple.k, tuple))
        self.assertTrue(mytuple.k[2] == 3)

        myframe = traj.myframe
        myframe = make_shared_result(myframe, 'data', traj)

        theframe = myframe.data.read()
        self.assertTrue(theframe['answer'][0] == 42)

        myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
        traj.f_load_item(myframe)
        self.assertTrue(myframe.data['answer'][0] == 42)

        mytable = traj.f_get('mytable')
        mytable = make_shared_result(mytable, 0, traj)

        self.assertTrue(isinstance(mytable[0], SharedTable))
        rows = mytable.mytable.read()

        self.assertTrue(rows[0][0] == 42)

        mytable = make_ordinary_result(mytable, 0, trajectory=traj)

        self.assertTrue(isinstance(mytable, Result))
        self.assertTrue(mytable[0]['answer'][0] == 42)