Example #1
    def test_file_overwriting(self):
        self.traj.f_store()

        with pt.open_file(self.filename, mode='r') as file:
            nchildren = len(file.root._v_children)
            self.assertTrue(nchildren > 0)

        env2 = Environment(filename=self.filename,
                           log_config=get_log_config())
        traj2 = env2.v_trajectory
        traj2.f_store()
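        # The file now holds both trajectories, so the HDF5 root gains a second child node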

        self.assertTrue(os.path.exists(self.filename))

        with pt.open_file(self.filename, mode='r') as file:
            nchildren = len(file.root._v_children)
            self.assertTrue(nchildren > 1)

        env3 = Environment(filename=self.filename, overwrite_file=True,
                           log_config=get_log_config())
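        # With overwrite_file=True the existing HDF5 file is deleted as soon as the environment is created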

        self.assertFalse(os.path.exists(self.filename))

        env2.f_disable_logging()
        env3.f_disable_logging()
Example #2
    def test_time_display_of_loading(self):
        filename = make_temp_dir('sloooow.hdf5')
        env = Environment(trajectory='traj', add_time=True, filename=filename,
                          log_stdout=False,
                          log_config=get_log_config(),
                          dynamic_imports=SlowResult,
                          display_time=0.1)
        traj = env.v_traj
        res = traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')
        traj.f_store()
        service_logger = traj.v_storage_service._logger
        root = logging.getLogger('pypet')
        old_level = root.level
        service_logger.setLevel(logging.INFO)
        root.setLevel(logging.INFO)
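        # At INFO level the storage service reports loading progress, including a 'nodes/s)' rate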

        traj.f_load(load_data=3)
        service_logger.setLevel(old_level)
        root.setLevel(old_level)

        path = get_log_path(traj)
        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()
            self.assertTrue('nodes/s)' in full_text)

        env.f_disable_logging()
Example #3
    def test_file_overwriting(self):
        self.traj.f_store()

        with ptcompat.open_file(self.filename, mode='r') as file:
            nchildren = len(file.root._v_children)
            self.assertTrue(nchildren > 0)

        env2 = Environment(filename=self.filename,
                           log_config=get_log_config())
        traj2 = env2.v_trajectory
        traj2.f_store()

        self.assertTrue(os.path.exists(self.filename))

        with ptcompat.open_file(self.filename, mode='r') as file:
            nchildren = len(file.root._v_children)
            self.assertTrue(nchildren > 1)

        env3 = Environment(filename=self.filename, overwrite_file=True,
                           log_config=get_log_config())
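        # Creating an environment with overwrite_file=True removes the existing file right away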

        self.assertFalse(os.path.exists(self.filename))

        env2.f_disable_logging()
        env3.f_disable_logging()
Example #4
    def test_time_display_of_loading(self):
        filename = make_temp_dir('sloooow.hdf5')
        env = Environment(trajectory='traj', add_time=True, filename=filename,
                          log_stdout=False,
                          log_config=get_log_config(),
                          dynamic_imports=SlowResult,
                          display_time=0.1)
        traj = env.v_traj
        res = traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')
        traj.f_store()
        service_logger = traj.v_storage_service._logger
        root = logging.getLogger('pypet')
        old_level = root.level
        service_logger.setLevel(logging.INFO)
        root.setLevel(logging.INFO)
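        # INFO-level logging makes the loading progress (and its 'nodes/s)' rate) end up in the log file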

        traj.f_load(load_data=3)
        service_logger.setLevel(old_level)
        root.setLevel(old_level)

        path = get_log_path(traj)
        mainfilename = os.path.join(path, 'LOG.txt')
        with open(mainfilename, mode='r') as mainf:
            full_text = mainf.read()
            self.assertTrue('nodes/s)' in full_text)

        env.f_disable_logging()
Example #5
def main():
    # Let's be very verbose!
    logging.basicConfig(level=logging.INFO)


    # Let's do multiprocessing this time with a lock (which is default)
    filename = os.path.join('hdf5', 'example_07.hdf5')
    env = Environment(trajectory='Example_07_BRIAN',
                      filename=filename,
                      file_title='Example_07_Brian',
                      comment='Go Brian!',
                      dynamically_imported_classes=[BrianMonitorResult, BrianParameter],
                      multiproc=True,
                      wrap_mode='QUEUE',
                      ncores=2)
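    # `dynamically_imported_classes` lets the trajectory re-create the custom BRIAN
    # parameter and result classes when it is loaded from disk again.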

    traj = env.v_trajectory

    # 1st a) add the parameters
    add_params(traj)

    # 1st b) prepare, we want to explore the different network sizes and different tauw time scales
    traj.f_explore(cartesian_product({traj.f_get('N').v_full_name: [50, 60],
                                      traj.f_get('tauw').v_full_name: [30*ms, 40*ms]}))
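    # This is a 2 x 2 cartesian product, i.e. 4 runs in total.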

    # 2nd let's run our experiment
    env.f_run(run_net)

    # You can take a look at the results in the hdf5 file if you want!

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #6
def main():
    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    env = Environment(trajectory='Clustered_Network',
                      add_time=False,
                      filename=filename,
                      continuable=False,
                      lazy_debug=False,
                      multiproc=True,
                      ncores=2,
                      use_pool=False, # We cannot use a pool, our network cannot be pickled
                      wrap_mode='QUEUE',
                      overwrite_file=True)

    # Get the trajectory container
    traj = env.v_trajectory

    # We introduce a `meta` parameter that we can use to easily rescale our network
    scale = 0.5  # To obtain the results from the paper set this to 1.0
    # Be aware that your machine will need a lot of memory then!
    traj.f_add_parameter('simulation.scale', scale,
            comment='Meta parameter that can scale default settings. '
                    'Rescales the number of neurons and connection strengths, but '
                    'not the cluster size.')


    # We create a Manager and pass all our components to the Manager.
    # Note the order, CNNeuronGroups are scheduled before CNConnections,
    # and the Fano Factor computation depends on the CNMonitorAnalysis
    clustered_network_manager = NetworkManager(network_runner=CNNetworkRunner(),
                                component_list=(CNNeuronGroup(), CNConnections()),
                                analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add original parameters (but scaled according to `scale`)
    clustered_network_manager.add_parameters(traj)

    # We need `tolist` here since our parameter is a python float and not a
    # numpy float.
    explore_list = np.arange(1.0, 2.6, 0.2).tolist()
    # Explore different values of `R_ee`
    traj.f_explore({'R_ee' : explore_list})
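    # np.arange(1.0, 2.6, 0.2) yields eight values, so the trajectory will consist of eight runs.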

    # Pre-build network components
    clustered_network_manager.pre_build(traj)


    # Run the network simulation
    traj.f_store() # Let's store the parameters already before the run
    env.f_run(clustered_network_manager.run_network)

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #7
def main():
    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    env = Environment(trajectory='Clustered_Network',
                      add_time=False,
                      filename=filename,
                      continuable=False,
                      lazy_debug=False,
                      multiproc=True,
                      ncores=2,
                      use_pool=False, # We cannot use a pool, our network cannot be pickled
                      wrap_mode='QUEUE',
                      overwrite_file=True)
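    # overwrite_file=True discards any HDF5 file left over from an earlier run.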

    # Get the trajectory container
    traj = env.v_trajectory

    # We introduce a `meta` parameter that we can use to easily rescale our network
    scale = 1.0  # Full scale, as used in the paper
    # Be aware that your machine will need a lot of memory!
    traj.f_add_parameter('simulation.scale', scale,
            comment='Meta parameter that can scale default settings. '
                    'Rescales the number of neurons and connection strengths, but '
                    'not the cluster size.')


    # We create a Manager and pass all our components to the Manager.
    # Note the order, CNNeuronGroups are scheduled before CNConnections,
    # and the Fano Factor computation depends on the CNMonitorAnalysis
    clustered_network_manager = NetworkManager(network_runner=CNNetworkRunner(),
                                component_list=(CNNeuronGroup(), CNConnections()),
                                analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add original parameters (but scaled according to `scale`)
    clustered_network_manager.add_parameters(traj)

    # We need `tolist` here since our parameter is a python float and not a
    # numpy float.
    explore_list = np.arange(1.0, 2.6, 0.2).tolist()
    # Explore different values of `R_ee`
    traj.f_explore({'R_ee' : explore_list})

    # Pre-build network components
    clustered_network_manager.pre_build(traj)


    # Run the network simulation
    traj.f_store() # Let's store the parameters already before the run
    env.f_run(clustered_network_manager.run_network)

    # Finally disable logging and close all log-files
    env.f_disable_logging()
Example #8
class LinkMergeTest(TrajectoryComparator):

    tags = 'integration', 'hdf5', 'environment', 'links', 'merge'

    def test_merge_with_linked_derived_parameter(self, disable_logging=True):
        logging.basicConfig(level=logging.ERROR)

        self.logfolder = make_temp_dir(
            os.path.join('experiments', 'tests', 'Log'))

        random.seed()
        self.trajname1 = 'T1' + make_trajectory_name(self)
        self.trajname2 = 'T2' + make_trajectory_name(self)
        self.filename = make_temp_dir(
            os.path.join('experiments', 'tests', 'HDF5',
                         'test%s.hdf5' % self.trajname1))

        self.env1 = Environment(trajectory=self.trajname1,
                                filename=self.filename,
                                file_title=self.trajname1,
                                log_stdout=False,
                                log_config=get_log_config())
        self.env2 = Environment(trajectory=self.trajname2,
                                filename=self.filename,
                                file_title=self.trajname2,
                                log_stdout=False,
                                log_config=get_log_config())

        self.traj1 = self.env1.v_trajectory
        self.traj2 = self.env2.v_trajectory

        create_link_params(self.traj1)
        create_link_params(self.traj2)

        explore_params(self.traj1)
        explore_params2(self.traj2)

        self.traj1.f_add_derived_parameter('test.$.gg', 42)
        self.traj2.f_add_derived_parameter('test.$.gg', 44)

        self.traj1.f_add_derived_parameter('test.hh.$', 111)
        self.traj2.f_add_derived_parameter('test.hh.$', 53)
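        # `$` is pypet's run wildcard; the values stored here are read back below
        # via the matching `crun` wildcard ('test.crun.gg' and 'test.hh.crun').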

        self.env1.f_run(dostuff_and_add_links)
        self.env2.f_run(dostuff_and_add_links)

        old_length = len(self.traj1)

        self.traj1.f_merge(self.traj2, remove_duplicates=True)
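        # After the merge traj1 contains the runs of both trajectories; runs that
        # came from traj2 keep that trajectory's values (44 and 53).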

        self.traj1.f_load(load_data=2)

        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            if idx < old_length:
                self.assertTrue(param == 42)
            else:
                self.assertTrue(param == 44)

            param = self.traj1['test.hh.crun']
            if idx < old_length:
                self.assertTrue(param == 111)
            else:
                self.assertTrue(param == 53)

        self.assertTrue(len(self.traj1) > old_length)

        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
            self.assertTrue(
                self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)

        if disable_logging:
            self.env1.f_disable_logging()
            self.env2.f_disable_logging()

        return old_length

    def test_remerging(self):
        prev_old_length = self.test_merge_with_linked_derived_parameter(
            disable_logging=False)

        name = self.traj1

        self.bfilename = make_temp_dir(
            os.path.join('experiments', 'tests', 'HDF5',
                         'backup_test%s.hdf5' % self.trajname1))

        self.traj1.f_load(load_data=2)

        self.traj1.f_backup(backup_filename=self.bfilename)

        self.traj3 = load_trajectory(index=-1,
                                     filename=self.bfilename,
                                     load_all=2)

        old_length = len(self.traj1)

        self.traj1.f_merge(self.traj3, backup=False, remove_duplicates=False)

        self.assertTrue(len(self.traj1) > old_length)

        self.traj1.f_load(load_data=2)

        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 42, '%s != 42' % str(param))
            else:
                self.assertTrue(param == 44, '%s != 44' % str(param))

            param = self.traj1['test.hh.crun']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 111, '%s != 111' % str(param))
            else:
                self.assertTrue(param == 53, '%s != 53' % str(param))

        self.assertTrue(len(self.traj1) > old_length)

        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(
                self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)

        self.env1.f_disable_logging()
        self.env2.f_disable_logging()
Example #9
class LinkMergeTest(TrajectoryComparator):

    tags = 'integration', 'hdf5', 'environment', 'links', 'merge'

    def test_merge_with_linked_derived_parameter(self, disable_logging=True):
        logging.basicConfig(level=logging.ERROR)

        self.logfolder = make_temp_dir(os.path.join('experiments',
                                                      'tests',
                                                      'Log'))

        random.seed()
        self.trajname1 = 'T1' + make_trajectory_name(self)
        self.trajname2 = 'T2' + make_trajectory_name(self)
        self.filename = make_temp_dir(os.path.join('experiments',
                                                    'tests',
                                                    'HDF5',
                                                    'test%s.hdf5' % self.trajname1))

        self.env1 = Environment(trajectory=self.trajname1, filename=self.filename,
                                file_title=self.trajname1,
                                log_stdout=False, log_config=get_log_config())
        self.env2 = Environment(trajectory=self.trajname2, filename=self.filename,
                                file_title=self.trajname2,
                                log_stdout=False, log_config=get_log_config())

        self.traj1 = self.env1.v_trajectory
        self.traj2 = self.env2.v_trajectory

        create_link_params(self.traj1)
        create_link_params(self.traj2)

        explore_params(self.traj1)
        explore_params2(self.traj2)

        self.traj1.f_add_derived_parameter('test.$.gg', 42)
        self.traj2.f_add_derived_parameter('test.$.gg', 44)

        self.traj1.f_add_derived_parameter('test.hh.$', 111)
        self.traj2.f_add_derived_parameter('test.hh.$', 53)

        self.env1.f_run(dostuff_and_add_links)
        self.env2.f_run(dostuff_and_add_links)

        old_length = len(self.traj1)

        self.traj1.f_merge(self.traj2, remove_duplicates=True)

        self.traj1.f_load(load_data=2)

        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            if idx < old_length:
                self.assertTrue(param == 42)
            else:
                self.assertTrue(param == 44)

            param = self.traj1['test.hh.crun']
            if idx < old_length:
                self.assertTrue(param == 111)
            else:
                self.assertTrue(param == 53)

        self.assertTrue(len(self.traj1) > old_length)

        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)
            self.assertTrue(self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)

        if disable_logging:
            self.env1.f_disable_logging()
            self.env2.f_disable_logging()

        return old_length

    def test_remerging(self):
        prev_old_length = self.test_merge_with_linked_derived_parameter(disable_logging=False)

        name = self.traj1

        self.bfilename = make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'backup_test%s.hdf5' % self.trajname1))

        self.traj1.f_load(load_data=2)

        self.traj1.f_backup(backup_filename=self.bfilename)

        self.traj3 = load_trajectory(index=-1, filename=self.bfilename, load_all=2)

        old_length = len(self.traj1)

        self.traj1.f_merge(self.traj3, backup=False, remove_duplicates=False)
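        # Since duplicates are not removed, every run of the backup copy is appended again.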

        self.assertTrue(len(self.traj1) > old_length)

        self.traj1.f_load(load_data=2)

        for run in self.traj1.f_get_run_names():
            self.traj1.v_crun = run
            idx = self.traj1.v_idx
            param = self.traj1['test.crun.gg']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 42, '%s != 42' % str(param))
            else:
                self.assertTrue(param == 44, '%s != 44' % str(param))

            param = self.traj1['test.hh.crun']
            if idx < prev_old_length or old_length <= idx < prev_old_length + old_length:
                self.assertTrue(param == 111, '%s != 111' % str(param))
            else:
                self.assertTrue(param == 53, '%s != 53' % str(param))

        self.assertTrue(len(self.traj1) > old_length)

        for irun in range(len(self.traj1.f_get_run_names())):
            self.assertTrue(self.traj1.res.runs['r_%d' % irun].paraBL == self.traj1.paramB)
            self.assertTrue(self.traj1.res['r_%d' % irun] == self.traj1.paramB)

        self.env1.f_disable_logging()
        self.env2.f_disable_logging()