Example #1
    def spawn_processes(self, start_idx, steps_from_start):
        """
        Spawns processes each dedicated to an instance of CxSystem.
        """
        print(
            " -  The following configurations are going to be simulated with "
            "%d processes using the %s device (underscores stripped for "
            "readability): \n %s" % (self.number_of_process, self.device,
                                     str(self.final_namings).replace('_', '')))
        manager = multiprocessing.Manager()
        jobs = []
        working = manager.Value('i', 0)
        paths = manager.dict()
        self.final_metadata_df = self.final_metadata_df.loc[np.repeat(
            self.final_metadata_df.index.values,
            self.trials_per_config)].reset_index(drop=True)
        assert len(self.final_namings) < 1000, \
            ' -  This array run would launch more than 1000 simulations, which is not ' \
            'allowed unless you REALLY want it, in which case you should know what to do.'
        while len(jobs) < steps_from_start:
            time.sleep(1.5)
            if working.value < self.number_of_process:
                idx = start_idx + len(jobs)
                p = multiprocessing.Process(target=self.run_parameter_search,
                                            args=(idx, working, paths,
                                                  self.array_run_stdout_file))
                jobs.append(p)
                p.start()
        for j in jobs:
            j.join()

        for item in list(paths.keys()):
            # .loc avoids pandas chained-assignment pitfalls
            self.final_metadata_df.loc[item, 'Full path'] = paths[item]
        metadata_dir = os.path.dirname(paths[list(paths.keys())[0]])
        metadata_fullpath = os.path.join(metadata_dir, self.metadata_filename)
        write_to_file(metadata_fullpath, self.final_metadata_df)
        print(" -  Array run metadata saved at: %s" % metadata_fullpath)

        # Both branches clean the same .tmp folder; only the workspace key differs
        if self._is_running_locally():
            workspace_key = 'workspace_path'
        elif self._is_running_in_cluster():
            workspace_key = 'cluster_workspace'
        else:
            workspace_key = None

        if workspace_key is not None:
            tmp_folder_path = Path(
                parameter_finder(self.anatomy_df,
                                 workspace_key)).expanduser().joinpath(
                                     '.tmp' + self.suffix).as_posix()
            print("cleaning tmp folders " + tmp_folder_path)
            shutil.rmtree(tmp_folder_path)
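Example #1 throttles process creation by polling a shared counter that each worker increments while busy. A minimal self-contained sketch of the same pattern (the names _worker and spawn_throttled are illustrative, not from the source; incrementing a Manager Value is not atomic, which matches the original's best-effort throttling):

import multiprocessing
import time

def _worker(idx, working):
    # Mark this worker busy, do some work, then release the slot
    working.value += 1
    try:
        time.sleep(0.1)  # stand-in for the real simulation
    finally:
        working.value -= 1

def spawn_throttled(n_jobs, max_parallel):
    manager = multiprocessing.Manager()
    working = manager.Value('i', 0)
    jobs = []
    while len(jobs) < n_jobs:
        time.sleep(0.05)  # poll until a slot frees up
        if working.value < max_parallel:
            p = multiprocessing.Process(target=_worker,
                                        args=(len(jobs), working))
            jobs.append(p)
            p.start()
    for p in jobs:
        p.join()

if __name__ == '__main__':
    spawn_throttled(n_jobs=8, max_parallel=2)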
Example #2
 def save_connections_to_file(self):
     print(" -  Saving connections to file ...")
     # Find a free filename: append an increasing numeric suffix to the
     # original name until no file with that name exists.
     original_path = self.connections_export_path
     idx = 1
     while self.connections_export_path.is_file():
         self.connections_export_path = original_path.parent.joinpath(
             original_path.stem + '_{}'.format(idx) + original_path.suffix)
         idx += 1
     # Record the path only after the final name is settled
     self.connections['Full path'] = self.connections_export_path.as_posix()
     write_to_file(self.connections_export_path.as_posix(),
                   self.connections)
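The de-duplication loop in Examples #2 and #3 generalizes to a small helper; a hedged sketch (unique_path is an illustrative name, not from the source):

from pathlib import Path

def unique_path(path):
    # Return `path` if it is free, else the first 'stem_N.suffix' that is
    path = Path(path)
    candidate, idx = path, 1
    while candidate.is_file():
        candidate = path.parent / '{}_{}{}'.format(path.stem, idx, path.suffix)
        idx += 1
    return candidate

# Usage: export_path = unique_path('output/connections.gz')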
Example #3
 def save_results_to_file(self):
     print(" -  Saving results to file ...")
     # Same de-duplication as in save_connections_to_file
     original_path = self.results_export_path
     idx = 1
     while self.results_export_path.is_file():
         self.results_export_path = original_path.parent.joinpath(
             original_path.stem + '_{}'.format(idx) + original_path.suffix)
         idx += 1
     self.results['Full path'] = self.results_export_path.as_posix()
     write_to_file(self.results_export_path.as_posix(), self.results)
     print(" -  The output of the simulation is saved at: {}".format(
         self.results_export_path))
Example #4
    def run(self,
            ret,
            conditions,
            metadata,
            conditions_idx,
            conditions_metadata_idx_flat,
            n_trials=1,
            data_folder='',
            save_only_metadata=False):
        '''
        Unpack and run all conditions
        '''
        for this_key in metadata.keys():
            assert this_key in self.options.keys(), \
                f'Missing {this_key} in visual stimuli options, check stim param name'

        # Ensure data_folder exists, creating it if missing
        os.makedirs(data_folder, exist_ok=True)

        save_path = os.path.join(data_folder, 'metadata_conditions.gz')
        write_to_file(save_path, [
            metadata, conditions_idx, conditions_metadata_idx_flat,
            self.options
        ])

        if save_only_metadata:
            return

        # Replace with input options
        for idx, input_options in enumerate(conditions):
            self._replace_options(input_options)
            stim = vs.ConstructStimulus(**self.options)

            stim.save_to_file(
                filename=os.path.join(data_folder, 'Stim_' +
                                      conditions_idx[idx]))

            ret.load_stimulus(stim)

            example_gc = None  # a single-cell index (int), or None

            filename = os.path.join(data_folder,
                                    'Response_' + conditions_idx[idx])

            ret.run_cells(cell_index=example_gc,
                          n_trials=n_trials,
                          visualize=False,
                          save_data=True,
                          spike_generator_model='poisson',  # or 'refractory'
                          return_monitor=False,
                          filename=filename)
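Every example delegates serialization to write_to_file, whose body is not shown here. A minimal hypothetical sketch consistent with the call sites (pickle the object, gzip-compressing when the filename ends in .gz); the real helper may dispatch on more formats:

import gzip
import pickle

def write_to_file(save_path, data):
    # Pickle `data`, gzip-compressing when the extension asks for it
    opener = gzip.open if str(save_path).endswith('.gz') else open
    with opener(str(save_path), 'wb') as fh:
        pickle.dump(data, fh)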
Example #5
 def save_input_sequence(self, spike_mons, save_path):
     print(" -  Saving the generated video input...")
     self.generated_input_folder = save_path + self.output_file_extension
     data_to_save = {}
     for ii, spike_mon in enumerate(spike_mons):
         # Store each monitor's spike indices and spike times as plain arrays
         data_to_save['spikes_' + str(ii)] = [
             spike_mon.it[0].__array__(),
             spike_mon.it[1].__array__(),
         ]
     data_to_save['w_coord'] = self.w_coord
     data_to_save['z_coord'] = self.z_coord
     write_to_file(self.generated_input_folder, data_to_save)
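For completeness, a matching read-side sketch for files written by the hypothetical write_to_file above (read_from_file is illustrative), unpacking one monitor's spikes:

import gzip
import pickle

def read_from_file(load_path):
    # Counterpart of the hypothetical write_to_file sketch above
    opener = gzip.open if str(load_path).endswith('.gz') else open
    with opener(str(load_path), 'rb') as fh:
        return pickle.load(fh)

# Usage, assuming a file produced by save_input_sequence:
# data = read_from_file('input_sequence.gz')
# neuron_indices, spike_times = data['spikes_0']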
Example #6
    def inter_os_metadata_mapper(self, metapath_pkl_download_fullfile):
        '''
        Check whether the cluster_metadata .pkl file refers to folders that exist
        on the current system. If not, assume that the parent of the .pkl file's
        folder (named cluster_run...) is the local workspace folder.
        '''

        cluster_metafile_dict = self.get_data(metapath_pkl_download_fullfile)

        # Check whether the current folder matches cluster_metafile_dict['local_workspace']
        pkl_path = PurePath(metapath_pkl_download_fullfile)
        local_workspace = pkl_path.parents[1].as_posix()
        if local_workspace == cluster_metafile_dict['local_workspace']:
            print('local_workspace matches cluster_metadata.pkl local_workspace, no need to update')
        else:  # If not, back up the old workspace entries, then set the local workspace
            cluster_metafile_dict['local_workspace_backup'] = cluster_metafile_dict['local_workspace']
            cluster_metafile_dict['local_workspace_unexpanded_backup'] = cluster_metafile_dict['local_workspace_unexpanded']
            cluster_metafile_dict['local_workspace'] = local_workspace
            cluster_metafile_dict['local_workspace_unexpanded'] = local_workspace

        local_cluster_run_download_folder = pkl_path.parents[0].joinpath('downloads').as_posix()
        local_cluster_run_folder = pkl_path.parents[0].as_posix()
        if local_cluster_run_download_folder == cluster_metafile_dict['local_cluster_run_download_folder']:
            print('local_cluster_run_download_folder matches cluster_metadata.pkl local_cluster_run_download_folder, no need to update')
        else:  # If not, back up the old values and set the local ones
            cluster_metafile_dict['local_cluster_run_download_folder_backup'] = cluster_metafile_dict['local_cluster_run_download_folder']
            cluster_metafile_dict['local_cluster_run_download_folder'] = local_cluster_run_download_folder
            cluster_metafile_dict['local_cluster_run_folder_backup'] = cluster_metafile_dict['local_cluster_run_folder']
            cluster_metafile_dict['local_cluster_run_folder'] = local_cluster_run_folder

        backup_project_data_folder = cluster_metafile_dict['project_data_folder']
        project_folder_name = PurePath(backup_project_data_folder).parts[-1]
        local_project_data_folder = Path(local_workspace).joinpath(project_folder_name).as_posix()
        cluster_metafile_dict['project_data_folder_backup'] = cluster_metafile_dict['project_data_folder']
        cluster_metafile_dict['project_data_folder'] = local_project_data_folder

        # Save the transformed cluster_metadata file
        write_to_file(metapath_pkl_download_fullfile, cluster_metafile_dict)
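Example #6 derives its folders from PurePath.parents, where parents[0] is the containing directory and parents[1] its parent. A tiny sketch with a hypothetical path:

from pathlib import PurePath

pkl = PurePath('/home/user/workspace/cluster_run_x/cluster_metadata.pkl')
print(pkl.parents[0].as_posix())  # /home/user/workspace/cluster_run_x
print(pkl.parents[1].as_posix())  # /home/user/workspace  (taken as the local workspace)
print(pkl.parents[0].joinpath('downloads').as_posix())  # .../cluster_run_x/downloads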
Example #7
    def cluster_metadata_compiler_and_data_transfer(self, metapath_pkl_download_fullfile):
        '''
        Cluster metadata arrives in partial files (metadata_part_1..., etc.).
        This method combines those parts into a single metadata file for
        analysis and visualization.

        It also adds a folder whose name corresponds to the "simulation title"
        parameter in the anatomy csv. metapath_pkl_download_fullfile is the full
        path to the .pkl file containing global metadata about the cluster run;
        it should sit in the downloads folder after the results have been
        downloaded from the cluster.
        '''

        metafile_master_dict = self.get_data(metapath_pkl_download_fullfile)
        local_cluster_run_download_folder = metafile_master_dict['local_cluster_run_download_folder']
        metapathfiles = os.listdir(local_cluster_run_download_folder)
        metafiles_list = [f for f in metapathfiles if 'metadata_part' in f]
        metafiles_fullfile_list = []
        for this_file in metafiles_list:
            metafiles_fullfile_list.append(os.path.join(local_cluster_run_download_folder, this_file))
        
        # Read first metadata file to df
        metafile_cluster_paths_df = self.get_data(metafiles_fullfile_list[0])
        
        # Go through all files in the list
        for this_file in metafiles_fullfile_list[1:]:
            this_df = self.get_data(this_file)
            # Copy the non-NaN values of the 'Full path' column into metafile_cluster_paths_df
            notna_idx = this_df['Full path'].notna().values
            metafile_cluster_paths_df.loc[notna_idx, 'Full path'] = this_df.loc[notna_idx, 'Full path']

        # Get simulation folder name, a.k.a. condition, from metafile_master_dict['cluster_simulation_folder']
        cluster_simulation_folder = metafile_master_dict['cluster_simulation_folder']
        simulation_folder_name = os.path.split(cluster_simulation_folder)[1]

        # Change cluster_simulation_folder root to final local_workspace
        path_to_remove = Path(os.path.split(cluster_simulation_folder)[0])
        path_to_paste = Path(metafile_master_dict['local_workspace'])
        metafile_local_paths_df = copy.deepcopy(metafile_cluster_paths_df)
        # Change paths in-place; regex=False so the path is matched literally
        metafile_local_paths_df['Full path'] = metafile_local_paths_df['Full path'].str.replace(
            pat=path_to_remove.as_posix(), repl=path_to_paste.as_posix(), regex=False)

        # Create the local simulation folder
        local_simulation_folder = path_to_paste.joinpath(simulation_folder_name)
        local_simulation_folder.mkdir(exist_ok=True)

        # Save compiled metadata file to final local repo
        meta_file_name = f'metadata_{metafile_master_dict["suffix"]}.gz'
        meta_fullpath_out = os.path.join(path_to_paste, simulation_folder_name, meta_file_name)
        write_to_file(meta_fullpath_out, metafile_local_paths_df)

        # Move relevant files to final local repo
        local_download_folder = metafile_master_dict['local_cluster_run_download_folder']
        path_to_paste_download = Path(local_download_folder)
        local_download_paths_S = copy.deepcopy(metafile_cluster_paths_df['Full path'])
        local_download_paths_S = local_download_paths_S.str.replace(
            pat=path_to_remove.joinpath(simulation_folder_name).as_posix(),
            repl=path_to_paste_download.as_posix(), regex=False)
        local_final_paths_S = copy.deepcopy(metafile_local_paths_df['Full path'])

        # Move each data file to its final location; Path.replace() renames
        # without copying the data
        try:
            for dp, fp in zip(local_download_paths_S.values, local_final_paths_S):
                Path(dp).replace(fp)
        except FileNotFoundError:
            print('Did not find data files, maybe they were already moved. Continuing...')

        # Update master metadata file, write new metadatafile to project folder
        metafile_master_dict['project_data_folder'] = local_simulation_folder.as_posix()
        metadata_pkl_filename = os.path.split(metapath_pkl_download_fullfile)[1]
        metapath_pkl_final_fullfile = local_simulation_folder.joinpath(metadata_pkl_filename)
        write_to_file(metapath_pkl_final_fullfile, metafile_master_dict)
        
        # Move the anat and phys files to the final local repo the same way
        allfiles = os.listdir(local_cluster_run_download_folder)
        anat_phys_filename_list = [f for f in allfiles if 'anat' in f or 'phys' in f]
        anat_phys_download_fullfile_list = []
        anat_phys_final_fullfile_list = []
        for this_file in anat_phys_filename_list:
            anat_phys_download_fullfile_list.append(os.path.join(local_cluster_run_download_folder, this_file))
            anat_phys_final_fullfile_list.append(os.path.join(local_simulation_folder.as_posix(), this_file))

        try:
            for dp, fp in zip(anat_phys_download_fullfile_list, anat_phys_final_fullfile_list):
                Path(dp).replace(fp)
        except FileNotFoundError:
            print('Did not find anat & phys files, maybe they were already moved. Continuing...')
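Examples #6 and #7 move files with Path.replace(), which renames without copying data and raises FileNotFoundError when the source is missing; a tiny runnable sketch:

import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
src = tmp / 'a.txt'
src.write_text('payload')
dst = tmp / 'b.txt'
src.replace(dst)         # a rename, atomic on the same filesystem
print(dst.read_text())   # payload
print(src.exists())      # False: moved, not copied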
Example #8
    def replace_connections(self,
                            show_histograms=False,
                            constant_scaling=False,
                            constant_value=1e-9,
                            randomize_connections_list=None):
        '''
        After creating a CxSystem neural system with the correct cell numbers and
        random connectivity, assign the precomputed connection weights to that
        system here.
        '''
        # Avoid the mutable-default-argument pitfall
        if randomize_connections_list is None:
            randomize_connections_list = []

        mat_data_dict = self.get_data(self.workspace_deneve_filename)
        connection_skeleton_dict = self.get_data(
            self.connection_skeleton_filename_in)

        mat_keys = ['FsE', 'CsEE', 'CsEI', 'CsIE', 'CsII', 'DecsE', 'DecsI']

        # All required mat keys must be present in the loaded data
        assert all(x in mat_data_dict
                   for x in mat_keys), 'Some mat keys not found, aborting...'

        match_connection_names = {
            'relay_vpm__to__L4_CI_SS_L4_soma': 'FsE',
            'L4_CI_SS_L4__to__L4_CI_SS_L4_soma': 'CsEE',
            'L4_CI_SS_L4__to__L4_CI_BC_L4_soma': 'CsEI',
            'L4_CI_BC_L4__to__L4_CI_SS_L4_soma': 'CsIE',
            'L4_CI_BC_L4__to__L4_CI_BC_L4_soma': 'CsII',
            'L4_CI_SS_L4__to__L4_SS_L4_soma': 'DecsE',
            'L4_CI_BC_L4__to__L4_SS_L4_soma': 'DecsI'
        }

        # Which phase of the learned connections to select: zero-based index 28
        # is the 29th phase, i.e. after teaching
        mat_teach_idx = 28
        connection_final_dict = connection_skeleton_dict

        # We need to turn Deneve's negative inhibitory connections positive for
        # CxSystem. These connections feed gi, which has no driving force because
        # they are of I_NDF type; there the conductance itself is negative, which
        # is necessary for inhibition without a driving force. DecsI has both
        # negative and positive connection strengths (optimized for decoding in
        # Deneve's code).
        inh_keys = ['CsIE', 'CsII', 'DecsI']

        for this_connection in match_connection_names.keys():
            # Get cxsystem connection strengths (i.e. physiology parameters J, J_I,
            # k*J or k_I*J_I, multiplied by n synapses/connection)
            data_cx = connection_skeleton_dict[this_connection]['data']
            # Get mat_data connection strengths. Transpose because the matlab data
            # is unintuitively (post, pre), except for FsE
            data_mat = mat_data_dict[match_connection_names[this_connection]][
                mat_teach_idx, :, :].T
            if match_connection_names[this_connection] == 'FsE':
                # FsE is the only (pre, post) matrix in the matlab code (sic!)
                data_mat = data_mat.T

            assert data_mat.shape == data_cx.shape, 'Connection shape mismatch, aborting...'

            # Scale mat_data to the min and max of the cxsystem connection
            # strengths (excluding zeros). With constant scaling, just scale by
            # constant_value without any other transformation.
            if constant_scaling:
                data_out = self.scale_with_constant(
                    data_mat, constant_value=constant_value)
            else:
                data_out = self.scale_values(data_mat,
                                             target_data=data_cx,
                                             skip_under_zeros_in_scaling=False)

            # Turn Deneve's negative inhibitory connections to positive for CxSystem
            if match_connection_names[this_connection] in inh_keys:
                data_out = data_out * -1

            # Randomize by request for control conditions
            if match_connection_names[
                    this_connection] in randomize_connections_list:
                rng = np.random.default_rng()
                # Randomly permute post connections separately for each pre unit.
                data_out = rng.permuted(data_out, axis=1)

            # Visualize on request
            if show_histograms:
                self._show_histogram(data_cx,
                                     figure_title=this_connection,
                                     skip_under_one_pros=False)
                self._show_histogram(
                    data_mat,
                    figure_title=match_connection_names[this_connection],
                    skip_under_one_pros=False)
                # L4_BC_L4__to__L4_CI_SS_L4_soma_out
                self._show_histogram(data_out,
                                     figure_title=this_connection + '_out',
                                     skip_under_one_pros=False)
                plt.show()

            # Store the scaled values as sparse matrices
            connection_final_dict[this_connection]['data'] = scprs.csr_matrix(
                data_out)
            connection_final_dict[this_connection]['n'] = 1

        savepath = os.path.join(self.path, self.connection_filename_out)
        write_to_file(savepath, connection_final_dict)
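The randomization control in Example #8 relies on numpy's Generator.permuted, which with axis=1 shuffles each row independently (here: each pre unit's post connections). A small sketch of the effect:

import numpy as np

rng = np.random.default_rng(seed=0)
weights = np.arange(12).reshape(3, 4)
shuffled = rng.permuted(weights, axis=1)
print(weights)
print(shuffled)  # each row holds the same values, independently reordered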