Example #1
def run_experiments(model,experiment_list,parameters,load_from=None):
    """
    This function is called by :func:`.run_workflow`; it executes the experiments in `experiment_list` on the model.
    Alternatively, if `load_from` is specified, it loads an existing simulation from the path given in `load_from`.
    
    Parameters
    ----------
    
    model : Model
          The model to execute experiments on.
    
    experiment_list : list
          The list of experiments to execute.
    
    parameters : ParameterSet
               The parameters given to the simulation run.
          
    load_from : str
              If not None it will load the simulation from the specified directory.
              
    Returns
    -------
    
    data_store : DataStore
               The data store containing the recordings.
    """
    
    # first let's run all the measurements required by the experiments
    logger.info('Starting Experiments')
    if load_from is None:
        data_store = PickledDataStore(load=False,
                                      parameters=MozaikExtendedParameterSet({'root_directory': Global.root_directory,'store_stimuli' : parameters.store_stimuli}))
    else: 
        data_store = PickledDataStore(load=True,
                                      parameters=MozaikExtendedParameterSet({'root_directory': load_from,'store_stimuli' : parameters.store_stimuli}))
    
    data_store.set_neuron_ids(model.neuron_ids())
    data_store.set_neuron_positions(model.neuron_positions())
    data_store.set_neuron_annotations(model.neuron_annotations())
    
    t0 = time.time()
    simulation_run_time=0
    for i,experiment in enumerate(experiment_list):
        logger.info('Starting experiment: ' + experiment.__class__.__name__)
        stimuli = experiment.return_stimuli()
        unpresented_stimuli = data_store.identify_unpresented_stimuli(stimuli)
        logger.info('Running model')
        simulation_run_time += experiment.run(data_store,unpresented_stimuli)
        logger.info('Experiment %d/%d finished' % (i+1,len(experiment_list)))
    
    total_run_time = time.time() - t0
    mozaik_run_time = total_run_time - simulation_run_time
    
    logger.info('Total simulation run time: %.0fs' % total_run_time)
    logger.info('Simulator run time: %.0fs (%d%%)' % (simulation_run_time, int(simulation_run_time /total_run_time * 100)))
    logger.info('Mozaik run time: %.0fs (%d%%)' % (mozaik_run_time, int(mozaik_run_time /total_run_time * 100)))
    
    return data_store
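A minimal usage sketch of run_experiments (hedged: `model`, `experiments` and `params` are assumed to have been built by the surrounding workflow, and the results path is a placeholder):

# Hypothetical driver code, not part of the original example.
# Fresh run: simulate every stimulus not yet present in the data store.
data_store = run_experiments(model, experiments, params)
# Resuming: load a previous run's recordings and only simulate the stimuli
# that have not been presented yet.
data_store = run_experiments(model, experiments, params,
                             load_from='results/previous_run')
data_store.save()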
Example #2
def load_fixed_parameter_set_parameter_search(simulation_name,
                                              master_results_dir,
                                              filter=None):
    """
    Loads all datastores of parameter search over a fixed set of parameters. 
    
    Parameters
    ----------
    simulation_name : str
                    The name of the simulation.
    master_results_dir : str
                       The directory where the parameter search results are stored.
    filter : Query, optional
           If not None, this query is applied to each loaded datastore and analysis data structures outside of the resulting view are removed.

    Returns
    -------
    A tuple (parameters, datastores, number_of_unloadable_datastores), where `parameters` is the list of parameter names over which
    the parameter search was performed, `datastores` is a list of tuples (values, datastore) in which `values` lists the parameter
    values (in the same order as `parameters`) and `datastore` is the DataStore holding the results recorded for that combination of
    parameter values, and `number_of_unloadable_datastores` is the number of result directories that could not be loaded.
    """
    f = open(master_results_dir + '/parameter_combinations', 'rb')
    combinations = pickle.load(f)
    f.close()

    # first check whether all parameter combinations contain the same parameter names
    assert len(
        set([tuple(set(comb.keys())) for comb in combinations])
    ) == 1, "The parameter search didn't occur over a fixed set of parameters"

    parameters = combinations[0].keys()

    datastore = []
    number_of_unloadable_datastores = 0
    for i, combination in enumerate(combinations):
        print i
        rdn = result_directory_name('ParameterSearch', simulation_name,
                                    combination)
        try:
            data_store = PickledDataStore(load=True,
                                          parameters=ParameterSet({
                                              'root_directory':
                                              master_results_dir + '/' + rdn,
                                              'store_stimuli':
                                              False
                                          }),
                                          replace=False)
            if filter != None:
                filter.query(data_store).remove_ads_outside_of_dsv()

            datastore.append(
                ([combination[k] for k in parameters], data_store))
        except IOError:
            number_of_unloadable_datastores = number_of_unloadable_datastores + 1
            print "Error loading datastore: " + rdn

    return (parameters, datastore, number_of_unloadable_datastores)
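A short, hedged usage sketch for the loader above; the simulation name and results directory are placeholders:

# Hypothetical usage, assuming a finished parameter search over a fixed parameter set.
parameters, datastores, n_failed = load_fixed_parameter_set_parameter_search(
    'MyModel', 'results/MyModel_ParameterSearch')
print "unloadable result directories:", n_failed
for values, dsv in datastores:
    # `values` follows the order of `parameters`; `dsv` holds that combination's results
    print dict(zip(parameters, values))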
Example #3
def exportToElphy(data_store_location,elphy_export_location,sheets=None,threshold=None):
    import os.path
    if not os.path.isdir(elphy_export_location):
       if os.path.exists(elphy_export_location):
          raise ValueError("The elphy export path is not a directory")
       else:
          os.makedirs(elphy_export_location)
              
    setup_logging()
    data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':data_store_location, 'store_stimuli' : False}))
    ps = MP.parameter_value_list([MP.MozaikParametrized.idd(s) for s in data_store.get_stimuli()],'name')
    for i,sn in enumerate(ps):
        if sheets == None: sheets = data_store.sheets() 
        
        for shn in sheets:
            dsv = param_filter_query(data_store,st_name = sn,sheet_name = shn)
            if dsv.get_stimuli() == []: continue
            varying_parameters = MP.varying_parameters([MP.MozaikParametrized.idd(s) for s in dsv.get_stimuli()])
            
            segments,stimuli = MP.colapse(dsv.get_segments(),[MP.MozaikParametrized.idd(s) for s in dsv.get_stimuli()],parameter_list=['trial'],allow_non_identical_objects=True)
            j = 0 
            for segs,st in zip(segments,stimuli):
                # just make sure all segments are fully loaded; in the future this should probably be sorted out so that this line can be deleted
                for s in segs: s.load_full()
                
                # create file name:
                filename = "name=" + sn + "#" + "sheet_name=" + shn 
                for pn in varying_parameters:
                    if pn != "trial":
                        filename += "#" + str(pn) + "=" + str(getattr(MP.MozaikParametrized.idd(st),pn)) 
                path = os.path.join(elphy_export_location,filename+".dat")
                
                # if the threshold is defined add spikes into Vms
                if threshold != None:
                    for seg in segs : addSpikes(seg,threshold)
                    
                createFileFromSegmentList( segs, path)
                print "Finished saving file %d/%d for sheet %s and %d-th stimulus" % (j+1,len(segments),shn,i)
                # release segments from memory
                for s in segs: s.release()
                j = j + 1
        print "Finished saving %d/%d stimulus" % (i+1,len(ps))
Example #4
    def __init__(self, path, name="Spontaneous activity of V1"):
        """ path should be a string containing  the path of the files containing the results of the simulation of the model
		"""
        self.data_store = PickledDataStore(load=True,
                                           parameters=ParameterSet({
                                               'root_directory':
                                               path,
                                               'store_stimuli':
                                               False
                                           }),
                                           replace=True)

        self.data_store_spont = param_filter_query(
            self.data_store,
            st_direct_stimulation_name=None,
            st_name="InternalStimulus")
        super(ModelV1Spont, self).__init__(name=name)
Example #5
    def load_datastore(base_dir):
        """
        Load PickledDataStore for reading.

        Parameters
        ----------
        base_dir : str
            Base directory where the DataStore files are saved.

        Returns
        -------
        PickledDataStore with the data from base_dir
        """
        return PickledDataStore(
            load=True,
            parameters=ParameterSet(
                {"root_directory": base_dir, "store_stimuli": False}
            ),
            replace=False,
        )
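A one-line usage sketch (hedged; the directory name is a placeholder):

# Hypothetical usage: open an existing result directory read-only.
data_store = load_datastore('results/my_run')
print(data_store.sheets())  # e.g. inspect which sheets were recorded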
Example #6
def run_experiments(model,experiment_list):
    # first let's run all the measurements required by the experiments
    print 'Starting Experiments'
    data_store = PickledDataStore(load=False,parameters=ParameterSet({'root_directory':Global.root_directory}))
    data_store.set_neuron_positions(model.neuron_positions())
    data_store.set_neuron_annotations(model.neuron_annotations())
    
    for i,experiment in enumerate(experiment_list):
        print 'Starting experiment: ', experiment.__class__.__name__
        stimuli = experiment.return_stimuli()
        unpresented_stimuli = data_store.identify_unpresented_stimuli(stimuli)
        print 'Running model'
        experiment.run(data_store,unpresented_stimuli)
        print 'Experiment %d/%d finished' % (i+1,len(experiment_list))
        
    return data_store
Example #7
try:
    from mpi4py import MPI
except ImportError:
    MPI = None
if MPI:
    mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0

logger = mozaik.getMozaikLogger()

if True:
    data_store, model = run_workflow('FFI', PushPullCCModel,
                                     create_experiments)
    #model.connectors['V1L4ExcL4ExcConnection'].store_connections(data_store)
    #model.connectors['V1L4ExcL4InhConnection'].store_connections(data_store)
    #model.connectors['V1L4InhL4ExcConnection'].store_connections(data_store)
    #model.connectors['V1L4InhL4InhConnection'].store_connections(data_store)
    #model.connectors['V1AffConnection'].store_connections(data_store)
    #model.connectors['V1AffInhConnection'].store_connections(data_store)

else:
    setup_logging()
    data_store = PickledDataStore(
        load=True,
        parameters=ParameterSet({
            'root_directory': 'FFI_combined-high_resolution_3000_21_____',
            'store_stimuli': False
        }),
        replace=True)
    logger.info('Loaded data store')

if mpi_comm.rank == MPI_ROOT:
    perform_analysis_and_visualization(data_store)
Example #8
from mozaik.framework.experiment_controller import run_experiments, setup_experiments, setup_logging
from mozaik.visualization.plotting import *
from mozaik.visualization.MRfig import *
from mozaik.analysis.analysis import *
from mozaik.analysis.technical import NeuronAnnotationsToPerNeuronValues
from mozaik.visualization.Kremkow_plots import *
from mozaik.storage.datastore import Hdf5DataStore,PickledDataStore
from parameters import ParameterSet
from mozaik.storage.queries import *
from mozaik.tools.circ_stat import circular_dist
import mozaik

logger = mozaik.getMozaikLogger("Mozaik")

setup_logging()
data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'ST'}))
logger.info('Loaded data store')
 
NeuronAnnotationsToPerNeuronValues(data_store,ParameterSet({})).analyse()
# find neuron with preference closest to 0

analog_indexes = param_filter_query(data_store,sheet_name="V1_Exc_L4").get_segments()[0].get_stored_isyn_ids()
analog_indexes_inh = param_filter_query(data_store,sheet_name="V1_Inh_L4").get_segments()[0].get_stored_isyn_ids()

# find neuron with preference closest to 0
NeuronAnnotationsToPerNeuronValues(data_store,ParameterSet({})).analyse()
l4_exc_or = data_store.get_analysis_result(identifier='PerNeuronValue',value_name = 'LGNAfferentOrientation', sheet_name = 'V1_Exc_L4')
l4_exc_phase = data_store.get_analysis_result(identifier='PerNeuronValue',value_name = 'LGNAfferentPhase', sheet_name = 'V1_Exc_L4')
l4_exc = analog_indexes[numpy.argmin([circular_dist(o,numpy.pi/2,numpy.pi)  for (o,p) in zip(l4_exc_or[0].get_value_by_id(analog_indexes),l4_exc_phase[0].get_value_by_id(analog_indexes))])]
l4_inh_or = data_store.get_analysis_result(identifier='PerNeuronValue',value_name = 'LGNAfferentOrientation', sheet_name = 'V1_Inh_L4')
l4_inh_phase = data_store.get_analysis_result(identifier='PerNeuronValue',value_name = 'LGNAfferentPhase', sheet_name = 'V1_Inh_L4')
Example #9
from parameters import ParameterSet

#mpi_comm = MPI.COMM_WORLD
logger = mozaik.getMozaikLogger()
simulation_name = "VogelsAbbott2005"
simulation_run_name, _, _, _, modified_parameters = parse_workflow_args()

if True:
    data_store,model = run_workflow(simulation_name,VogelsAbbott,create_experiments)
    model.connectors['ExcExcConnection'].store_connections(data_store)    
else: 
    setup_logging()
    data_store = PickledDataStore(
        load=True,
        parameters=ParameterSet(
            {
                "root_directory": result_directory_name(
                    simulation_run_name, simulation_name, modified_parameters
                ),
                "store_stimuli": False,
            }
        ),
        replace=True,
    )
    logger.info('Loaded data store')

#if mpi_comm.rank == 0:
print("Starting visualization")
perform_analysis_and_visualization(data_store)
data_store.save() 
Example #10
logger = mozaik.getMozaikLogger()

if False:
    data_store, model = run_workflow('FFI', PushPullCCModel,
                                     create_experiments)
    #jens_model.connectors['ON_to_[V1_Exc_L4]'].store_connections(data_store)
    #jens_model.connectors['OFF_to_[V1_Exc_L4]'].store_connections(data_store)
    #jens_model.connectors['ON_to_[V1_Inh_L4]'].store_connections(data_store)
    #jens_model.connectors['OFF_to_[V1_Inh_L4]'].store_connections(data_store)
    #jens_model.connectors['V1L4ExcL4ExcConnection'].store_connections(data_store)
    #jens_model.connectors['V1L4ExcL4InhConnection'].store_connections(data_store)
    #jens_model.connectors['V1L4InhL4ExcConnection'].store_connections(data_store)
    #jens_model.connectors['V1L4InhL4InhConnection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL23ExcL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL23InhL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1InhL23ExcL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1InhL23InhL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL4ExcL23Connection'].store_connections(data_store)

else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet(
                                      {'root_directory': 'FFI_PP1_____'}),
                                  replace=True)
    logger.info('Loaded data store')

if mozaik.mpi_comm.rank == mozaik.MPI_ROOT:
    perform_analysis_and_visualization(data_store)
Example #11
from parameters import ParameterSet

try:
    from mpi4py import MPI
except ImportError:
    MPI = None
if MPI:
    mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0

if True:
    data_store, model = run_workflow('SSCorrelationConnectivity',
                                     SSCorrelationConnectivity,
                                     create_experiments)
    data_store.save()
else:
    setup_logging()
    data_store = PickledDataStore(
        load=True,
        parameters=ParameterSet({
            'root_directory':
            '/home/jan/cluster/dev/pkg/mozaik/mozaik/contrib/SSCorrelationConn/20140313-113338[param_sd.defaults]CombinationParamSearch{7}/SSCorrelationConnectivity_ParameterSearch_____base_weight:0.00045_sigma:0.5_base_weight:0.0007_rand_struct_ratio:0.5_ExcInhAfferentRatio:1.0_base_weight:0.0007_gain:15.0',
            'store_stimuli': False
        }),
        replace=True)

if mpi_comm.rank == 0:
    print "Starting visualization"
    perform_analysis_and_visualization(data_store)
#   data_store.save()
Example #12
                model.connectors['V1EffConnectionOn'].store_connections(
                    data_store)
                model.connectors['V1EffConnectionOff'].store_connections(
                    data_store)
            if withPGN and withFeedback_CxPGN:
                model.connectors['V1EffConnectionPGN'].store_connections(
                    data_store)

    data_store.save()
# or only load pickled data
else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      'ThalamoCorticalModel_data_____',
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    logger.info('Loaded data store')
    data_store.save()

# Analysis and Plotting
if mpi_comm.rank == MPI_ROOT:
    # perform_analysis_test( data_store )
    # perform_analysis_and_visualization( data_store, 'luminance', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'contrast', withPGN, withV1 )
    perform_analysis_and_visualization(data_store, 'spatial_frequency',
                                       withPGN, withV1)
    # perform_analysis_and_visualization( data_store, 'temporal_frequency', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'size', withPGN, withV1 )
Example #13
def trial_averaged_raster(sheet,
                          folder,
                          stimulus,
                          parameter,
                          opposite=False,
                          box=None,
                          radius=None,
                          addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    spike_ids = param_filter_query(
        data_store,
        sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
    if spike_ids == None:
        print "No spikes recorded.\n"
        return
    print "Recorded neurons:", len(spike_ids)

    if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4':
        NeuronAnnotationsToPerNeuronValues(data_store,
                                           ParameterSet({})).analyse()
        l4_exc_or = data_store.get_analysis_result(
            identifier='PerNeuronValue',
            value_name='LGNAfferentOrientation',
            sheet_name=sheet)[0]
        if opposite:
            addon = addon + "_opposite"
            l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi /
                                  2, numpy.pi) for i in spike_ids
                ]) < .1)[0]]
        else:
            addon = addon + "_same"
            l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
                    for i in spike_ids
                ]) < .1)[0]]
        spike_ids = list(l4_exc_or_many)

    if radius or box:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=spike_ids)
        positions = data_store.get_neuron_postions()[sheet]
        if box:
            ids1 = select_ids_by_position(positions, sheet_ids, box=box)
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        spike_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    print "Selected neurons:", len(spike_ids)
    if len(spike_ids) < 1:
        return

    dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus)
    dist = box if not radius else radius

    # Raster + Histogram
    RasterPlot(dsv,
               ParameterSet({
                   'sheet_name': sheet,
                   'neurons': list(spike_ids),
                   'trial_averaged_histogram': True,
                   'spontaneous': True
               }),
               fig_param={
                   'dpi': 100,
                   'figsize': (100, 50)
               },
               plot_file_name=folder + "/HistRaster_" + parameter + "_" +
               str(sheet) + "_radius" + str(dist) + "_" + addon + ".svg").plot(
                   {'SpikeRasterPlot.group_trials': True})
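A hedged example call for the plotting helper above; the folder, stimulus name and radius are placeholder assumptions (radius as an [inner, outer] pair, following the select_ids_by_position usage elsewhere in these examples):

# Hypothetical call: raster + trial-averaged histogram for L4 excitatory cells
# recorded within 0.5 deg of the sheet centre.
trial_averaged_raster(sheet='V1_Exc_L4',
                      folder='results/my_run',
                      stimulus='FullfieldDriftingSinusoidalGrating',
                      parameter='orientation',
                      radius=[0.0, 0.5],
                      addon='centre')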
Example #14
if True:
    data_store, model = run_workflow('FeedForwardInhibition', PushPullCCModel,
                                     create_experiments)
    #model.connectors['V1L4ExcL4ExcConnection'].store_connections(data_store)
    #model.connectors['V1L4ExcL4InhConnection'].store_connections(data_store)
    #model.connectors['V1L4InhL4ExcConnection'].store_connections(data_store)
    #model.connectors['V1L4InhL4InhConnection'].store_connections(data_store)
    #model.connectors['V1AffConnectionOn'].store_connections(data_store)
    #model.connectors['V1AffConnectionOff'].store_connections(data_store)
    #model.connectors['V1AffInhConnectionOn'].store_connections(data_store)
    #model.connectors['V1AffInhConnectionOff'].store_connections(data_store)
    data_store.save()
    if mpi_comm.rank == MPI_ROOT:
        from analysis_and_visualization import perform_analysis_and_visualization
        perform_analysis_and_visualization(data_store)

else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      'FeedForwardInhibition_test_____',
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    logger.info('Loaded data store')
    #data_store.save()
    from analysis_and_visualization import perform_analysis_and_visualization
    perform_analysis_and_visualization(data_store)
Example #15
    #jens_model.connectors['V1L4InhL4ExcConnection'].store_connections(data_store)
    #jens_model.connectors['V1L4InhL4InhConnection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL23ExcL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL23InhL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1InhL23ExcL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1InhL23InhL23Connection'].store_connections(data_store)
    #jens_model.connectors['V1ExcL4ExcL23Connection'].store_connections(data_store)

    logger.info('Saving Datastore')
    if (not MPI) or (mpi_comm.rank == MPI_ROOT):
        data_store.save()
else:
    setup_logging()
    #data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'/media/antolikjan/New Volume/DATA/mozaik/PushPullCCLISSOMModel/OR'}),replace=True)
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet(
                                      {'root_directory': 'E'}),
                                  replace=True)
    logger.info('Loaded data store')

import resource

print "Current memory usage: %iMB" % (
    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024))

pref_or = numpy.pi / 2

# find neuron with preference closest to pref_or
l4_analog_ids = param_filter_query(
    data_store,
    sheet_name="V1_Exc_L4").get_segments()[0].get_stored_esyn_ids()
l4_analog_ids_inh = param_filter_query(
Example #16
            # model.connectors['V1L4InhL4ExcConnection'].store_connections(data_store)    
            # model.connectors['V1L4InhL4InhConnection'].store_connections(data_store)    
            # model.connectors['V1L4ExcL4ExcConnectionRand'].store_connections(data_store)    
            # model.connectors['V1L4ExcL4InhConnectionRand'].store_connections(data_store)    
            # model.connectors['V1L4InhL4ExcConnectionRand'].store_connections(data_store)    
            # model.connectors['V1L4InhL4InhConnectionRand'].store_connections(data_store)
            if withFeedback_CxLGN:
                model.connectors['V1EffConnectionOn'].store_connections(data_store)    
                model.connectors['V1EffConnectionOff'].store_connections(data_store)    
            if withPGN and withFeedback_CxPGN:
                model.connectors['V1EffConnectionPGN'].store_connections(data_store)    

    data_store.save()
# or only load pickled data
else:
    setup_logging()
    data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'ThalamoCorticalModel_data_spontaneous_____', 'store_stimuli' : False}),replace=True)
    logger.info('Loaded data store')
    data_store.save()

# Analysis and Plotting
if mpi_comm.rank == MPI_ROOT:
    # perform_analysis_test( data_store )
    perform_analysis_and_visualization( data_store, 'subcortical_conn', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'luminance', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'contrast', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'spatial_frequency', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'temporal_frequency', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'size', withPGN, withV1 )
    # perform_analysis_and_visualization( data_store, 'orientation', withPGN, withV1 )
Example #17
if MPI:
    mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0

logger = mozaik.getMozaikLogger()

print sys.argv

if False:
    data_store, model = run_workflow('T05', T05_Model, create_experiments)
    model.connectors['LGN_PGN_ConnectionOn'].store_connections(data_store)
    model.connectors['LGN_PGN_ConnectionOff'].store_connections(data_store)
    model.connectors['PGN_PGN_Connection'].store_connections(data_store)
    model.connectors['PGN_LGN_ConnectionOn'].store_connections(data_store)
    model.connectors['PGN_LGN_ConnectionOff'].store_connections(data_store)

else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'store_stimuli':
                                      False,
                                      'root_directory':
                                      'T05_data_____'
                                  }),
                                  replace=True)
    logger.info('Loaded data store')

if mpi_comm.rank == MPI_ROOT:
    perform_analysis_and_visualization(data_store)
Example #18
def merge_datastores(
    datastores,
    root_directory,
    merge_recordings=True,
    merge_analysis=True,
    merge_stimuli=True,
    replace=False,
):
    """
    This function takes a tuple of datastores as input and merges them into a single datastore, which is saved in root_directory.
    The type of data to merge can be controlled through the merge_recordings, merge_analysis and merge_stimuli booleans.
    It returns the merged datastore as a DataStore object.
    """
    merged_datastore = PickledDataStore(
        load=False,
        parameters=ParameterSet({
            "root_directory": root_directory,
            "store_stimuli": merge_stimuli
        }),
        replace=replace,
    )
    j = 0

    # Here we check if sheets and neurons are the same in all datastores
    assert compare_sheets_datastores(
        datastores), "All datastores should contain the same sheets"
    assert compare_neurons_ids_datastores(
        datastores), "Neurons in the datastores should have the same ids"
    assert compare_neurons_position_datastores(
        datastores), "Neurons in the datastores should have the same position"
    assert compare_neurons_annotations_datastores(
        datastores
    ), "Neurons in the datastores should have the same annotations"

    if not os.path.isdir(root_directory):
        os.makedirs(root_directory)

    # Change the block annotations so that it gets the merged version of the experiment parameters
    merged_datastore.block.annotations = datastores[0].block.annotations
    merged_datastore.block.annotations[
        "experiment_parameters"] = merge_experiment_parameters_datastores(
            datastores)

    j = 0
    for datastore in datastores:

        # Merge the recording of all the datastores if this flag is set to true
        if merge_recordings:
            segments = datastore.get_segments()
            segments += datastore.get_segments(null=True)
            for seg in segments:
                for s in merged_datastore.get_segments():
                    if seg.annotations == s.annotations and seg.null == s.null:
                        print(
                            "Warning: A segment with the same parametrization was already added to the datastore: %s"
                            % (seg.annotations))
                        raise ValueError(
                            "A segment with the same parametrization was already added to the datastore. Currently uniqueness is required. The user should check what caused this and modify their simulations to avoid it!: %s \n %s"
                            % (str(seg.annotations), str(s.annotations)))

                # Load the full segment and adds it to the merged datastore
                if not seg.full:
                    seg.load_full()
                merged_datastore.block.segments.append(
                    PickledDataStoreNeoWrapper(seg,
                                               "Segment" + str(j),
                                               root_directory,
                                               null=seg.null))
                merged_datastore.stimulus_dict[
                    seg.annotations["stimulus"]] = True

                # Create a new pickle file for this mozaik segment and store a corresponding neo segment there
                f = open(root_directory + "/" + "Segment" + str(j) + ".pickle",
                         "wb")
                s = Segment(description=seg.description,
                            file_origin=seg.file_origin,
                            file_datetime=seg.file_datetime,
                            rec_datetime=seg.rec_datetime,
                            index=seg.index,
                            **seg.annotations)
                s.spiketrains = seg.spiketrains
                s.analogsignals = seg.analogsignals
                pickle.dump(s, f)

                # Release each segment once it has been added to the merged datastore to save memory
                seg.release()
                j = j + 1

        # Merge the analysis of all the datastores if this flag is set to true
        if merge_analysis:
            adss = datastore.get_analysis_result()
            for ads in adss:
                merged_datastore.add_analysis_result(ads)

        # Merge the stimuli all the datastores if this flag is set to true
        if merge_stimuli:
            for key, value in datastore.sensory_stimulus.items():
                merged_datastore.sensory_stimulus[key] = value

    return merged_datastore
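A hedged usage sketch for merge_datastores; the three directories are placeholders and both source runs are assumed to come from the same model, so that the sheet/id/position/annotation asserts pass:

# Hypothetical usage: merge two runs of the same model into a new store.
ds_a = PickledDataStore(load=True,
                        parameters=ParameterSet({'root_directory': 'results/run_a',
                                                 'store_stimuli': False}))
ds_b = PickledDataStore(load=True,
                        parameters=ParameterSet({'root_directory': 'results/run_b',
                                                 'store_stimuli': False}))
merged = merge_datastores((ds_a, ds_b), 'results/merged',
                          merge_recordings=True, merge_analysis=True,
                          merge_stimuli=False)
merged.save()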
Example #19
import mozaik
from mozaik.controller import run_workflow, setup_logging
from experiments import create_experiments
from model import VogelsAbbott
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization
from parameters import ParameterSet

#mpi_comm = MPI.COMM_WORLD
logger = mozaik.getMozaikLogger()

if True:
    data_store, model = run_workflow('VogelsAbbott2005', VogelsAbbott,
                                     create_experiments)
else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      'VogelsAbbott2005_test_____',
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    logger.info('Loaded data store')

#if mpi_comm.rank == 0:
print "Starting visualization"
perform_analysis_and_visualization(data_store)
data_store.save()
Example #20
def perform_comparison_size_tuning(sheet,
                                   reference_position,
                                   step,
                                   sizes,
                                   folder_full,
                                   folder_inactive,
                                   reverse=False,
                                   Ismaller=[2, 3],
                                   Iequal=[4, 5],
                                   Ilarger=[6, 8],
                                   box=[],
                                   csvfile=None):
    print folder_full
    data_store_full = PickledDataStore(load=True,
                                       parameters=ParameterSet({
                                           'root_directory':
                                           folder_full,
                                           'store_stimuli':
                                           False
                                       }),
                                       replace=True)
    data_store_full.print_content(full_recordings=False)
    print folder_inactive
    data_store_inac = PickledDataStore(load=True,
                                       parameters=ParameterSet({
                                           'root_directory':
                                           folder_inactive,
                                           'store_stimuli':
                                           False
                                       }),
                                       replace=True)
    data_store_inac.print_content(full_recordings=False)

    print "Checking data..."
    # Full
    dsv1 = queries.param_filter_query(data_store_full,
                                      identifier='PerNeuronValue',
                                      sheet_name=sheet)
    # dsv1.print_content(full_recordings=False)
    pnvs1 = [dsv1.get_analysis_result()]
    # get stimuli
    st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
    # print st1

    # Inactivated
    dsv2 = queries.param_filter_query(data_store_inac,
                                      identifier='PerNeuronValue',
                                      sheet_name=sheet)
    pnvs2 = [dsv2.get_analysis_result()]
    # get stimuli
    st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]

    # rings analysis
    neurons_full = []
    neurons_inac = []
    rowplots = 0
    max_size = 0.6

    # GET RECORDINGS BY POSITION (either step or box; when a box is used, the box selection is inefficiently repeated for each step!)
    slice_ranges = numpy.arange(step, max_size + step, step)
    print "slice_ranges:", slice_ranges
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]
        print col
        # get the list of all recorded neurons in X_ON
        # Full
        spike_ids1 = param_filter_query(
            data_store_full,
            sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions1 = data_store_full.get_neuron_postions()[sheet]
        # print numpy.min(positions1), numpy.max(positions1)
        sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet,
                                                       neuron_ids=spike_ids1)
        radius_ids1 = select_ids_by_position(reference_position, radius,
                                             sheet_ids1, positions1, reverse,
                                             box)
        neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet,
                                                 indexes=radius_ids1)

        # Inactivated
        spike_ids2 = param_filter_query(
            data_store_inac,
            sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions2 = data_store_inac.get_neuron_postions()[sheet]
        sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet,
                                                       neuron_ids=spike_ids2)
        radius_ids2 = select_ids_by_position(reference_position, radius,
                                             sheet_ids2, positions2, reverse,
                                             box)
        neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet,
                                                 indexes=radius_ids2)

        print neurons1
        print neurons2
        if not set(neurons1) == set(neurons2):
            neurons1 = numpy.intersect1d(neurons1, neurons2)
            neurons2 = neurons1

        if len(neurons1) > rowplots:
            rowplots = len(neurons1)

        neurons_full.append(neurons1)
        neurons_inac.append(neurons2)

        print "radius_ids", radius_ids2
        print "neurons_full:", len(neurons_full[col]), neurons_full[col]
        print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]

        assert len(neurons_full[col]
                   ) > 0, "ERROR: the number of recorded neurons is 0"

    # subplot figure creation
    plotOnlyPop = False
    print 'rowplots', rowplots
    print "Starting plotting ..."
    print "slice_ranges:", len(slice_ranges), slice_ranges
    if len(slice_ranges) > 1:
        fig, axes = plt.subplots(nrows=len(slice_ranges),
                                 ncols=rowplots + 1,
                                 figsize=(3 * rowplots, 3 * len(slice_ranges)),
                                 sharey=False)
    else:
        fig, axes = plt.subplots(nrows=2, ncols=2, sharey=False)
        plotOnlyPop = True
    print axes.shape

    p_significance = .02
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]
        print col
        interval = str(radius[0]) + " - " + str(radius[1]) + " deg radius"
        print interval
        axes[col, 0].set_ylabel(interval + "\n\nResponse change (%)")
        print "range:", col
        if len(neurons_full[col]) < 1:
            continue

        tc_dict1 = []
        tc_dict2 = []

        # Full
        # group values
        dic = colapse_to_dictionary(
            [z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1,
            'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict1.append(dic)

        # Inactivated
        # group values
        dic = colapse_to_dictionary(
            [z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2,
            'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict2.append(dic)

        print "(stimulus conditions, cells):", tc_dict1[0].values()[0][
            1].shape  # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)

        # Population histogram
        diff_full_inac = []
        sem_full_inac = []
        num_cells = tc_dict1[0].values()[0][1].shape[1]
        smaller_pvalue = 0.
        equal_pvalue = 0.
        larger_pvalue = 0.

        # 1. SELECT ONLY CHANGING UNITS
        all_open_values = tc_dict2[0].values()[0][1]
        all_closed_values = tc_dict1[0].values()[0][1]

        # 1.1 Search for the units that are NOT changing (within a certain absolute tolerance)
        unchanged_units = numpy.isclose(all_closed_values,
                                        all_open_values,
                                        rtol=0.,
                                        atol=4.)
        # print unchanged_units.shape

        # 1.2 Reverse them into those that are changing
        changed_units = numpy.invert(unchanged_units)
        # print numpy.nonzero(changed_units)

        # 1.3 Get the indexes of all units that are changing
        changing_idxs = []
        for i in numpy.nonzero(changed_units)[0]:
            for j in numpy.nonzero(changed_units)[1]:
                if j not in changing_idxs:
                    changing_idxs.append(j)
        # print sorted(changing_idxs)

        # 1.4 Get the changing units
        open_values = [x[changing_idxs] for x in all_open_values]
        open_values = numpy.array(open_values)
        closed_values = [x[changing_idxs] for x in all_closed_values]
        closed_values = numpy.array(closed_values)
        print "chosen open units:", open_values.shape
        print "chosen closed units:", closed_values.shape
        num_cells = closed_values.shape[1]

        # 2. AUTOMATIC SEARCH FOR INTERVALS
        # peak = max(numpy.argmax(closed_values, axis=0 ))
        peaks = numpy.argmax(closed_values, axis=0)
        # peak = int( numpy.argmax( closed_values ) / closed_values.shape[1] ) # the returned single value is from the flattened array
        # print "numpy.argmax( closed_values ):", numpy.argmax( closed_values )
        print "peaks:", peaks
        # minimum = int( numpy.argmin( closed_values ) / closed_values.shape[1] )
        # minimum = min(numpy.argmin(closed_values, axis=0 ))
        minimums = numpy.argmin(
            closed_values,
            axis=0) + 1  # +N to get the response out of the smallest
        # print "numpy.argmin( closed_values ):", numpy.argmin( closed_values )
        print "minimums:", minimums

        # -------------------------------------
        # DIFFERENCE BETWEEN INACTIVATED AND CONTROL
        # We want to have a summary measure of the population of cells with and without inactivation.
        # Our null-hypothesis is that the inactivation does not change the activity of cells.
        # A different result will tell us that the inactivation DOES something.
        # Therefore our null-hypothesis is the result obtained in the intact system.
        # Procedure:
        # We have several stimulus sizes
        # We want to group them in three: smaller than optimal, optimal, larger than optimal
        # We do the mean response for each cell for the grouped stimuli
        #    i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
        # We repeat for each group

        # average of all trial-averaged response for each cell for grouped stimulus size
        # we want the difference / normalized by the highest value * expressed as percentage
        # print num_cells
        # print "inac",numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0)
        # print "full",numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)
        # print "diff",(numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))
        # print "diff_norm",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)))
        # print "diff_norm_perc",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))) * 100

        # USING PROVIDED INTERVALS
        # diff_smaller = ((numpy.sum(open_values[Ismaller[0]:Ismaller[1]], axis=0) - numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) / numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) * 100
        # diff_equal = ((numpy.sum(open_values[Iequal[0]:Iequal[1]], axis=0) - numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) / numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) * 100
        # diff_larger = ((numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) - numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) / numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100

        # USING AUTOMATIC SEARCH
        # print "open"
        # print open_values[minimums]
        # print "closed"
        # print closed_values[minimums]
        # print open_values[peaks]
        # print closed_values[peaks]

        diff_smaller = ((numpy.sum(open_values[minimums], axis=0) -
                         numpy.sum(closed_values[minimums], axis=0)) /
                        numpy.sum(closed_values[minimums], axis=0)) * 100
        diff_equal = ((numpy.sum(open_values[peaks], axis=0) -
                       numpy.sum(closed_values[peaks], axis=0)) /
                      numpy.sum(closed_values[peaks], axis=0)) * 100
        diff_larger = (
            (numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) -
             numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) /
            numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100
        # print "diff_smaller", diff_smaller
        # print "diff_equal", diff_smaller
        # print "diff_larger", diff_smaller

        # average of all cells
        smaller = sum(diff_smaller) / num_cells
        equal = sum(diff_equal) / num_cells
        larger = sum(diff_larger) / num_cells
        print "smaller", smaller
        print "equal", equal
        print "larger", larger

        if csvfile:
            csvfile.write("(" + str(smaller) + ", " + str(equal) + ", " +
                          str(larger) + "), ")

        # 0/0
        # Check using scipy
        # and we want to compare the responses of full and inactivated
        # smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
        # equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
        # larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
        # print "smaller, smaller_pvalue:", smaller, smaller_pvalue
        # print "equal, equal_pvalue:", equal, equal_pvalue
        # print "larger, larger_pvalue:", larger, larger_pvalue

        diff_full_inac.append(smaller)
        diff_full_inac.append(equal)
        diff_full_inac.append(larger)

        # -------------------------------------
        # Standard Error Mean calculated on the full sequence
        sem_full_inac.append(scipy.stats.sem(diff_smaller))
        sem_full_inac.append(scipy.stats.sem(diff_equal))
        sem_full_inac.append(scipy.stats.sem(diff_larger))

        # print diff_full_inac
        # print sem_full_inac
        barlist = axes[col, 0].bar([0.5, 1.5, 2.5],
                                   diff_full_inac,
                                   yerr=sem_full_inac,
                                   width=0.8)
        axes[col, 0].plot([0, 4], [0, 0], 'k-')  # horizontal 0 line
        for ba in barlist:
            ba.set_color('white')
        if smaller_pvalue < p_significance:
            barlist[0].set_color('brown')
        if equal_pvalue < p_significance:
            barlist[1].set_color('darkgreen')
        if larger_pvalue < p_significance:
            barlist[2].set_color('blue')

        # Plotting tuning curves
        x_full = tc_dict1[0].values()[0][0]
        x_inac = tc_dict2[0].values()[0][0]
        # each cell couple
        axes[col, 1].set_ylabel("Response (spikes/sec)", fontsize=10)
        for j, nid in enumerate(neurons_full[col][changing_idxs]):
            # print col,j,nid
            if len(neurons_full[col][changing_idxs]
                   ) > 1:  # case with just one neuron in the group
                y_full = closed_values[:, j]
                y_inac = open_values[:, j]
            else:
                y_full = closed_values
                y_inac = open_values
            if not plotOnlyPop:
                axes[col, j + 1].plot(x_full, y_full, linewidth=2, color='b')
                axes[col, j + 1].plot(x_inac, y_inac, linewidth=2, color='r')
                axes[col, j + 1].set_title(str(nid), fontsize=10)
                axes[col, j + 1].set_xscale("log")

    fig.subplots_adjust(hspace=0.4)
    # fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
    fig.text(0.5, 0.04, 'cells', ha='center', va='center')
    fig.text(0.06,
             0.5,
             'ranges',
             ha='center',
             va='center',
             rotation='vertical')
    for ax in axes.flatten():
        ax.set_ylim([0, 60])
        ax.set_xticks(sizes)
        ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
        # ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])

    for col, _ in enumerate(slice_ranges):
        # axes[col,0].set_ylim([-.8,.8])
        axes[col, 0].set_ylim([-60, 60])
        axes[col, 0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
        axes[col, 0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
        axes[col, 0].set_xlim([0, 4])
        axes[col, 0].set_xticks([.9, 1.9, 2.9])
        axes[col, 0].set_xticklabels(['small', 'equal', 'larger'])
        axes[col, 0].spines['right'].set_visible(False)
        axes[col, 0].spines['top'].set_visible(False)
        axes[col, 0].spines['bottom'].set_visible(False)

    # plt.show()
    plt.savefig(folder_inactive + "/TrialAveragedSizeTuningComparison_" +
                sheet + "_step" + str(step) + "_box" + str(box) + ".png",
                dpi=100)
    # plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
    fig.clf()
    plt.close()
    # garbage
    gc.collect()
Example #21
def trial_averaged_LFP_rate(sheet,
                            folder,
                            stimulus,
                            parameter,
                            start,
                            end,
                            xlabel="",
                            ylabel="",
                            color="black",
                            ylim=[0., 100.],
                            radius=None,
                            addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    neurons = []
    neurons = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)

    ### cascading requirements
    if radius:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)
    ####
    # if orientation:
    #	NeuronAnnotationsToPerNeuronValues(data_store,ParameterSet({})).analyse()
    # 	l4_or = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentOrientation', sheet_name=sheet)
    # 	l4_phase = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentPhase', sheet_name=sheet)
    # 	# print "l4_phase", l4_phase
    # 	neurons = numpy.array([neurons[numpy.argmin([circular_dist(o,numpy.pi/2,numpy.pi) for (o,p) in zip(l4_or[0].get_value_by_id(neurons),l4_phase[0].get_value_by_id(neurons))])] ])

    print "Selected neurons:", len(neurons)  #, neurons
    if len(neurons) < 1:
        return

    SpikeCount(
        param_filter_query(data_store, sheet_name=sheet, st_name=stimulus),
        ParameterSet({
            'bin_length': 5,
            'neurons': list(neurons),
            'null': False
        })
        # ParameterSet({'bin_length':bin, 'neurons':list(neurons), 'null':False})
    ).analyse()
    # datastore.save()
    TrialMean(
        param_filter_query(data_store,
                           name='AnalogSignalList',
                           analysis_algorithm='SpikeCount'),
        ParameterSet({
            'vm': False,
            'cond_exc': False,
            'cond_inh': False
        })).analyse()

    dsvTM = param_filter_query(data_store,
                               sheet_name=sheet,
                               st_name=stimulus,
                               analysis_algorithm='TrialMean')
    # dsvTM.print_content(full_recordings=False)
    pnvsTM = [dsvTM.get_analysis_result()]
    # print pnvsTM
    # get stimuli from PerNeuronValues
    st = [MozaikParametrized.idd(s.stimulus_id) for s in pnvsTM[-1]]

    asl_id = numpy.array([z.get_asl_by_id(neurons) for z in pnvsTM[-1]])
    print asl_id.shape
    # Example:
    # (8, 133, 1029)
    # 8 stimuli
    # 133 cells
    # 1029 bins

    dic = colapse_to_dictionary([z.get_asl_by_id(neurons) for z in pnvsTM[-1]],
                                st, parameter)
    for k in dic:
        (b, a) = dic[k]
        par, val = zip(*sorted(zip(b, numpy.array(a))))
        dic[k] = (par, numpy.array(val))

    stimuli = dic.values()[0][0]
    means = asl_id.mean(axis=1)  # mean of
    print means.shape
    # print "means", means, "stimuli", stimuli

    #plot the LFP for each stimulus
    for s in range(0, len(means)):
        # for each stimulus plot the average conductance per cell over time
        matplotlib.rcParams.update({'font.size': 22})
        fig, ax = plt.subplots()

        ax.plot(range(0, len(means[s])), means[s], color=color, linewidth=3)

        # ax.set_ylim([lfp.min(), lfp.max()])
        # ax.set_ylim(ylim)
        ax.set_ylabel("LFP (uV)")
        ax.set_xlabel("Time (us)")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)

        # text
        plt.tight_layout()
        plt.savefig(folder + "/TimecourseLFPrate_" + sheet + "_" + parameter +
                    "_" + str(s) + "_" + addon + ".svg",
                    dpi=200,
                    transparent=True)
        fig.clf()
        plt.close()
        # garbage
        gc.collect()
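A hedged example call for the helper above; the sheet, folder, stimulus name and radius are placeholder assumptions (radius again as an [inner, outer] pair):

# Hypothetical call: average the 5 ms spike-count signals of X_ON cells near the
# sheet centre and plot one time course per value of the 'radius' stimulus parameter.
trial_averaged_LFP_rate(sheet='X_ON',
                        folder='results/my_run',
                        stimulus='DriftingSinusoidalGratingDisk',
                        parameter='radius',
                        start=0, end=1000,
                        radius=[0.0, 0.5],
                        addon='centre')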
Example #22
# -*- coding: utf-8 -*-
"""

"""
import matplotlib
matplotlib.use('Agg')
from mpi4py import MPI 
#from pyNN import nest
import sys
import mozaik.controller
from mozaik.controller import run_workflow, setup_logging
import mozaik
from experiments import create_experiments_short,create_experiments_old,create_experiments,create_experiments_tmp
from model import SelfSustainedPushPull
from mozaik.storage.datastore import Hdf5DataStore,PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization
from parameters import ParameterSet

mpi_comm = MPI.COMM_WORLD

if True:
    data_store,model = run_workflow('CorticalStimulationModel',SelfSustainedPushPull,create_experiments_tmp)
    data_store.save() 
else: 
    setup_logging()
    data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'CorticalStimulationModel_visual_stimulation_____base_weight:0.0022_inhibitory_connection_ratio:0.5_layer23_aff_ratio:0.4_stdev:2.7','store_stimuli' : False}),replace=True)

if mpi_comm.rank == 0:
   print "Starting visualization" 
   perform_analysis_and_visualization(data_store,gratings=True,cort_stim=False,nat_stim=False,tp=0)
Example #23
def VSDI(sheet, folder, stimulus, parameter, num_stim=2, addon=""):
    import matplotlib as ml
    import quantities as pq
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    polarity = True  # exc
    c = 'red'
    if "Inh" in sheet:
        polarity = False
        c = 'blue'

    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(),
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    spont_segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(
                               null=True),  # Init 150ms with no stimulus
        # param_filter_query(data_store, sheet_name=sheet, st_direct_stimulation_name="None", st_name='InternalStimulus').get_segments(),
        # param_filter_query(data_store, direct_stimulation_name='None', sheet_name=sheet).get_segments(), # 1029ms NoStimulation
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    # print segs
    print "spont_trials:", len(spont_segs)
    spont_trials = len(spont_segs) / num_stim
    print "spont_trials:", spont_trials
    trials = len(segs) / num_stim
    print "trials:", trials

    analog_ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_vm_ids()
    if analog_ids == None or len(analog_ids) < 1:
        print "No Vm recorded.\n"
        return
    print "Recorded neurons:", len(analog_ids)
    # 900 neurons over 6000 micrometers, 200 micrometers interval

    # avg vm
    sheet_indexes = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=analog_ids)
    positions = data_store.get_neuron_postions()[sheet]
    print positions.shape  # all 10800

    ###############################
    # # Vm PLOTS
    ###############################
    # segs = spont_segs

    # the cortical surface is divided into annuli (beyond the current stimulus size);
    # the mean Vm of each annulus forms one row of the plot, over time
    # (a standalone sketch of the annulus selection follows this function)
    # annulus_radius = 0.3
    # start = 1.4
    # stop = 3. - annulus_radius
    # num = 5 # annuli

    annulus_radius = 0.3
    start = 0.0
    stop = 1.6 - annulus_radius
    num = 5  # annuli

    # open image
    fig = plt.figure(figsize=(8, 8))
    gs = gridspec.GridSpec(num, 1, hspace=0.3)
    arrival = []
    for n, r in enumerate(numpy.linspace(start, stop, num=num)):
        radius = [r, r + annulus_radius]
        annulus_ids = select_ids_by_position(positions,
                                             sheet_indexes,
                                             radius=radius)
        print "annulus:  ", radius, "(radii)  ", len(annulus_ids), "(#ids)"
        # print len(annulus_ids), annulus_ids

        trial_avg_prime_response = []
        trial_avg_annulus_mean_vm = []
        for s in segs:

            dist = eval(s.annotations['stimulus'])
            if dist['radius'] < 0.1:
                continue
            print "radius", dist['radius'], "trial", dist['trial']

            s.load_full()
            # print "s.analogsignalarrays", s.analogsignalarrays # if not pre-loaded, it results empty in loop

            # print gs, n
            ax = plt.subplot(gs[n])

            for a in s.analogsignalarrays:
                # print "a.name: ",a.name
                if a.name == 'v':
                    # print "a",a.shape # (10291, 900)  (vm instants t, cells)

                    # annulus population average
                    # print "annulus_ids",len(annulus_ids)
                    # print annulus_ids
                    # for aid in annulus_ids:
                    # 	print aid, numpy.nonzero(sheet_indexes == aid)[0][0]

                    # annulus_vms = numpy.array([a[:,numpy.nonzero(sheet_indexes == aid)[0]] for aid in annulus_ids])
                    annulus_mean_vm = numpy.array([
                        a[:, numpy.nonzero(sheet_indexes == aid)[0]]
                        for aid in annulus_ids
                    ]).mean(axis=0)[0:2000, :]
                    # print "annulus_vms",annulus_vms.shape
                    # only annulus ids in the mean
                    # annulus_mean_vm = numpy.mean( annulus_vms, axis=0)[0:2000,:]
                    # print "annulus_mean_vm", annulus_mean_vm.shape
                    trial_avg_annulus_mean_vm.append(annulus_mean_vm)
                    # print "annulus_mean_vm", annulus_mean_vm
                    # threshold = annulus_mean_vm.max() - (annulus_mean_vm.max()-annulus_mean_vm.min())/10 # threshold at: 90% of the max-min interval
                    # prime_response = numpy.argmax(annulus_mean_vm > threshold)
                    # trial_avg_prime_response.append(prime_response)

                    plt.axvline(x=numpy.argmax(annulus_mean_vm),
                                color=c,
                                alpha=0.5)
                    ax.plot(annulus_mean_vm, color=c, alpha=0.5)
                    ax.set_ylim([-75., -50.])

        # means
        # trial_avg_prime_response = numpy.mean(trial_avg_prime_response)
        trial_avg_annulus_mean_vm = numpy.mean(trial_avg_annulus_mean_vm,
                                               axis=0)

        from scipy.signal import argrelextrema
        peaks = argrelextrema(trial_avg_annulus_mean_vm,
                              numpy.greater,
                              order=200)[0]
        print peaks

        for peak in peaks:
            plt.axvline(x=peak, color=c, linewidth=3.)  #, linestyle=linestyle)

        ax.plot(trial_avg_annulus_mean_vm, color=c, linewidth=3.)
        ax.set_ylim([-75., -50.])
        fig.add_subplot(ax)
        # s.release()

    # close image
    # title = "propagation velocity {:f} SD {:f} m/s".format((annulus_radius*.001)/(numpy.mean(arrival)*.0001), numpy.std(arrival)) #
    plt.xlabel("time (0.1 ms) ")  #+title)
    plt.savefig(folder + "/VSDI_mean_vm_" + parameter + "_" + str(sheet) +
                "_radius" + str(dist['radius']) + "_" + addon + ".svg",
                dpi=300,
                transparent=True)
    plt.close()
    gc.collect()
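
As a complement to the annulus logic above, here is a hedged, library-free sketch of selecting neurons whose cortical position falls inside an annulus [r, r + annulus_radius]. It uses plain numpy instead of mozaik's select_ids_by_position, and all names, shapes and values are illustrative.

import numpy

def ids_in_annulus(positions, ids, r_inner, r_outer, centre=(0.0, 0.0)):
    """Return the subset of `ids` whose (x, y) position lies in the annulus.

    positions : array of shape (2, N) with x in row 0 and y in row 1
    ids       : indices into the columns of `positions`
    """
    x = positions[0, ids] - centre[0]
    y = positions[1, ids] - centre[1]
    dist = numpy.sqrt(x ** 2 + y ** 2)
    mask = (dist >= r_inner) & (dist < r_outer)
    return numpy.asarray(ids)[mask]

# toy usage: 5 annuli of width 0.3 deg starting at the stimulus centre
positions = numpy.random.RandomState(0).uniform(-1.6, 1.6, size=(2, 10800))
all_ids = numpy.arange(positions.shape[1])
annulus_radius = 0.3
for r in numpy.linspace(0.0, 1.6 - annulus_radius, num=5):
    ids = ids_in_annulus(positions, all_ids, r, r + annulus_radius)
    print("annulus [%.1f, %.1f): %d ids" % (r, r + annulus_radius, len(ids)))
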
Ejemplo n.º 24
0
# -*- coding: utf-8 -*-
"""
"""
import matplotlib
matplotlib.use('Agg')
import sys
from mozaik.controller import setup_logging
import mozaik
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization
from parameters import ParameterSet

from mozaik.controller import Global
Global.root_directory = sys.argv[1] + '/'

setup_logging()
data_store = PickledDataStore(load=True,
                              parameters=ParameterSet({
                                  'root_directory':
                                  sys.argv[1],
                                  'store_stimuli':
                                  False
                              }),
                              replace=True)
perform_analysis_and_visualization(data_store,
                                   gratings=False,
                                   cort_stim=True,
                                   nat_stim=False,
                                   tp=1,
                                   scale=True)
Ejemplo n.º 25
0
def exportToElphy(data_store_location,
                  elphy_export_location,
                  sheets=None,
                  threshold=None):
    import os.path
    if not os.path.isdir(elphy_export_location):
        if os.path.exists(elphy_export_location):
            raise ValueError("The elphy export path is not a directory")
        else:
            os.makedirs(elphy_export_location)

    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      data_store_location,
                                      'store_stimuli':
                                      False
                                  }))
    ps = MP.parameter_value_list(
        [MP.MozaikParametrized.idd(s) for s in data_store.get_stimuli()],
        'name')
    for i, sn in enumerate(ps):
        if sheets == None: sheets = data_store.sheets()

        for shn in sheets:
            dsv = param_filter_query(data_store, st_name=sn, sheet_name=shn)
            if dsv.get_stimuli() == []: continue
            varying_parameters = MP.varying_parameters(
                [MP.MozaikParametrized.idd(s) for s in dsv.get_stimuli()])

            segments, stimuli = MP.colapse(
                dsv.get_segments(),
                [MP.MozaikParametrized.idd(s) for s in dsv.get_stimuli()],
                parameter_list=['trial'],
                allow_non_identical_objects=True)
            j = 0
            for segs, st in zip(segments, stimuli):
                # just make sure all segments are fully loaded; in the future this should probably be sorted out so that this line can be deleted
                for s in segs:
                    s.load_full()

                # create file name:
                filename = "name=" + sn + "#" + "sheet_name=" + shn
                for pn in varying_parameters:
                    if pn != "trial":
                        filename += "#" + str(pn) + "=" + str(
                            getattr(MP.MozaikParametrized.idd(st), pn))
                path = os.path.join(elphy_export_location, filename + ".dat")

                # if the threshold is defined add spikes into Vms
                if threshold != None:
                    for seg in segs:
                        addSpikes(seg, threshold)

                createFileFromSegmentList(segs, path)
                print "Finished saving file %d/%d for sheet %s and %d-th stimulus" % (
                    j + 1, len(segments), shn, i)
                # release segments from memory
                for s in segs:
                    s.release()
                j = j + 1
        print "Finished saving %d/%d stimulus" % (i + 1, len(ps))
Ejemplo n.º 26
0
def trial_averaged_Vm(sheet,
                      folder,
                      stimulus,
                      parameter,
                      opposite=False,
                      box=None,
                      radius=None,
                      addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    analog_ids = param_filter_query(
        data_store, sheet_name=sheet).get_segments()[0].get_stored_vm_ids()
    if analog_ids == None:
        print "No Vm recorded.\n"
        return
    print "Recorded neurons:", len(analog_ids)

    if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4':
        NeuronAnnotationsToPerNeuronValues(data_store,
                                           ParameterSet({})).analyse()
        l4_exc_or = data_store.get_analysis_result(
            identifier='PerNeuronValue',
            value_name='LGNAfferentOrientation',
            sheet_name=sheet)[0]
        if opposite:
            addon = addon + "_opposite"
            l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi /
                                  2, numpy.pi) for i in analog_ids
                ]) < .1)[0]]
        else:
            addon = addon + "_same"
            l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
                    for i in analog_ids
                ]) < .1)[0]]
        analog_ids = list(l4_exc_or_many)

    if radius or box:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=analog_ids)
        positions = data_store.get_neuron_postions()[sheet]
        if box:
            ids1 = select_ids_by_position(positions, sheet_ids, box=box)
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        analog_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    print "Selected neurons:", len(analog_ids)
    if len(analog_ids) < 1:
        return

    dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus)

    dist = box if not radius else radius
    for n in analog_ids:
        VmPlot(
            dsv,
            ParameterSet({
                'neuron': n,
                'sheet_name': sheet,
                'spontaneous': True,
            }),
            fig_param={
                'dpi': 300,
                'figsize': (40, 5)
            },
            # plot_file_name=folder+"/Vm_"+parameter+"_"+str(sheet)+"_"+str(dist)+"_"+str(n)+"_"+addon+".png"
            plot_file_name=folder + "/Vm_" + parameter + "_" + str(sheet) +
            "_radius" + str(dist) + "_" + str(n) + "_" + addon + ".svg"
        ).plot({
            # '*.y_lim':(0,60),
            # '*.x_scale':'log', '*.x_scale_base':2,
            # '*.y_ticks':[5, 10, 25, 50, 60],
            # # '*.y_scale':'linear',
            # '*.y_scale':'log', '*.y_scale_base':2,
            # '*.fontsize':24
        })
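
The orientation-based selection above relies on mozaik's circular_dist helper. Below is a hedged standalone sketch of the underlying idea, picking neurons whose preferred orientation lies within 0.1 rad of a target on a circular domain of period pi; the data and names are synthetic.

import numpy

def circular_distance(a, b, period=numpy.pi):
    """Shortest distance between angles a and b on a circle of the given period."""
    d = numpy.abs(a - b) % period
    return numpy.minimum(d, period - d)

# synthetic preferred orientations for a population of neurons
rng = numpy.random.RandomState(0)
neuron_ids = numpy.arange(1000)
preferred_or = rng.uniform(0.0, numpy.pi, size=neuron_ids.size)

target = 0.0                  # or numpy.pi / 2 for the "opposite" group
tolerance = 0.1               # radians, as in the snippet above
selected = neuron_ids[circular_distance(preferred_or, target) < tolerance]
print("selected %d of %d neurons" % (selected.size, neuron_ids.size))
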
Ejemplo n.º 27
0
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization
from parameters import ParameterSet

mpi_comm = MPI.COMM_WORLD

if True:
    data_store, model = run_workflow(
        'CorticalStimulationModel', SelfSustainedPushPull,
        create_experiments_cortical_stimulation_or_nolat)
    data_store.save()
else:
    setup_logging()
    data_store = PickledDataStore(
        load=True,
        parameters=ParameterSet({
            'root_directory':
            'CorticalStimulationModel_visual_stimulation_full_protocol_____',
            'store_stimuli': False
        }),
        replace=True)

if mpi_comm.rank == 0:
    print "Starting visualization"
    perform_analysis_and_visualization(data_store,
                                       gratings=False,
                                       cort_stim=True,
                                       nat_stim=False,
                                       tp=4,
                                       scale=False)
Ejemplo n.º 28
0
def LFP(sheet,
        folder,
        stimulus,
        parameter,
        tip=[.0, .0, .0],
        sigma=0.300,
        ylim=[0., -1.],
        addon="",
        color='black'):
    import matplotlib as ml
    import quantities as pq
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet

    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_esyn_ids()
    if ids == None or len(ids) < 1:
        print "No gesyn recorded.\n"
        return
    print "Recorded gesyn:", len(ids), ids

    ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_vm_ids()
    if ids == None or len(ids) < 1:
        print "No Vm recorded.\n"
        return
    print "Recorded Vm:", len(ids), ids

    NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse()
    l4_exc_or = data_store.get_analysis_result(
        identifier='PerNeuronValue',
        value_name='LGNAfferentOrientation',
        sheet_name=sheet)[0]
    l4_exc_or_many = numpy.array(ids)[numpy.nonzero(
        numpy.array([
            circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
            for i in ids
        ]) < .1)[0]]
    ids = list(l4_exc_or_many)

    print "Recorded neurons:", len(ids), ids
    # 900 neurons over 6000 micrometers, 200 micrometers interval

    sheet_indexes = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=ids)

    positions = data_store.get_neuron_postions()[sheet]
    print positions.shape  # all 10800

    # take the positions of the ids
    ids_positions = numpy.transpose(positions)[sheet_indexes, :]
    print ids_positions.shape
    print ids_positions

    # Pre-compute distances from the LFP tip
    distances = []
    for i in range(len(ids)):
        distances.append(
            numpy.linalg.norm(
                numpy.array(ids_positions[i][0]) - numpy.array(tip)))
    distances = numpy.array(distances)
    print "distances:", len(distances), distances

    # ##############################
    # LFP
    # tip = [[x],[y],[.0]]
    # For each recorded cell:
    # Gaussianly weight it by its distance from tip
    # produce the currents
    # Divide the whole by the norm factor (area): 4 * numpy.pi * sigma

    # 95% of the LFP signal results from the conductances of all exc and inh cells within a 250 um radius of the electrode tip (Katzner et al. 2009).
    # Mostly excitatory neurons contribute to the LFP (because of their geometry; Bartos).
    # Therefore we include all recorded cells but account for the distance-dependent contribution by weighting currents by 1/r^2.
    # We assume that the electrode has been placed at the cortical coordinates <tip>.
    # Given that the current V1 orientation map has one pixel per 100 um, a reasonable neighborhood to consider is a radius of 300 um.
    # (a standalone sketch of the distance-weighted point-source estimate follows this function)

    print "LFP electrode tip location (x,y) in degrees:", tip

    # Gather vm and conductances
    segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(),
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    ticks = set([])
    for x in segs:
        ticks.add(
            getattr(MozaikParametrized.idd(x.annotations['stimulus']),
                    parameter))
    ticks = sorted(ticks)
    num_ticks = len(ticks)
    print ticks
    trials = len(segs) / num_ticks
    print "trials:", trials

    pop_vm = []
    pop_gsyn_e = []
    pop_gsyn_i = []
    for n, idd in enumerate(ids):
        print "idd", idd
        full_vm = [s.get_vm(idd) for s in segs]  # all segments
        full_gsyn_es = [s.get_esyn(idd) for s in segs]
        full_gsyn_is = [s.get_isyn(idd) for s in segs]
        print "len full_gsyn_e", len(
            full_gsyn_es)  # segments = stimuli * trials
        print "shape gsyn_e[0]", full_gsyn_es[0].shape  # stimulus lenght
        # mean input over trials
        mean_full_vm = numpy.zeros((num_ticks, full_vm[0].shape[0]))  # init
        mean_full_gsyn_e = numpy.zeros(
            (num_ticks, full_gsyn_es[0].shape[0]))  # init
        mean_full_gsyn_i = numpy.zeros((num_ticks, full_gsyn_es[0].shape[0]))
        # print "shape mean_full_gsyn_e/i", mean_full_gsyn_e.shape
        sampling_period = full_gsyn_es[0].sampling_period
        t_stop = float(full_gsyn_es[0].t_stop - sampling_period)  # 200.0
        t_start = float(full_gsyn_es[0].t_start)
        time_axis = numpy.arange(0, len(full_gsyn_es[0]), 1) / float(
            len(full_gsyn_es[0])) * abs(t_start - t_stop) + t_start
        # sum by size
        t = 0
        for v, e, i in zip(full_vm, full_gsyn_es, full_gsyn_is):
            s = int(t / trials)
            v = v.rescale(mozaik.tools.units.mV)
            e = e.rescale(
                mozaik.tools.units.nS)  # NEST is in nS, PyNN is in uS
            i = i.rescale(
                mozaik.tools.units.nS)  # NEST is in nS, PyNN is in uS
            mean_full_vm[s] = mean_full_vm[s] + numpy.array(v.tolist())
            mean_full_gsyn_e[s] = mean_full_gsyn_e[s] + numpy.array(e.tolist())
            mean_full_gsyn_i[s] = mean_full_gsyn_i[s] + numpy.array(i.tolist())
            t = t + 1

        # average by trials
        for st in range(num_ticks):
            mean_full_vm[st] = mean_full_vm[st] / trials
            mean_full_gsyn_e[st] = mean_full_gsyn_e[st] / trials
            mean_full_gsyn_i[st] = mean_full_gsyn_i[st] / trials

        pop_vm.append(mean_full_vm)
        pop_gsyn_e.append(mean_full_gsyn_e)
        pop_gsyn_i.append(mean_full_gsyn_i)

    pop_v = numpy.array(pop_vm)
    pop_e = numpy.array(pop_gsyn_e)
    pop_i = numpy.array(pop_gsyn_i)

    # Produce the current for each cell for this time interval, with Ohm's law:
    # I = ge(V-Ee) + gi(V+Ei)
    # where
    # Ee is the equilibrium for exc, which is 0.0
    # Ei is the equilibrium for inh, which is -80.0
    i = pop_e * (pop_v - 0.0) + pop_i * (pop_v - 80.0)
    # i = pop_e*(pop_v-0.0) + 0.3*pop_i*(pop_v-80.0)
    # i = pop_e*(pop_v-0.0) # only exc
    # the LFP is obtained from the summed currents of all cells, normalized by 4*pi*sigma
    sum_i = numpy.sum(i, axis=0)
    lfp = sum_i / (4 * numpy.pi * sigma)  #
    lfp /= 1000.  # from milli to micro
    print "LFP:", lfp.shape, lfp.mean(), lfp.min(), lfp.max()
    # print lfp
    # lfp = np.convolve(lfp, np.ones((10,))/10, mode='valid') # moving avg or running mean implemented as a convolution over steps of 10, divided by 10

    #plot the LFP for each stimulus
    for s in range(num_ticks):
        # for each stimulus plot the average conductance per cell over time
        matplotlib.rcParams.update({'font.size': 22})
        fig, ax = plt.subplots()

        ax.plot(range(0, len(lfp[s])), lfp[s], color=color, linewidth=3)

        # ax.set_ylim([lfp.min(), lfp.max()])
        ax.set_ylim(ylim)
        ax.set_ylabel("LFP (uV)")
        ax.set_xlabel("Time (us)")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.xaxis.set_ticks_position('bottom')
        ax.xaxis.set_ticks(ticks, ticks)
        ax.yaxis.set_ticks_position('left')

        # text
        plt.tight_layout()
        plt.savefig(folder + "/TimecourseLFP_" + sheet + "_" + parameter +
                    "_" + str(ticks[s]) + "_" + addon + ".svg",
                    dpi=200,
                    transparent=True)
        fig.clf()
        plt.close()
        # garbage
        gc.collect()
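
As a complement to the LFP calculation above, here is a hedged sketch of the point-source approximation described in the comments: each cell contributes its synaptic current, weighted by the inverse of its distance from the electrode tip, and the sum is normalized by 4*pi*sigma. It uses the textbook driving-force form I = g*(V - E_rev) with assumed reversal potentials of 0 mV and -80 mV; all arrays and names are synthetic, and this is not mozaik code.

import numpy

def lfp_point_source(vm, g_exc, g_inh, distances, sigma=0.3,
                     e_exc=0.0, e_inh=-80.0):
    """Point-source LFP estimate from per-cell traces.

    vm, g_exc, g_inh : arrays of shape (cells, time)
    distances        : distance of each cell from the electrode tip, shape (cells,)
    sigma            : extracellular conductivity (arbitrary units here)
    """
    # per-cell synaptic current, I = g_e*(V - E_e) + g_i*(V - E_i)
    currents = g_exc * (vm - e_exc) + g_inh * (vm - e_inh)
    # weight each cell by 1/r and normalize by the solid-angle factor
    weighted = currents / distances[:, None]
    return weighted.sum(axis=0) / (4.0 * numpy.pi * sigma)

# toy usage with synthetic traces
rng = numpy.random.RandomState(0)
cells, steps = 50, 2000
vm = -65.0 + rng.randn(cells, steps)
g_exc = numpy.abs(rng.randn(cells, steps))
g_inh = numpy.abs(rng.randn(cells, steps))
distances = rng.uniform(0.05, 0.3, size=cells)
lfp = lfp_point_source(vm, g_exc, g_inh, distances)
print("LFP trace: %d samples, mean %.3f" % (lfp.shape[0], lfp.mean()))
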
Ejemplo n.º 29
0
from pyNN import nest
import sys
import mozaik.controller
from mozaik.controller import run_workflow, setup_logging
import mozaik
from experiments import create_experiments_cs, create_experiments_bar
from model import SelfSustainedPushPull
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization_bar, perform_analysis_and_visualization_contrast_sensitivity, perform_analysis_and_visualization_small
from parameters import ParameterSet

#mpi_comm = MPI.COMM_WORLD

if True:
    data_store, model = run_workflow('TestLGN', SelfSustainedPushPull,
                                     create_experiments_cs)
    data_store.save()
else:
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      'TestLGN_test_____',
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)

#if mpi_comm.rank == 0:
#   print "Starting visualization"
perform_analysis_and_visualization_contrast_sensitivity(data_store)
Ejemplo n.º 30
0
def trial_averaged_tuning_curve_errorbar(sheet,
                                         folder,
                                         stimulus,
                                         parameter,
                                         start,
                                         end,
                                         xlabel="",
                                         ylabel="",
                                         color="black",
                                         percentile=False,
                                         useXlog=False,
                                         useYlog=False,
                                         ylim=[0., 100.],
                                         xlim=False,
                                         opposite=False,
                                         box=None,
                                         radius=None,
                                         addon="",
                                         data=None,
                                         data_curve=True):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    neurons = []
    neurons = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)

    if radius:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse()
    l4_exc_or = data_store.get_analysis_result(
        identifier='PerNeuronValue',
        value_name='LGNAfferentOrientation',
        sheet_name=sheet)[0]
    l4_exc_or_many = numpy.array(neurons)[numpy.nonzero(
        numpy.array([
            circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
            for i in neurons
        ]) < .1)[0]]
    neurons = list(l4_exc_or_many)

    print "Selected neurons:", len(neurons)  #, neurons
    if len(neurons) < 1:
        return

    TrialAveragedFiringRate(
        param_filter_query(data_store, sheet_name=sheet, st_name=stimulus),
        ParameterSet({'neurons': list(neurons)})).analyse()

    PlotTuningCurve(
        param_filter_query(data_store,
                           st_name=stimulus,
                           analysis_algorithm=['TrialAveragedFiringRate']),
        ParameterSet({
            'polar': False,
            'pool': False,
            'centered': False,
            'percent': False,
            'mean': True,
            'parameter_name': parameter,
            'neurons': list(neurons),
            'sheet_name': sheet
        }),
        fig_param={
            'dpi': 200
        },
        plot_file_name=folder + "/TrialAveragedSensitivityNew_" + stimulus +
        "_" + parameter + "_" + str(sheet) + "_" + addon + "_mean.svg"
    ).plot({
        # '*.y_lim':(0,30),
        # '*.x_lim':(-10,100),
        # '*.x_scale':'log', '*.x_scale_base':10,
        '*.fontsize': 17
    })
    return
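
trial_averaged_tuning_curve_errorbar above delegates the actual work to mozaik's TrialAveragedFiringRate and PlotTuningCurve. As a rough standalone illustration of what a trial-averaged tuning curve with error bars amounts to, here is a short sketch on synthetic firing rates; the shapes, values and file name are illustrative only.

import numpy
from scipy.stats import sem
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# synthetic firing rates: (stimulus values, trials, neurons)
rng = numpy.random.RandomState(0)
stim_values = numpy.linspace(0.1, 6.0, 10)          # e.g. stimulus radius
rates = rng.poisson(lam=20.0, size=(stim_values.size, 8, 30)).astype(float)

# average over trials and neurons; SEM across trials of the population mean
per_trial_mean = rates.mean(axis=2)                 # (stimuli, trials)
mean_rate = per_trial_mean.mean(axis=1)
err = sem(per_trial_mean, axis=1)

fig, ax = plt.subplots()
ax.errorbar(stim_values, mean_rate, yerr=err, color='black', linewidth=2)
ax.set_xlabel("stimulus value")
ax.set_ylabel("response (spikes/s)")
fig.savefig("tuning_curve_errorbar_sketch.svg", dpi=200, transparent=True)
plt.close(fig)
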
Ejemplo n.º 31
0
"""
import matplotlib
matplotlib.use('Agg')
from mpi4py import MPI 
from pyNN import nest
import sys
import mozaik.controller
from mozaik.controller import run_workflow, setup_logging
import mozaik
from experiments import create_experiments,create_experiments_bar,create_experiments_short,create_experiments_old
from model import SelfSustainedPushPull
from mozaik.storage.datastore import Hdf5DataStore,PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization
from parameters import ParameterSet


mpi_comm = MPI.COMM_WORLD

if True:
    data_store,model = run_workflow('MorganTaylorModel',SelfSustainedPushPull,create_experiments)
    data_store.save() 
else: 
    setup_logging()
    data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'MorganTaylorModel_visual_space_update=1ms_RF_resolution=1ms','store_stimuli' : False}),replace=True)

if mpi_comm.rank == 0:
   print "Starting visualization" 
   perform_analysis_and_visualization(data_store,gratings=True,bars=True)
#   data_store.save() 
Ejemplo n.º 32
0
            data_store)
        model.connectors['V1L4InhL4InhConnection'].store_connections(
            data_store)
        model.connectors['V1L4ExcL4ExcConnectionRand'].store_connections(
            data_store)
        model.connectors['V1L4ExcL4InhConnectionRand'].store_connections(
            data_store)
        model.connectors['V1L4InhL4ExcConnectionRand'].store_connections(
            data_store)
        model.connectors['V1L4InhL4InhConnectionRand'].store_connections(
            data_store)
        model.connectors['V1L23ExcL23ExcConnection'].store_connections(
            data_store)
        model.connectors['V1L23ExcL23InhConnection'].store_connections(
            data_store)
        model.connectors['V1L23InhL23ExcConnection'].store_connections(
            data_store)
        model.connectors['V1L23InhL23InhConnection'].store_connections(
            data_store)
        model.connectors['L4ExcL23ExcConnection'].store_connections(data_store)
        model.connectors['L4ExcL23InhConnection'].store_connections(data_store)
    data_store.save()
else:
    setup_logging()
    data_store = PickledDataStore(load=True, parameters=ParameterSet(
        {'root_directory': 'SelfSustainedPushPull_test____', 'store_stimuli': False}), replace=True)

# if mpi_comm.rank == 0:
print("Starting visualization")
perform_analysis_and_visualization(data_store)
Ejemplo n.º 33
0
def perform_comparison_size_tuning( sheet, reference_position, step, sizes, folder_full, folder_inactive, reverse=False, Ssmaller=3, Sequal=4, SequalStop=5, Slarger=6, box=[] ):
	print folder_full
	data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_full, 'store_stimuli' : False}),replace=True)
	data_store_full.print_content(full_recordings=False)
	print folder_inactive
	data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_inactive, 'store_stimuli' : False}),replace=True)
	data_store_inac.print_content(full_recordings=False)

	print "Checking data..."
	# Full
	dsv1 = queries.param_filter_query( data_store_full, identifier='PerNeuronValue', sheet_name=sheet )
	# dsv1.print_content(full_recordings=False)
	pnvs1 = [ dsv1.get_analysis_result() ]
	# get stimuli
	st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
	# print st1

	# Inactivated
	dsv2 = queries.param_filter_query( data_store_inac, identifier='PerNeuronValue', sheet_name=sheet )
	pnvs2 = [ dsv2.get_analysis_result() ]
	# get stimuli
	st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]

	# rings analysis
	neurons_full = []
	neurons_inac = []
	rowplots = 0
	max_size = 0.6

	slice_ranges = numpy.arange(step, max_size+step, step)
	for col,cur_range in enumerate(slice_ranges):
		radius = [cur_range-step,cur_range]
		print col
		# get the list of all recorded neurons in X_ON
		# Full
		spike_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
		positions1 = data_store_full.get_neuron_postions()[sheet]
		# print numpy.min(positions1), numpy.max(positions1) 
		sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids1)
		radius_ids1 = select_ids_by_position(reference_position, radius, sheet_ids1, positions1, reverse, box)
		# 0/0
		neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet, indexes=radius_ids1)
		if len(neurons1) > rowplots:
			rowplots = len(neurons1)
		neurons_full.append(neurons1)

		# Inactivated
		spike_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
		positions2 = data_store_inac.get_neuron_postions()[sheet]
		sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids2)
		radius_ids2 = select_ids_by_position(reference_position, radius, sheet_ids2, positions2, reverse, box)
		neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet, indexes=radius_ids2)
		neurons_inac.append(neurons2)

		print "radius_ids", radius_ids2
		print "neurons_full", neurons_full
		print "neurons_inac", neurons_inac

		assert len(neurons_full[col]) == len(neurons_inac[col]) , "ERROR: the number of recorded neurons is different"
		assert set(neurons_full[col]) == set(neurons_inac[col]) , "ERROR: the neurons in the two arrays are not the same"

	# to analyse old simulation it is necessary to choose corresponding ids,
	# do it by hand, running this script several times and noting them down here:
	# neurons_full = [numpy.array([2912, 3205, 1867, 2731, 2248])]
	# neurons_inac = [numpy.array([2912, 3205, 1867, 2731, 2248])]
	# neurons_full =[numpy.array([10921, 10024, 13851,  9855, 11648, 13277])]
	# neurons_inac =[numpy.array([10921, 10024, 13851,  9855, 11648, 13277])]

	# subplot figure creation
	print 'rowplots', rowplots
	print "Starting plotting ..."
	print len(slice_ranges), slice_ranges
	fig, axes = plt.subplots(nrows=len(slice_ranges), ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
	# fig, axes = plt.subplots(nrows=2, ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
	print axes.shape

	p_significance = .02
	for col,cur_range in enumerate(slice_ranges):
		radius = [cur_range-step,cur_range]
		print col
		interval = str(radius[0]) +" - "+ str(radius[1]) +" deg radius"
		print interval
		axes[col,0].set_ylabel(interval+"\n\nResponse change (%)")
		print "range:",col
		if len(neurons_full[col]) < 1:
			continue
		print "neurons_full:", len(neurons_full[col]), neurons_full[col]
		print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]

		tc_dict1 = []
		tc_dict2 = []

		# Full
		# group values 
		dic = colapse_to_dictionary([z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1, 'radius')
		for k in dic:
		    (b, a) = dic[k]
		    par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
		    dic[k] = (par,numpy.array(val))
		tc_dict1.append(dic)

		# Inactivated
		# group values 
		dic = colapse_to_dictionary([z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2, 'radius')
		for k in dic:
		    (b, a) = dic[k]
		    par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
		    dic[k] = (par,numpy.array(val))
		tc_dict2.append(dic)

		# Plotting tuning curves
		x_full = tc_dict1[0].values()[0][0]
		x_inac = tc_dict2[0].values()[0][0]
		# each cell couple 
		print "(stimulus conditions, cells):", tc_dict1[0].values()[0][1].shape # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)
		axes[col,1].set_ylabel("Response (spikes/sec)", fontsize=10)
		for j,nid in enumerate(neurons_full[col]):
			# print col,j,nid
			if len(neurons_full[col])>1: # case with just one neuron in the group
				y_full = tc_dict1[0].values()[0][1][:,j]
				y_inac = tc_dict2[0].values()[0][1][:,j]
			else:
				y_full = tc_dict1[0].values()[0][1]
				y_inac = tc_dict2[0].values()[0][1]
			axes[col,j+1].plot(x_full, y_full, linewidth=2, color='b')
			axes[col,j+1].plot(x_inac, y_inac, linewidth=2, color='r')
			axes[col,j+1].set_title(str(nid), fontsize=10)
			axes[col,j+1].set_xscale("log")

		# Population histogram
		diff_full_inac = []
		sem_full_inac = []
		num_cells = tc_dict1[0].values()[0][1].shape[1]
		smaller_pvalue = 0.
		equal_pvalue = 0.
		larger_pvalue = 0.

		# -------------------------------------
		# NON-PARAMETRIC TWO-TAILED TEST ON THE DIFFERENCE BETWEEN INACTIVATED AND CONTROL
		# We want to have a summary measure of the population of cells with and without inactivation.
		# Our null-hypothesis is that the inactivation does not change the activity of cells.
		# A different result would tell us that the inactivation DOES something;
		# therefore our null-hypothesis is the result obtained in the intact system.
		# (a standalone sketch of such a grouped, paired test follows this function)
		# Procedure:
		# We have several stimulus sizes
		# We want to group them in three: smaller than optimal, optimal, larger than optimal
		# We do the mean response for each cell for the grouped stimuli
		#    i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
		# We repeat for each group

		# average of all trial-averaged response for each cell for grouped stimulus size
		# we want the difference / normalized by the highest value * expressed as percentage
		# print num_cells
		# print "inac",numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0)
		# print "full",numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)
		# print "diff",(numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))
		# print "diff_norm",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)))
		# print "diff_norm_perc",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))) * 100

		# diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2)) * 100
		# diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2)) * 100
		# diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5 - numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5) / (numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5)) * 100
		# diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) * 100
		diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][Ssmaller:Sequal], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) * 100
		diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][Sequal:SequalStop], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) * 100
		diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][Slarger:], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) * 100
		# print "diff_smaller", diff_smaller
		# average of all cells
		smaller = sum(diff_smaller) / num_cells
		equal = sum(diff_equal) / num_cells
		larger = sum(diff_larger) / num_cells

		# Check using scipy
		# and we want to compare the responses of full and inactivated
		# smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
		# equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
		# larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
		# print "smaller, smaller_pvalue:", smaller, smaller_pvalue
		# print "equal, equal_pvalue:", equal, equal_pvalue
		# print "larger, larger_pvalue:", larger, larger_pvalue

		diff_full_inac.append( smaller )
		diff_full_inac.append( equal )
		diff_full_inac.append( larger )

		# -------------------------------------
		# Standard Error Mean calculated on the full sequence
		sem_full_inac.append( scipy.stats.sem(diff_smaller) )
		sem_full_inac.append( scipy.stats.sem(diff_equal) )
		sem_full_inac.append( scipy.stats.sem(diff_larger) )

		# print diff_full_inac
		# print sem_full_inac
		barlist = axes[col,0].bar([0.5,1.5,2.5], diff_full_inac, width=0.8)
		axes[col,0].plot([0,4], [0,0], 'k-') # horizontal 0 line
		for ba in barlist:
			ba.set_color('white')
		if smaller_pvalue < p_significance:
			barlist[0].set_color('brown')
		if equal_pvalue < p_significance:
			barlist[1].set_color('darkgreen')
		if larger_pvalue < p_significance:
			barlist[2].set_color('blue')
		# colors = ['brown', 'darkgreen', 'blue']
		# for patch, color in zip(bp['boxes'], colors):
		# 	patch.set_facecolor(color)

	fig.subplots_adjust(hspace=0.4)
	# fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
	fig.text(0.5, 0.04, 'cells', ha='center', va='center')
	fig.text(0.06, 0.5, 'ranges', ha='center', va='center', rotation='vertical')
	for ax in axes.flatten():
		ax.set_ylim([0,60])
		ax.set_xticks(sizes)
		# ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
		ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])

	for col,_ in enumerate(slice_ranges):
		# axes[col,0].set_ylim([-.8,.8])
		axes[col,0].set_ylim([-60,60])
		axes[col,0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
		axes[col,0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
		axes[col,0].set_xlim([0,4])
		axes[col,0].set_xticks([.9,1.9,2.9])
		axes[col,0].set_xticklabels(['small', 'equal', 'larger'])
		axes[col,0].spines['right'].set_visible(False)
		axes[col,0].spines['top'].set_visible(False)
		axes[col,0].spines['bottom'].set_visible(False)

	# plt.show()
	plt.savefig( folder_inactive+"/TrialAveragedSizeTuningComparison_"+sheet+"_step"+str(step)+"_box"+str(box)+".png", dpi=100 )
	# plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
	fig.clf()
	plt.close()
	# garbage
	gc.collect()
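
The comment block inside the function above describes a non-parametric two-tailed test of the inactivation effect per size group, but the p-values in this version are left at 0. Below is a hedged sketch of how such a paired test could be computed for one group with scipy; the data are synthetic, and the Wilcoxon signed-rank test is just one reasonable choice of test.

import numpy
import scipy.stats

# synthetic per-cell mean responses (cells,) for one size group,
# in the intact ("full") and inactivated conditions
rng = numpy.random.RandomState(0)
full_group = rng.gamma(5.0, 2.0, size=32)
inac_group = full_group * rng.normal(loc=0.9, scale=0.1, size=32)

# percent change per cell, as in the bars above
percent_change = (inac_group - full_group) / full_group * 100.0
print("mean change: %.1f%%  SEM: %.2f"
      % (percent_change.mean(), scipy.stats.sem(percent_change)))

# paired, two-tailed non-parametric test (Wilcoxon signed-rank)
stat, pvalue = scipy.stats.wilcoxon(full_group, inac_group)
print("Wilcoxon statistic %.1f, p = %.4f" % (stat, pvalue))
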
Ejemplo n.º 34
0
The Journal of neuroscience : the official journal of the Society for Neuroscience, 25(46), 10786–95. 
"""
from pyNN import nest
import sys
import mozaik
from mozaik.controller import run_workflow, setup_logging
from experiments import create_experiments
from model import VogelsAbbott
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from analysis_and_visualization import perform_analysis_and_visualization

from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD

if True:
    logger = mozaik.getMozaikLogger()
    data_store, model = run_workflow('VogeslAbbott2005', VogelsAbbott,
                                     create_experiments)
else:
    setup_logging()
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet(
                                      {'root_directory': 'A'}),
                                  replace=True)
    logger.info('Loaded data store')

if mpi_comm.rank == 0:
    print "Starting visualization"
    perform_analysis_and_visualization(data_store)
    data_store.save()
Ejemplo n.º 35
0
def perform_comparison_size_inputs( sheet, sizes, folder_full, folder_inactive, with_ppd=False ):
	print folder_full
	data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_full, 'store_stimuli' : False}),replace=True)
	data_store_full.print_content(full_recordings=False)
	print folder_inactive
	data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_inactive, 'store_stimuli' : False}),replace=True)
	data_store_inac.print_content(full_recordings=False)

	print "Checking data..."

	analog_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_vm_ids()
	print analog_ids1
	analog_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_vm_ids()
	print analog_ids2

	assert len(analog_ids1) == len(analog_ids2) , "ERROR: the number of recorded neurons is different"
	assert set(analog_ids1) == set(analog_ids2) , "ERROR: the neurons in the two arrays are not the same"

	num_sizes = len( sizes )

	for _,idd in enumerate(analog_ids1):

		# get trial averaged gsyn for each stimulus condition (a standalone sketch of this trial-averaging step follows this function)
		# then subtract full - inactive for each stimulus condition (size)
		# then summarize the time differences in one number, to have one point for each size

		# Full
		segs = sorted( 
			param_filter_query(data_store_full, st_name='DriftingSinusoidalGratingDisk', sheet_name=sheet).get_segments(), 
			key = lambda x : MozaikParametrized.idd(x.annotations['stimulus']).radius 
		)
		print "full idd", idd # 
		# print len(segs), "/", num_sizes
		trials = len(segs) / num_sizes
		# print trials
		full_gsyn_es = [s.get_esyn(idd) for s in segs]
		full_gsyn_is = [s.get_isyn(idd) for s in segs]
		# print "len full_gsyn_e/i", len(full_gsyn_es) # 61 = 1 spontaneous + 6 trial * 10 num_sizes
		# print "shape gsyn_e/i", full_gsyn_es[0].shape
		# mean input over trials
		mean_full_gsyn_e = numpy.zeros((num_sizes, full_gsyn_es[0].shape[0])) # init
		mean_full_gsyn_i = numpy.zeros((num_sizes, full_gsyn_es[0].shape[0]))
		# print "shape mean_full_gsyn_e/i", mean_full_gsyn_e.shape
		sampling_period = full_gsyn_es[0].sampling_period
		t_stop = float(full_gsyn_es[0].t_stop - sampling_period)
		t_start = float(full_gsyn_es[0].t_start)
		time_axis = numpy.arange(0, len(full_gsyn_es[0]), 1) / float(len(full_gsyn_es[0])) * abs(t_start-t_stop) + t_start
		# sum by size
		t = 0
		for e,i in zip(full_gsyn_es, full_gsyn_is):
			s = int(t/trials)
			e = e.rescale(mozaik.tools.units.nS) #e=e*1000
			i = i.rescale(mozaik.tools.units.nS) #i=i*1000
			mean_full_gsyn_e[s] = mean_full_gsyn_e[s] + numpy.array(e.tolist())
			mean_full_gsyn_i[s] = mean_full_gsyn_i[s] + numpy.array(i.tolist())
			t = t+1
		# average by trials
		for s in range(num_sizes):
			mean_full_gsyn_e[s] = mean_full_gsyn_e[s] / trials
			mean_full_gsyn_i[s] = mean_full_gsyn_i[s] / trials
		# print "mean_full_gsyn_e", len(mean_full_gsyn_e), mean_full_gsyn_e
		# print "mean_full_gsyn_i", len(mean_full_gsyn_i), mean_full_gsyn_i

		# Inactivated
		segs = sorted( 
			param_filter_query(data_store_inac, st_name='DriftingSinusoidalGratingDisk', sheet_name=sheet).get_segments(), 
			key = lambda x : MozaikParametrized.idd(x.annotations['stimulus']).radius 
		)
		print "inactivation idd", idd # 
		# print len(segs), "/", num_sizes
		trials = len(segs) / num_sizes
		# print trials
		inac_gsyn_es = [s.get_esyn(idd) for s in segs]
		inac_gsyn_is = [s.get_isyn(idd) for s in segs]
		# print "len full_gsyn_e/i", len(inac_gsyn_es) # 61 = 1 spontaneous + 6 trial * 10 num_sizes
		# print "shape gsyn_e/i", inac_gsyn_es[0].shape
		# mean input over trials
		mean_inac_gsyn_e = numpy.zeros((num_sizes, inac_gsyn_es[0].shape[0])) # init
		mean_inac_gsyn_i = numpy.zeros((num_sizes, inac_gsyn_es[0].shape[0]))
		# print "shape mean_inac_gsyn_e/i", mean_inac_gsyn_e.shape
		sampling_period = inac_gsyn_es[0].sampling_period
		t_stop = float(inac_gsyn_es[0].t_stop - sampling_period)
		t_start = float(inac_gsyn_es[0].t_start)
		time_axis = numpy.arange(0, len(inac_gsyn_es[0]), 1) / float(len(inac_gsyn_es[0])) * abs(t_start-t_stop) + t_start
		# sum by size
		t = 0
		for e,i in zip(inac_gsyn_es, inac_gsyn_is):
			s = int(t/trials)
			e = e.rescale(mozaik.tools.units.nS) #e=e*1000
			i = i.rescale(mozaik.tools.units.nS) #i=i*1000
			mean_inac_gsyn_e[s] = mean_inac_gsyn_e[s] + numpy.array(e.tolist())
			mean_inac_gsyn_i[s] = mean_inac_gsyn_i[s] + numpy.array(i.tolist())
			t = t+1
		# average by trials
		for s in range(num_sizes):
			mean_inac_gsyn_e[s] = mean_inac_gsyn_e[s] / trials
			mean_inac_gsyn_i[s] = mean_inac_gsyn_i[s] / trials
		# print "mean_inac_gsyn_e", len(mean_inac_gsyn_e), mean_inac_gsyn_e.shape
		# print "mean_inac_gsyn_i", len(mean_inac_gsyn_i), mean_inac_gsyn_i.shape

		# PSP Area response plot (as in LindstromWrobel2011)
		max_full_gsyn_e = numpy.amax(mean_full_gsyn_e, axis=1)
		max_full_gsyn_i = numpy.amax(mean_full_gsyn_i, axis=1)
		norm_full_gsyn_e = (mean_full_gsyn_e.sum(axis=1) / 10291) / max_full_gsyn_e *100
		norm_full_gsyn_i = (mean_full_gsyn_i.sum(axis=1) / 10291) / max_full_gsyn_i *100

		max_inac_gsyn_e = numpy.amax(mean_inac_gsyn_e, axis=1)
		max_inac_gsyn_i = numpy.amax(mean_inac_gsyn_i, axis=1)
		norm_inac_gsyn_e = (mean_inac_gsyn_e.sum(axis=1) / 10291) / max_full_gsyn_e *100
		norm_inac_gsyn_i = (mean_inac_gsyn_i.sum(axis=1) / 10291) / max_full_gsyn_i *100

		plt.figure()
		plt.errorbar(sizes, norm_full_gsyn_e, color='red', linewidth=2)#, xerr=0.2, yerr=0.4)
		plt.errorbar(sizes, norm_full_gsyn_i, color='blue', linewidth=2)#, xerr=0.2, yerr=0.4)
		plt.errorbar(sizes, norm_inac_gsyn_e, color='purple', linewidth=2)#, xerr=0.2, yerr=0.4)
		plt.errorbar(sizes, norm_inac_gsyn_i, color='cyan', linewidth=2)#, xerr=0.2, yerr=0.4)
		plt.xscale("log")
		plt.xticks(sizes, sizes)
		plt.ylabel("PSP (%)", fontsize=10)
		plt.xlabel("sizes", fontsize=10)
		plt.title("PSP Area response plot "+sheet)
		plt.savefig( folder_inactive+"/TrialAveragedPSP_"+sheet+".png", dpi=100 )
		plt.close()

		# Point-to-Point difference 
		if with_ppd:
			diff_e_full_inac = mean_full_gsyn_e - mean_inac_gsyn_e
			diff_i_full_inac = mean_full_gsyn_i - mean_inac_gsyn_i
			# print "diff_e_full_inac", len(diff_e_full_inac), diff_e_full_inac
			# print "diff_i_full_inac", len(diff_i_full_inac), diff_i_full_inac
			fig, axes = plt.subplots(nrows=1, ncols=num_sizes, figsize=(10*num_sizes, 10))
			print axes.shape
			# http://paletton.com/#uid=7020Q0km5KqbrV8hkPPqCEHz+z+
			for s in range(num_sizes):
				axes[s].plot(mean_full_gsyn_e[s], color='#F93026')
				axes[s].plot(mean_full_gsyn_i[s], color='#294BA8')
				axes[s].plot(mean_inac_gsyn_e[s], color='#FF7C75')
				axes[s].plot(mean_inac_gsyn_i[s], color='#7592E1')
				axes[s].plot(diff_e_full_inac[s], color='#FFC64C')
				axes[s].plot(diff_i_full_inac[s], color='#6CEA7B')
				axes[s].set_title(str(sizes[s]))
			plt.savefig( folder_inactive+"/TrialAveragedConductanceComparison_"+sheet+".png", dpi=100 )
			# plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
			plt.close()

		plt.close()
		# garbage
		gc.collect()
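
The loops above rely on the segments being sorted by stimulus size, so that the running index t maps to a size group via s = int(t / trials). A hedged, self-contained sketch of that trial-averaging step on synthetic conductance traces follows; the shapes and names are illustrative.

import numpy

# synthetic setup: num_sizes stimulus sizes, `trials` repetitions each,
# traces of `steps` samples, already sorted by size (all trials of size 0 first, ...)
num_sizes, trials, steps = 10, 6, 1470
rng = numpy.random.RandomState(0)
traces = [numpy.abs(rng.randn(steps)) for _ in range(num_sizes * trials)]

# accumulate each trace into its size group, then divide by the trial count
mean_by_size = numpy.zeros((num_sizes, steps))
for t, trace in enumerate(traces):
    s = t // trials                      # group index of this trace
    mean_by_size[s] += trace
mean_by_size /= trials

print("trial-averaged traces per size: %s" % (mean_by_size.shape,))
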
Ejemplo n.º 36
0
def run_experiments(model, experiment_list, parameters, load_from=None):
    """
    This is function called by :func:.run_workflow that executes the experiments in the `experiment_list` over the model. 
    Alternatively, if load_from is specified it will load an existing simulation from the path specified in load_from.
    
    Parameters
    ----------
    
    model : Model
          The model to execute experiments on.
    
    experiment_list : list
          The list of experiments to execute.
    
    parameters : ParameterSet
               The parameters given to the simulation run.
          
    load_from : str
              If not None it will load the simulation from the specified directory.
              
    Returns
    -------
    
    data_store : DataStore
               The data store containing the recordings.
    """

    # first lets run all the measurements required by the experiments
    logger.info('Starting Experiments')
    if load_from == None:
        data_store = PickledDataStore(load=False,
                                      parameters=MozaikExtendedParameterSet({
                                          'root_directory':
                                          Global.root_directory,
                                          'store_stimuli':
                                          parameters.store_stimuli
                                      }))
    else:
        data_store = PickledDataStore(load=True,
                                      parameters=MozaikExtendedParameterSet({
                                          'root_directory':
                                          load_from,
                                          'store_stimuli':
                                          parameters.store_stimuli
                                      }))

    data_store.set_neuron_ids(model.neuron_ids())
    data_store.set_neuron_positions(model.neuron_positions())
    data_store.set_neuron_annotations(model.neuron_annotations())
    data_store.set_model_parameters(str(parameters))
    data_store.set_sheet_parameters(str(model.sheet_parameters()))
    data_store.set_experiment_parametrization_list([
        (str(exp.__class__), str(exp.parameters)) for exp in experiment_list
    ])

    t0 = time.time()
    simulation_run_time = 0
    for i, experiment in enumerate(experiment_list):
        logger.info('Starting experiment: ' + experiment.__class__.__name__)
        stimuli = experiment.return_stimuli()
        unpresented_stimuli_indexes = data_store.identify_unpresented_stimuli(
            stimuli)
        logger.info('Running model')
        simulation_run_time += experiment.run(data_store,
                                              unpresented_stimuli_indexes)
        logger.info('Experiment %d/%d finished' %
                    (i + 1, len(experiment_list)))

    total_run_time = time.time() - t0
    mozaik_run_time = total_run_time - simulation_run_time

    logger.info('Total simulation run time: %.0fs' % total_run_time)
    logger.info(
        'Simulator run time: %.0fs (%d%%)' %
        (simulation_run_time, int(simulation_run_time / total_run_time * 100)))
    logger.info('Mozaik run time: %.0fs (%d%%)' %
                (mozaik_run_time, int(mozaik_run_time / total_run_time * 100)))

    return data_store
Ejemplo n.º 37
0
def perform_percent_tuning( sheet, reference_position, step, sizes, folder_full, folder_inactive ):
	print folder_full
	data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_full, 'store_stimuli' : False}),replace=True)
	data_store_full.print_content(full_recordings=False)
	print folder_inactive
	data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_inactive, 'store_stimuli' : False}),replace=True)
	data_store_inac.print_content(full_recordings=False)

	# full
	spike_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
	dsv = param_filter_query( data_store_full, st_name='DriftingSinusoidalGratingDisk', analysis_algorithm=['TrialAveragedFiringRateCutout'] )
	PlotTuningCurve(
		dsv,
		ParameterSet({
			'polar': False,
			'pool': False,
			'centered': False,
			'percent': True,
			'mean': True,
			'parameter_name' : 'radius', 
			# 'neurons': list(spike_ids1[11:12]), 
			'neurons': list(spike_ids1), 
			'sheet_name' : sheet
		}), 
		fig_param={'dpi' : 100,'figsize': (8,8)}, 
		# plot_file_name=folder_full+"/"+"SizeTuning_Grating_"+sheet+"_percent_"+str(spike_ids1[11:12])+".png"
		plot_file_name=folder_full+"/"+"SizeTuning_Grating_"+sheet+"_mean_percent.png"
	).plot({
		'*.y_lim':(0,100), 
		'*.y_label': "Response (%)",
		# '*.y_ticks':[10, 20, 30, 40, 50], 
		'*.x_ticks':[0.1, 1, 2, 4, 6], 
		'*.x_scale':'linear',
		#'*.x_scale':'log', '*.x_scale_base':2,
		'*.fontsize':24
	})

	# inactivated
	spike_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
	print spike_ids2
	dsv = param_filter_query( data_store_inac, st_name='DriftingSinusoidalGratingDisk', analysis_algorithm=['TrialAveragedFiringRateCutout'] )
	PlotTuningCurve(
		dsv,
		ParameterSet({
			'polar': False,
			'pool': False,
			'centered': False,
			'percent': True,
			'mean': True,
			'parameter_name' : 'radius', 
			'neurons': list(spike_ids2), 
			# 'neurons': list(spike_ids2[11:12]), 
			'sheet_name' : sheet
		}), 
		fig_param={'dpi' : 100,'figsize': (8,8)}, 
		# plot_file_name=folder_inactive+"/"+"SizeTuning_Grating_"+sheet+"_percent_"+str(spike_ids2[11:12])+".png"
		plot_file_name=folder_inactive+"/"+"SizeTuning_Grating_"+sheet+"_mean_percent.png"
	).plot({
		'*.y_lim':(0,100), 
		'*.y_label': "Response (%)",
		# '*.y_ticks':[10, 20, 30, 40, 50], 
		'*.x_ticks':[0.1, 1, 2, 4, 6], 
		'*.x_scale':'linear',
		#'*.x_scale':'log', '*.x_scale_base':2,
		'*.fontsize':24
	})