Code example #1
0
import os
import time

import neo
import h5py_wrapper.wrapper

# Determine the job index from the SLURM array environment.  When not
# running under a SLURM job array (variable absent) or the value is not a
# valid integer, fall back to job 1 and disable result saving.
try:
    job_parameter = int(os.environ['SLURM_ARRAY_TASK_ID'])
    save_res = True
except (KeyError, ValueError):
    # KeyError: SLURM_ARRAY_TASK_ID is not set (interactive / local run).
    # ValueError: the variable is set but not an integer.
    # (A bare 'except:' here would also swallow SystemExit/KeyboardInterrupt.)
    job_parameter = 1
    save_res = False

# =============================================================================
# Load Spinnaker data
# =============================================================================
# Job 0: load up to 100 spike trains from the SpiNNaker results file.
# NOTE(review): 'neo' is not imported in this chunk -- presumably imported
# elsewhere in the file; verify.
if job_parameter == 0:
    filename = '../../data/Spinnaker_Data/results/spikes_L5E.h5'
    session = neo.NeoHdf5IO(filename=filename)
    block = session.read_block()
    # Keep at most the first 100 SpikeTrain children of the block.
    sts = block.list_children_by_class(neo.SpikeTrain)[:100]
    print("Number of spinnaker spike trains: " + str(len(sts)))


# =============================================================================
# Load Nest data
# =============================================================================
# Job 1 (also the non-SLURM default): load spike trains from the NEST
# example output file.
if job_parameter == 1:
    filename = '../../data/Nest_Data/example_output_10500ms_nrec_100/spikes_L5E.h5'
    session = neo.NeoHdf5IO(filename=filename)

    # Accumulates the spike trains read below.
    sts = []

    # Collect 100 spike trains; the loop body continues beyond this chunk.
    for k in range(100):
Code example #2
0
def crosscorrelogram_task(inputdata, number_of_jobs, job_id):
    '''
        Task Manifest Version: 1
        Full Name: crosscorrelogram_task
        Caption: cross-correlogram
        Author: Elephant-Developers
        Description: |
            This task calculates all pair-wise cross-correlograms between all
            combinations of spike trains in the input file. Significance of
            the correlation is evaluated based on spike-dither surrogates.
        Categories:
            - FDAT
        Compatible_queues: ['cscs_viz']
        Accepts:
            inputdata: application/unknown
            number_of_jobs: long
            job_id: long

        Returns:
            res: application/unknown
    '''
    # Fail fast on an out-of-range job index, before the heavy science
    # imports and any file staging.  Valid indices are 0..number_of_jobs-1
    # because job_id indexes arrays of length number_of_jobs below.  The
    # original check used '>' which let job_id == number_of_jobs through
    # to an IndexError in the pair loop.
    if job_id >= number_of_jobs:
        print("Input data is invalid, exiting")
        return

    import numpy as np  # was used below but never imported in the original
    import quantities as pq
    import neo
    import elephant

    # =========================================================================
    # Load data
    # =========================================================================

    # stage the input file ('task.uri' is attached to this function by the
    # task framework at deployment time)
    original_path = crosscorrelogram_task.task.uri.get_file(inputdata)

    session = neo.NeoHdf5IO(filename=original_path)
    block = session.read_block()

    # select the spike trains carrying the 'use_st' annotation
    sts = block.filter(use_st=True)

    # =========================================================================
    # Cross-correlograms
    # =========================================================================

    # CCH window: +/-200 bins of 1 ms resolution, smoothed with 10 ms.
    max_lag_bins = 200
    lag_res = 1 * pq.ms
    max_lag = max_lag_bins * lag_res
    smoothing = 10 * pq.ms

    num_neurons = len(sts)

    # Result container; each sub-dict is keyed by the global pair index.
    cc = {}
    cc['unit_i'] = {}            # neuron index i of the pair
    cc['unit_j'] = {}            # neuron index j of the pair
    cc['times_ms'] = {}          # CCH time axis, in ms
    cc['original'] = {}          # CCH of the original spike-train pair
    cc['surr'] = {}              # CCHs of the surrogate pairs
    cc['original_measure'] = {}  # scalar measure of the original CCH
    cc['surr_measure'] = {}      # sorted scalar measures of the surrogates
    cc['pvalue'] = {}            # count of surrogate measures >= original

    # Enumerate all pairs (i, j) with j >= i in a fixed global order, so
    # that every job agrees on the pair <-> index mapping.
    num_total_pairs = 0
    all_combos_unit_i = []
    all_combos_unit_j = []
    for ni in range(num_neurons):
        for nj in range(ni, num_neurons):
            all_combos_unit_i.append(ni)
            all_combos_unit_j.append(nj)
            num_total_pairs += 1

    # Split the pair indices into number_of_jobs contiguous slices; this
    # job processes the half-open slice [start, stop) for index job_id.
    idx = np.linspace(0, num_total_pairs, number_of_jobs + 1, dtype=int)
    task_starts_idx = idx[:-1]
    task_stop_idx = idx[1:]

    # Loop over all pairs of neurons assigned to this job
    for calc_i in range(task_starts_idx[job_id], task_stop_idx[job_id]):
        # save neuron i,j index
        ni = all_combos_unit_i[calc_i]
        nj = all_combos_unit_j[calc_i]

        cc['unit_i'][calc_i] = ni
        cc['unit_j'][calc_i] = nj

        print("Cross-correlating %i and %i" % (ni, nj))

        # cross-correlogram of the original pair
        cco = elephant.spikecorr.cch(sts[ni],
                                     sts[nj],
                                     w=lag_res,
                                     lag=max_lag,
                                     smooth=smoothing)
        cc['original'][calc_i] = cco.magnitude
        cc['times_ms'][calc_i] = cco.times.rescale(pq.ms).magnitude

        # scalar measure at the bin closest to zero lag
        # NOTE(review): cch_measure is not defined in this chunk -- it must
        # be provided at module level; verify.
        ind = np.argmin(np.abs(cco.times))
        ccom = cch_measure(cco, ind)
        cc['original_measure'][calc_i] = ccom

        # spike-dither surrogates of both trains
        # NOTE(review): num_surrs is not defined in this chunk -- expected
        # at module level; TODO confirm.
        surr_i = elephant.surrogates.spike_dithering(sts[ni],
                                                     dither=50. * pq.ms,
                                                     n=num_surrs)
        surr_j = elephant.surrogates.spike_dithering(sts[nj],
                                                     dither=50. * pq.ms,
                                                     n=num_surrs)

        ccs = []
        ccsm = []

        # cross-correlogram of each surrogate pair
        for surrogate in range(num_surrs):
            scc = elephant.spikecorr.cch(surr_i[surrogate],
                                         surr_j[surrogate],
                                         w=lag_res,
                                         lag=max_lag,
                                         smooth=smoothing)
            ccs.append(scc.magnitude)
            ccsm.append(cch_measure(scc, ind))
        cc['surr'][calc_i] = np.array(ccs)
        cc['surr_measure'][calc_i] = np.sort(ccsm)
        # raw count of surrogate measures >= original (not normalized by
        # num_surrs)
        cc['pvalue'][calc_i] = np.count_nonzero(np.array(ccsm) >= ccom)

    # save result to hdf5 and register the file with the task framework.
    # NOTE(review): export_hdf5 is not defined in this chunk -- presumably
    # provided at module level (e.g. via h5py_wrapper); verify.
    outputname = 'cc_result' + str(number_of_jobs) + '_' + str(job_id) + '.h5'
    export_hdf5(cc, outputname)
    # The original spelled the mime type with a backslash-newline inside the
    # string literal, which embedded ~52 spaces of indentation into the
    # value ('\n...application/unknown'); fixed to the plain string.
    return crosscorrelogram_task.task.uri.save_file(
        mime_type='application/unknown',
        src_path=outputname,
        dst_path=outputname)