Code example #1
def parse_type(original):
    '''Parse a type string from the manifest, allowing for some syntactic sugar.'''
    if not isinstance(original, str):
        raise TypeError('Expected string but got %s instead (for: "%s")' %
                        (type(original).__name__, original))

    value = original.lower()

    alias_map = {
        tt.LongType: ['int', 'long'],
        tt.DoubleType: ['float', 'double'],
        tt.BooleanType: ['bool', 'boolean'],
        tt.StringType: ['str', 'string'],
    }

    for cl, aliases in alias_map.items():
        if value in aliases:
            return cl()

    match = re.search(r'list\(([a-zA-Z0-9=;/\.\+]+)\)', original)
    if match:
        subtype = parse_type(match.group(1))
        return tt.ListOf(subtype=subtype)

    try:
        get_mimetype(original)
        return tt.URIType(category=original)
    except ValueError:
        pass

    return object_from_string_call(original)
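
For reference, here is a minimal standalone sketch of the alias resolution and the list(...) sugar handled by parse_type above; the canonical names and the resolve_alias helper are illustrative stand-ins for the tt type classes, not part of the original API.

import re

# Illustrative alias table: each canonical name maps to its accepted spellings.
ALIASES = {
    'long': ['int', 'long'],
    'double': ['float', 'double'],
    'boolean': ['bool', 'boolean'],
    'string': ['str', 'string'],
}


def resolve_alias(original):
    """Mirror parse_type's lookup loop and its list(...) regex branch."""
    value = original.lower()
    for canonical, aliases in ALIASES.items():
        if value in aliases:
            return canonical
    match = re.search(r'list\(([a-zA-Z0-9=;/\.\+]+)\)', original)
    if match:
        return 'list of ' + resolve_alias(match.group(1))
    raise ValueError('unknown type alias: %s' % original)


print(resolve_alias('Float'))       # -> 'double'
print(resolve_alias('list(str)'))   # -> 'list of string'
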
Code example #2
    h5_bundle_mime_type = "application/unknown"
    bundle = nestBundle2NeoH5_task.task.uri.build_bundle(h5_bundle_mime_type)

    for gdf in input_files:
        input_file = gdfio.GdfIO(gdf)
        # with an empty gdf_id_list, all neurons that spike at least
        # once are registered in the bundle
        seg = input_file.read_segment(gdf_id_list=[],
                                      t_start=t_start * pq.ms,
                                      t_stop=t_stop * pq.ms)
        output_filename = os.path.splitext(gdf)[0] + '.h5'
        output_file = neo.io.NeoHdf5IO(output_filename)
        output_file.write(seg.spiketrains)
        output_file.close()

        output_dst = os.path.basename(output_filename)

        bundle.add_file(src_path=output_filename,
                        dst_path=output_dst,
                        bundle_path=output_dst,
                        mime_type=h5_bundle_mime_type)

    return bundle.save("neo_h5_bundle")


if __name__ == '__main__':
    input_filename = tt.URI('application/unknown',
                            'bundle_example/microcircuit_model_bundle')
    t_start = 0.
    t_stop = 300.
    nestBundle2NeoH5_task(input_filename, t_start, t_stop)
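
The names registered in the bundle above follow a simple convention: the .h5 file is written next to the input .gdf file, and only its bare file name is used for dst_path and bundle_path. A tiny self-contained sketch (the helper name and the sample path are illustrative only):

import os


def bundle_entry_names(gdf_path):
    """Derive the local .h5 target and the bare name used inside the bundle."""
    output_filename = os.path.splitext(gdf_path)[0] + '.h5'
    output_dst = os.path.basename(output_filename)
    return output_filename, output_dst


print(bundle_entry_names('bundle_example/spikes_L4E-77177-0.gdf'))
# -> ('bundle_example/spikes_L4E-77177-0.h5', 'spikes_L4E-77177-0.h5')
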
Code example #3
        res.append(sub_res)

    # Close the remaining h5 file
    ion.close()

    print(res)

    # Create plot
    plt.title("LV Histogram")
    plt.xlabel("LV")
    plt.ylabel("")

    # Plot a histogram
    plt.grid(True)
    plt.hist(res, bins=100, normed=1, histtype='bar', rwidth=1)
    # plt.show()

    output_file = 'result_lv_histogram_task.png'
    with open(output_file, 'wb') as output:
        plt.savefig(output)
    return lv_histogram_task.task.uri.save_file(mime_type='image/png',
                                                src_path=output_file,
                                                dst_path=output_file)


if __name__ == '__main__':

    # Run local test
    filename = tt.URI('application/unknown', 'generate_poisson_spiketrains.h5')
    lv_histogram_task(filename)
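
The snippet above begins after the per-spike-train values have already been appended to res. A minimal sketch of how one such local-variation value could be computed, assuming numpy and elephant are available; the spike times here are synthetic:

import numpy as np
from elephant.statistics import isi, lv

spike_times = np.sort(np.random.uniform(0., 300., size=100))  # fake spike train (ms)
intervals = isi(spike_times)   # inter-spike intervals
print(lv(intervals))           # local variation, one value per spike train
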
Code example #4
                description: Neuron IDs in the input file that should be
                    extracted, e.g., [1, 2, 3]. Provide an empty list [] to
                    extract all neurons with at least one spike.
        Returns:
            res: application/unknown
    """

    gdf = gdf2NeoH5_task.task.uri.get_file(gdf_file)
    input_file = gdfio.GdfIO(gdf)
    seg = input_file.read_segment(gdf_id_list=gdf_id_list,
                                  t_start=t_start * pq.ms,
                                  t_stop=t_stop * pq.ms)
    output_filename = os.path.splitext(gdf)[0] + '.h5'
    output_file = neo.io.NeoHdf5IO(output_filename)
    output_file.write(seg.spiketrains)
    output_file.close()

    output_dst = os.path.basename(output_filename)

    return gdf2NeoH5_task.task.uri.save_file(mime_type='application/unknown',
                                             src_path=output_filename,
                                             dst_path=output_dst)


if __name__ == '__main__':
    input_filename = tt.URI('application/unknown', 'spikes_L4E-77177-0.gdf')
    t_start = 0.
    t_stop = 300.
    gdf_id_list = []
    gdf2NeoH5_task(input_filename, t_start, t_stop, gdf_id_list)
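
Per the docstring above, gdf_id_list selects which neurons are extracted. Two hypothetical invocations of the task defined above (the neuron IDs are made up):

# extract only neurons 1, 2 and 3 from the GDF file
gdf2NeoH5_task(tt.URI('application/unknown', 'spikes_L4E-77177-0.gdf'),
               0., 300., [1, 2, 3])
# an empty list extracts every neuron with at least one spike
gdf2NeoH5_task(tt.URI('application/unknown', 'spikes_L4E-77177-0.gdf'),
               0., 300., [])
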
Code example #5
File: cubic_task.py  Project: rgutzen/UP-Tasks
    for k in range(number_of_spike_trains):
        spiketrains.append(ion.get("/" + "SpikeTrain_" + str(k)))

    ion.close()

    psth_as = time_histogram(spiketrains, binsize=binsize * pq.ms)

    result = cubic.cubic(psth_as, alpha=alpha)

    # Plot
    plt.bar(np.arange(0.75, len(result[1]) + .25, 1), result[1], width=.5)
    plt.axhline(alpha, ls='--', color='r')
    plt.xlabel('$\\xi$')
    plt.ylabel('P value')
    plt.title('$\\hat\\xi$=' + str(result[0]))

    output_filename = os.path.splitext(h5_path)[0] + '.png'
    with open(output_filename, 'wb') as result_pth:
        plt.savefig(result_pth, dpi=100)
    dst_name = os.path.basename(output_filename)
    return cubic_task.task.uri.save_file(mime_type='image/png',
                                         src_path=output_filename,
                                         dst_path=dst_name)


if __name__ == '__main__':
    input_filename = tt.URI('application/unknown', 'spikes_L5I-44930-0.h5')
    alpha = 0.05
    binsize = 1
    cubic_task(input_filename, binsize, alpha)
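
For context, the population histogram handed to cubic above can be built from any list of neo spike trains. A small sketch with synthetic data, assuming neo, quantities and elephant are installed (the binsize keyword matches the call in the snippet above):

import quantities as pq
from neo import SpikeTrain
from elephant.statistics import time_histogram

spiketrains = [SpikeTrain([10., 55., 120., 290.], units='ms', t_stop=300. * pq.ms),
               SpikeTrain([5., 60., 121., 250.], units='ms', t_stop=300. * pq.ms)]
psth = time_histogram(spiketrains, binsize=1 * pq.ms)  # pooled spike counts per bin
print(len(psth))  # number of 1 ms bins
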
Code example #6
    l_original = len(cc['original'])
    dataset_orig = file.create_dataset("/cc_group/original",
                                       (l_original, cc['original'][0].size),
                                       dtype=h5py.h5t.NATIVE_FLOAT)
    data = np.zeros((l_original, cc['original'][0].size))
    for i in range(l_original):
        for j in range(cc['original'][i].size):
            data[i][j] = cc['original'][i].item(j)
    dataset_orig[...] = data


if __name__ == '__main__':
    # this number relates to the "-t" parameter:
    #   -t 0-X => number_of_jobs=X+1
    # INPUT-second parameter
    # number_of_jobs is (0, 200]
    number_of_jobs = 1

    # INPUT-third parameter
    # job parameter: a number between 0 and number_of_jobs-1
    import os
    PBS_value = os.getenv('PBS_ARRAYID')
    if PBS_value is not None:
        job_id = int(PBS_value)
    else:
        job_id = 0

    # INPUT-first parameter
    inputdata = tt.URI('application/unknown', 'data/experiment.h5')
    crosscorrelogram_task(inputdata, number_of_jobs, job_id)
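
As a side note, the element-by-element copy in the snippet above can be written in one shot, assuming cc['original'] is a list of equally sized 1-D arrays; the file name and data below are made up:

import h5py
import numpy as np

cc = {'original': [np.random.rand(50) for _ in range(10)]}  # synthetic data
with h5py.File('cc_sketch.h5', 'w') as f:
    f.create_dataset('/cc_group/original',
                     data=np.vstack(cc['original']).astype(np.float32))
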
Code example #7
from random import randint
from active_worker.task import task
from task_types import TaskTypes as tt

# the two imports below are not in the original snippet but are needed by the
# code that follows; cv is assumed to come from elephant's statistics module
import numpy as np
from elephant.statistics import cv

# This decoration is deprecated; it comes from an old version of the task-sdk

_task_full_name = "elephant_cv_task"
_task_caption = "execute cv-function task"
_task_author = "Long Phan"
_task_description = "Input data is randomized inside the function, used to " \
                    "calculate cv and output an image/png as result"
_task_categories = ['elephant']
_task_compatible_queues = ['cscs_viz']


@task(returns=tt.URIType('image/png'))
def elephant_cv_task():

    # create numpy data for the cv calculation
    data_list = []
    for i in xrange(98):
        range_size = randint(1, 10)
        data_list.append(np.random.uniform(0, 1000, range_size))
    # print data_list
    # print len(data_list)
    # res: list of results calculated by cv-function
    res = []
    # pass data as input-parameter to cv-function
    for i in data_list:
        res.append(cv(i))