Example #1
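A minimal launcher: configure read-only kbucket access, then start a VDOMR web server around the application.
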
# Assumed imports: the cairio client as in Example #3; vd presumably refers to the vdomr package
from cairio import client as ca
import vdomr as vd

def main():
    # Configure read-only access to the kbucket share
    ca.autoConfig(collection='spikeforest', key='spikeforest2-readonly')

    APP = TheApp()  # TheApp: the application class defined elsewhere in this module
    server = vd.VDOMRServer(APP)
    server.start()
Example #2
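The same launcher with a command-line switch: with --port it runs as a web service; otherwise it falls back to a stand-alone PyQt5 GUI. It assumes the same ca, vd, and TheApp names as Example #1, plus argparse.
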
def main():
    parser = argparse.ArgumentParser(description='Browse SpikeForest results')
    parser.add_argument(
        '--port', help='The port to listen on (for a web service). Otherwise, attempt to launch as stand-alone GUI.', required=False, default=None)

    args = parser.parse_args()

    # Configure readonly access to kbucket
    ca.autoConfig(collection='spikeforest', key='spikeforest2-readonly')

    APP = TheApp()

    if args.port is not None:
        vd.config_server()
        server = vd.VDOMRServer(APP)
        server.setPort(int(args.port))
        server.start()
    else:
        vd.config_pyqt5()
        W = APP.createSession()
        vd.pyqt5_start(root=W, title='SFBrowser')
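
Usage (the script name here is hypothetical): python sfbrowser.py --port 8080 serves the browser over HTTP, while running the script with no arguments launches the desktop GUI.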
Example #3
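A complete script that uploads a Singularity image to the kbucket share with read-write credentials and prints the returned content-addressed path.
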
#!/usr/bin/env python

from cairio import client as ca

ca.autoConfig(collection='spikeforest',
              key='spikeforest2-readwrite',
              ask_password=True)
sha1_path = ca.saveFile('spyking_circus.simg')
print(sha1_path)
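
The printed path can later be resolved back to a local file. A minimal sketch of the reverse lookup, assuming the cairio client exposes a realizeFile helper (an assumption; check the cairio API for the exact call):

from cairio import client as ca

ca.autoConfig(collection='spikeforest', key='spikeforest2-readonly')

# realizeFile is an assumption here: resolve the content-addressed path to a local file
sha1_path = '<path printed by the upload script above>'
local_path = ca.realizeFile(sha1_path)
print(local_path)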
Example #4
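A thin keyword-only wrapper around ca.autoConfig.
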
def setKBucketConfig(*, config=None, collection=None, key=None):
    # Note: the config argument is accepted but not used by this wrapper
    ca.autoConfig(collection=collection, key=key)
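
Example usage, with the read-only credentials from Example #1:

setKBucketConfig(collection='spikeforest', key='spikeforest2-readonly')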
Example #5
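An end-to-end test pipeline: load a recording group, summarize the recordings, run each sorter, summarize the sortings, compare them with ground truth, and save the results under an output id. It assumes the cairio client as ca, os, and a spikeforest analysis module imported as sa (bindings inferred from the other examples).
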
def main():
    # Use this to optionally connect to a kbucket share:
    ca.autoConfig(collection='spikeforest',
                  key='spikeforest2-readwrite',
                  ask_password=True,
                  password=os.environ.get('SPIKEFOREST_PASSWORD', None))

    # Specify the compute resource (see the note above)
    #compute_resource = 'ccmlin008-80'
    #compute_resource_ks = 'ccmlin008-kilosort'
    compute_resource = None
    compute_resource_ks = None

    #compute_resource = 'ccmlin000-80'
    #compute_resource_ks = 'ccmlin000-kilosort'

    # Controls whether processing is forced to re-run (by default cached results are used)
    os.environ['MLPROCESSORS_FORCE_RUN'] = 'FALSE'  # FALSE or TRUE

    # The id of the output -- for later retrieval by GUIs, etc.
    output_id = 'spikeforest_test3'

    #group_name = 'magland_synth_test'
    group_name = 'mearec_sqmea_test'

    a = ca.loadObject(
        key=dict(name='spikeforest_recording_group', group_name=group_name))

    recordings = a['recordings']
    studies = a['studies']

    recordings = [recordings[0]]  # For this test, keep only the first recording

    # Summarize the recordings
    recordings_B = sa.summarize_recordings(recordings=recordings,
                                           compute_resource=compute_resource)

    # Sorters (algs and params) are defined below
    sorters = define_sorters()

    # We will be assembling the sorting results here
    sorting_results_A = []

    for sorter in sorters:
        # Sort the recordings
        compute_resource0 = compute_resource
        if sorter['name'] == 'KiloSort':
            compute_resource0 = compute_resource_ks
        sortings = sa.sort_recordings(sorter=sorter,
                                      recordings=recordings_B,
                                      compute_resource=compute_resource0)

        # Append to results
        sorting_results_A = sorting_results_A + sortings

    # Summarize the sortings
    sorting_results_B = sa.summarize_sortings(
        sortings=sorting_results_A, compute_resource=compute_resource)

    # Compare with ground truth
    sorting_results_C = sa.compare_sortings_with_truth(
        sortings=sorting_results_B, compute_resource=compute_resource)

    # TODO: collect all the units for aggregated analysis

    # Save the output
    print('Saving the output')
    ca.saveObject(key=dict(name='spikeforest_results', output_id=output_id),
                  object=dict(studies=studies,
                              recordings=recordings_B,
                              sorting_results=sorting_results_C))
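
For later retrieval (for example by the browser GUIs in Examples #1 and #2), the saved object can be fetched with the same key, mirroring the loadObject call used earlier in this script:

b = ca.loadObject(key=dict(name='spikeforest_results', output_id='spikeforest_test3'))
print(len(b['studies']), 'studies,', len(b['sorting_results']), 'sorting results')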
Example #6
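A fuller variant of the pipeline in Example #5: it registers an alternate kbucket share for container downloads, aggregates the per-study results, and prints an average accuracy for each study/sorter pair. It additionally assumes numpy imported as np.
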
def main():
    ca.autoConfig(collection='spikeforest',
                  key='spikeforest2-readwrite',
                  ask_password=True,
                  password=os.environ.get('SPIKEFOREST_PASSWORD', None))

    # Optionally connect to an alternate kbucket share, for downloading containers if needed
    ca.setRemoteConfig(alternate_share_ids=['69432e9201d0'])

    # Specify the compute resource (see the note above)
    compute_resource = 'default'
    #compute_resource = 'local-computer'
    #compute_resource = 'ccmlin008-default'
    # Fall back to the same resource for KiloSort (the loop below would otherwise
    # raise a NameError); override with a KiloSort-specific resource if available
    compute_resource_ks = compute_resource
    #compute_resource_ks = 'ccmlin008-kilosort'

    # Controls whether processing is forced to re-run (by default cached results are used)
    os.environ['MLPROCESSORS_FORCE_RUN'] = 'FALSE'  # FALSE or TRUE

    # The id of the output -- for later retrieval by GUIs, etc.
    output_id = 'visapy_mea'

    # Grab the recordings for testing
    group_name = 'visapy_mea'

    a = ca.loadObject(
        key=dict(name='spikeforest_recording_group', group_name=group_name))

    recordings = a['recordings']
    studies = a['studies']

    # recordings = [recordings[0]]
    # recordings = recordings[0:3]

    # Summarize the recordings
    recordings = sa.summarize_recordings(recordings=recordings,
                                         compute_resource=compute_resource)

    # Sorters (algs and params) are defined below
    sorters = _define_sorters()

    # We will be assembling the sorting results here
    sorting_results = []

    for sorter in sorters:
        # Sort the recordings
        compute_resource0 = compute_resource
        if sorter['name'] == 'KiloSort':
            compute_resource0 = compute_resource_ks
        sortings = sa.sort_recordings(sorter=sorter,
                                      recordings=recordings,
                                      compute_resource=compute_resource0)

        # Append to results
        sorting_results = sorting_results + sortings

    # Summarize the sortings
    sorting_results = sa.summarize_sortings(sortings=sorting_results,
                                            compute_resource=compute_resource)

    # Compare with ground truth
    sorting_results = sa.compare_sortings_with_truth(
        sortings=sorting_results, compute_resource=compute_resource)

    # Aggregate the results
    aggregated_sorting_results = sa.aggregate_sorting_results(
        studies, recordings, sorting_results)

    # Save the output
    print('Saving the output')
    ca.saveObject(key=dict(name='spikeforest_results'),
                  subkey=output_id,
                  object=dict(studies=studies,
                              recordings=recordings,
                              sorting_results=sorting_results,
                              aggregated_sorting_results=ca.saveObject(
                                  object=aggregated_sorting_results)))

    for sr in aggregated_sorting_results['study_sorting_results']:
        study_name = sr['study']
        sorter_name = sr['sorter']
        n1 = np.array(sr['num_matches'])
        n2 = np.array(sr['num_false_positives'])
        n3 = np.array(sr['num_false_negatives'])
        # Per-unit accuracy: matches / (matches + false positives + false negatives)
        accuracies = n1 / (n1 + n2 + n3)
        avg_accuracy = np.mean(accuracies)
        txt = 'STUDY: {}, SORTER: {}, AVG ACCURACY: {}'.format(
            study_name, sorter_name, avg_accuracy)
        print(txt)
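
Because this script saves under subkey=output_id, retrieval presumably passes the same subkey; a sketch assuming loadObject mirrors saveObject's subkey parameter (an assumption not confirmed by these examples):

b = ca.loadObject(key=dict(name='spikeforest_results'), subkey='visapy_mea')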
Example #7
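An earlier variant of the pipeline that hard-codes two test recordings by their kbucket directories and runs the summarize/sort/compare steps per sorter inside the loop.
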
def main():
    # Use this to optionally connect to a kbucket share:
    ca.autoConfig(collection='spikeforest',
                  key='spikeforest2-readwrite',
                  ask_password=True)

    # Specify the compute resource (see the note above)
    compute_resource = 'jfm-laptop'

    # Controls whether processing is forced to re-run (by default cached results are used)
    os.environ['MLPROCESSORS_FORCE_RUN'] = 'FALSE'  # FALSE or TRUE

    # The id of the output -- for later retrieval by GUIs, etc.
    output_id = 'spikeforest_test1'

    # Grab a couple recordings for testing
    recording1 = dict(
        recording_name='001_synth',
        study_name='datasets_noise10_K10_C4-test',
        study_set='magland_synth-test',
        directory='kbucket://15734439d8cf/groundtruth/magland_synth/datasets_noise10_K10_C4/001_synth'
    )
    recording2 = dict(
        recording_name='002_synth',
        study_name='datasets_noise10_K10_C4-test',
        study_set='magland_synth-test',
        directory='kbucket://15734439d8cf/groundtruth/magland_synth/datasets_noise10_K10_C4/002_synth'
    )
    recordings = [recording1, recording2]

    # Summarize the recordings
    recordings_B = sa.summarize_recordings(recordings=recordings,
                                           compute_resource=compute_resource)

    # Sorters (algs and params) are defined below
    sorters = define_sorters()

    # We will be assembling the sorting results here
    sorting_results = []

    for sorter in sorters:
        # Sort the recordings
        sortings_A = sa.sort_recordings(sorter=sorter,
                                        recordings=recordings_B,
                                        compute_resource=compute_resource)

        # Summarize the sortings
        sortings_B = sa.summarize_sortings(sortings=sortings_A,
                                           compute_resource=compute_resource)

        # Compare with ground truth
        sortings_C = sa.compare_sortings_with_truth(
            sortings=sortings_B, compute_resource=compute_resource)

        # Append to results
        sorting_results = sorting_results + sortings_C

    # TODO: collect all the units for aggregated analysis

    # Save the output
    print('Saving the output')
    ca.saveObject(key=dict(name='spikeforest_results', output_id=output_id),
                  object=dict(recordings=recordings_B,
                              sorting_results=sorting_results))
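
Note that, unlike Examples #5 and #6, this variant omits studies from the saved object, so downstream consumers see only the recordings and the per-sorter results.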