Example #1
def main():
    """
    Example use case of processing synapse detections.

    Loads the site3 metadata, reads the list of queries, then converts each
    saved probability-map volume (resultVol<n>.npy) into JSON annotations.
    """
    # Metadata describes data/query/output locations for this site
    metadataFN = '/Users/anish/Documents/Connectome/SynapseAnalysis/data/M247514_Rorb_1/Site3Align2/site3_metadata.json'
    metadata = syn.loadMetadata(metadataFN)

    queryFN = metadata['querylocation']

    # List of Queries
    listOfQueries = syn.loadQueriesJSON(queryFN)

    for n, query in enumerate(listOfQueries):
        # Probability map saved by the detection step: resultVol<n>.npy
        fn = os.path.join(metadata['datalocation'],
                          'resultVol{}.npy'.format(n))

        resultVol = np.load(fn)
        print(fn)

        # Threshold the probability map and write annotations to JSON
        pd.probMapToJSON(resultVol, metadata, query, n)
Example #2
def main():
    """
    Evaluation of site3 synapse detection results.

    Evaluates each query at several candidate thresholds, writes a text
    report of the per-threshold results, then combines all queries at
    their original thresholds.
    """

    # Load metadata
    metadataFN = 'site3_metadata.json'
    metadata = syn.loadMetadata(metadataFN)

    queryFN = metadata['querylocation']
    evalparam = metadata['evalparam']

    # List of Queries
    listOfQueries = syn.loadQueriesJSON(queryFN)
    listOfThresholds = []
    listOfThresholds_to_text = []
    listOfQueries_to_text = []
    listofevals = []
    thresh_list = [0.7, 0.8, 0.9]

    # Evaluate each query individually at every candidate threshold
    for n, query in enumerate(listOfQueries):
        # Record the query's original threshold before overwriting it below
        listOfThresholds.append(query['thresh'])

        for thresh in thresh_list:
            listOfThresholds_to_text.append(thresh)
            query['thresh'] = thresh
            print(query)
            # Append a copy: appending `query` itself would alias the same
            # dict in every entry, so the report would show only the last
            # threshold (0.9) for all of them.
            listOfQueries_to_text.append(dict(query))
            queryresult = pd.combineResultVolumes([n], [thresh], metadata,
                                                  evalparam)
            listofevals.append(queryresult)

    pd.printEvalToText(listofevals, listOfQueries_to_text,
                       listOfThresholds_to_text)

    # Combine all queries at their original thresholds
    pd.combineResultVolumes(list(range(len(listOfQueries))),
                            listOfThresholds, metadata, evalparam)
Example #3
def main():
    """
    Generate site3 synaptograms for the false-negative and false-positive
    synapse detections of a hand-picked set of queries.
    """

    metadata_fn = '/Users/anish/Documents/Connectome/SynapseAnalysis/data/M247514_Rorb_1/Site3Align2/site3_metadata_dev.json'
    metadata = syn.loadMetadata(metadata_fn)
    listOfQueries = syn.loadQueriesJSON(metadata['querylocation'])
    evalargs = metadata['evalparam']

    # Hand-picked query numbers and per-query thresholds for this site
    listOfThresholds = [0.8, 0.7, 0.7]
    listOfQueryNumbers = [0, 2, 4]

    queryresult = pd.combineResultVolumes(listOfQueryNumbers, listOfThresholds,
                                          metadata, evalargs)

    data_location = metadata['datalocation']
    outputpath = '/Users/anish/Documents/Connectome/Synaptome-Duke/data/collman17/Site3Align2Stacks/synaptograms/'
    stack_list = [
        'results', 'PSD95', 'synapsin', 'VGlut1', 'GluN1', 'GABA', 'Gephyrin',
        'TdTomato'
    ]

    generateResultTiffStacks(listOfQueryNumbers, listOfThresholds,
                             data_location, metadata['outputNPYlocation'])

    # Rendering parameters shared by every synaptogram
    synaptogram_args = {
        'win_xy': 4,
        'win_z': 1,
        'data_location': data_location,
        'stack_list': stack_list,
        'text_x_offset': 0,
        'text_y_offset': 5,
        'outputpath': outputpath
    }

    # False negatives: annotated synapses the detector missed
    synaptogram_args['outputpath'] = os.path.join(outputpath, 'false_negative')
    for synapse in queryresult['missed_annotations']:
        synaptogram.synapseAnnoToSynaptogram(synapse, synaptogram_args)

    # False positives: detections with no matching annotation
    synaptogram_args['outputpath'] = os.path.join(outputpath, 'false_positive')
    for synapse in queryresult['false_positives']:
        synaptogram.synapseAnnoToSynaptogram(synapse, synaptogram_args)
Example #4
def main():
    """
    Run site3 synapse detection, then evaluate the results.

    Phase 1: for every query, load the tiff data, run detection, and save
    the probability map. Phase 2: evaluate each query at several candidate
    thresholds, write a text report, then combine all queries.
    """
    # Load metadata
    metadata_fn = '/Users/anish/Documents/Connectome/SynapseAnalysis/data/M247514_Rorb_1/Site3Align2/site3_metadata.json'
    # metadata_fn = 'site3_metadata_TdTomato.json'
    metadata = syn.loadMetadata(metadata_fn)

    datalocation = metadata['datalocation']
    outputNPYlocation = metadata['outputNPYlocation']
    query_fn = metadata['querylocation']

    # List of Queries
    listOfQueries = syn.loadQueriesJSON(query_fn)
    print("Number of Queries: ", len(listOfQueries))

    for n, query in enumerate(listOfQueries):
        print(query)

        # Load the data
        synapticVolumes = da.loadTiffSeriesFromQuery(query, datalocation)

        # Run Synapse Detection (takes ~5 minutes per query)
        resultVol = syn.getSynapseDetections(synapticVolumes, query)

        # Save the probability map to file, if you want
        syn.saveresultvol(resultVol, outputNPYlocation, 'resultVol', n)

        # Save the thresholded results as annotation objects in a json file
        # pd.probMapToJSON(resultVol, metadata, query, n)

    # ---- Evaluation of site3 synapse detection results ----

    # Load metadata
    metadataFN = 'site3_metadata.json'
    metadata = syn.loadMetadata(metadataFN)

    queryFN = metadata['querylocation']
    evalparam = metadata['evalparam']

    # List of Queries
    listOfQueries = syn.loadQueriesJSON(queryFN)
    listOfThresholds = []
    listOfThresholds_to_text = []
    listOfQueries_to_text = []
    listofevals = []
    thresh_list = [0.7, 0.8, 0.9]

    # Evaluate each query individually at every candidate threshold
    for n, query in enumerate(listOfQueries):
        # Record the query's original threshold before overwriting it below
        listOfThresholds.append(query['thresh'])

        for thresh in thresh_list:
            listOfThresholds_to_text.append(thresh)
            query['thresh'] = thresh
            print(query)
            # Append a copy: appending `query` itself would alias the same
            # dict in every entry, so the report would show only the last
            # threshold (0.9) for all of them.
            listOfQueries_to_text.append(dict(query))
            queryresult = pd.combineResultVolumes([n], [thresh], metadata,
                                                  evalparam)
            listofevals.append(queryresult)

    pd.printEvalToText(listofevals, listOfQueries_to_text,
                       listOfThresholds_to_text)

    # Combine all queries at their original thresholds
    pd.combineResultVolumes(list(range(len(listOfQueries))),
                            listOfThresholds, metadata, evalparam)