Example #1
def main(name, arguments):
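    # Parse the command-line options, load the contour files given as
    # arguments, apply the configured measurements, and write the results
    # to the output file via datafile.write_data_file.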
    parser.prog = name
    options, args = parser.parse_args(arguments)
    args = cli_tools.glob_args(args)
    if options.help:
        print_help()
    if len(args) == 0:
        raise ValueError('Some contour files must be specified!')
    contours = simple_interface.load_contours(args, show_progress=options.show_progress)
    measurements = [m(**kws) for m, kws in options.measurements]
    header, rows = simple_interface.measure_contours(contours, options.show_progress, *measurements)
    datafile.write_data_file([header]+rows, options.output_file)
Example #2
def main(name, arguments):
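    # Parse the command-line options, load the contour files, build a shape
    # model, save it, and optionally write the raw and normalized position
    # tables with datafile.write_data_file.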
    parser.prog = name
    options, args = parser.parse_args(arguments)
    args = cli_tools.glob_args(args)
    if len(args) == 0:
        raise ValueError('Some contour files must be specified!')
    contours = simple_interface.load_contours(args, show_progress=options.show_progress)
    shape_model, header, rows, norm_header, norm_rows = simple_interface.make_shape_model(contours, options.variance_explained)
    shape_model.to_file(options.output_prefix + '.contour')
    if options.write_data:
        datafile.write_data_file([header]+rows, options.output_prefix + '-positions.csv')
        datafile.write_data_file([norm_header]+norm_rows, options.output_prefix + '-normalized-positions.csv')
Example #3
def main(name, arguments):
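    # Read the datasets named on the command line (plus any reference
    # datasets), compare them with ks_resample, and write a table of
    # formatted p-values to the output file.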
    parser.prog = name
    options, args = parser.parse_args(arguments)
    args = cli_tools.glob_args(args)
    reference = cli_tools.glob_args(options.reference)

    if (len(args) < 2) or (len(args) + len(reference) < 3):
        raise ValueError(
            'A data column and at least two datasets (at least one of which must not be a reference) must be provided!'
        )

    data_column, datasets = args[0], args[1:]

    # if the data column is convertible to an integer, do so and
    # then convert from 1-indexed to 0-indexed
    try:
        data_column = int(data_column)
        data_column -= 1
    except ValueError:
        pass

    if options.show_progress:
        datasets = terminal_tools.progress_list(datasets, "Reading input data")
    names, pops = read_files(datasets, data_column)
    if options.show_progress and len(reference) > 0:
        reference = terminal_tools.progress_list(reference,
                                                 "Reading reference data")
    ref_names, ref_pops = read_files(reference, data_column)

    if options.show_progress:
        pb = terminal_tools.IndeterminantProgressBar("Resampling data")
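    # With reference data, compare every dataset against the reference;
    # otherwise run all pairwise comparisons between the datasets.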
    if len(ref_pops) > 0:
        pvals = ks_resample.compare_to_ref(pops, ref_pops, options.n)
        if len(ref_names) > 1:
            ref_name = 'reference (%s)' % (', '.join(ref_names))
        else:
            ref_name = ref_names[0]
        rows = [[None, 'difference from %s' % ref_name]]
        for name, p in zip(names, pvals):
            rows.append([name, format_pval(p, options.n)])
    else:  # no ref pops
        pvals = ks_resample.symmetric_comparison(pops, options.n)
        rows = [[None] + names]
        for i, (name, p_row) in enumerate(zip(names, pvals)):
            rows.append([name] + [format_pval(p, options.n) for p in p_row])
            rows[-1][i + 1] = None  # empty out the self-self comparison diagonal

    datafile.write_data_file(rows, options.output_file)
Example #4
def main(name, arguments):
    parser.prog = name
    options, args = parser.parse_args(arguments)
    args = cli_tools.glob_args(args)
    if options.help:
        print_help()
    if len(args) == 0:
        raise ValueError('Some contour files must be specified!')
    contours = simple_interface.load_contours(
        args, show_progress=options.show_progress)
    measurements = [m(**kws) for m, kws in options.measurements]
    header, rows = simple_interface.measure_contours(contours,
                                                     options.show_progress,
                                                     *measurements)
    datafile.write_data_file([header] + rows, options.output_file)
Example #5
# directory = 'D:/Image_files/Mi9_strongER210+RGECO/'+stim
# parent_dir = path(directory)

# dirs = sorted(os.listdir(parent_dir))

t_dir = 'C:/Users/vymak_i7/Desktop/New_ER/Screen/Mi9_strong/'+stim
target_dir = path(t_dir)

#1) CREATE FILES FROM IMAGE_FILES TO NEW_ER/SCREEN FOLDERS
# for file in dirs[:]: 
    
#     targetname=file
#     os.mkdir(os.path.join(t_dir,targetname))
    
# # """2) Create directories.csv file"""
target_list = sorted(os.listdir(target_dir))
header = ['fly','directory']
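# collect one [fly number, directory name] row per folder whose name contains 'fly'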
dir_list = []

for file in target_list:
    if 'fly' in file:
        # the character immediately after 'fly' in the folder name is the fly number
        i = file.find('fly') + 3
        fly_no = file[i]
        dir_list.append([fly_no, file])

datafile.write_data_file([header] + dir_list, target_dir / 'directories.csv')
Example #6
            count_off = count_off-1
            F_off.extend([count_off])
            F_on.extend([count_on])
        else:
            F_on.extend([count_on])
            F_off.extend([count_off])
        frame_labels.extend([count_on*(count_on+count_off)])
        n=n+1
    frame_labels.extend([0])
    # print(frame_labels.index(max(frame_labels)))
    # print(F_on)
    # print(F_off)
    
    plt.plot(frame_labels)
    plt.savefig(directory/'plots/frame_numbers.png')
    plt.show()
    plt.close()
    
    print(count_on)
    print(count_off)
    
    output_array[:,:R.shape[1]] = R
    output_array[:,-1] = frame_labels
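    # sort rows by the frame-label column and keep only frames whose label is at least 1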
    
    OAS = output_array[output_array[:,-1].argsort()]
    i1 = numpy.searchsorted(OAS[:,-1],1)
    
    datafile.write_data_file([header] + list(OAS[i1:, :]), directory / stim_file.namebase + '-frames.csv')


Example #7
    print('# of total mapped RF: ' + str(len(newmap)))
    print('# of total mapped flash: ' + str(len(newfmapCYT) / 2))

    #add unmapped RFs to updated unmapped RFs array
    sub_unmap = np.asarray(sub_unmap)
    sub_funmapCYT = np.asarray(sub_funmapCYT)
    sub_funmapER = np.asarray(sub_funmapER)
    for a in range(len(sub_unmap)):
        newunmap.append(sub_unmap[a, :])
    for a in range(len(sub_funmapCYT)):
        newfunmapCYT.append(sub_funmapCYT[a, :])
    for a in range(len(sub_funmapER)):
        newfunmapER.append(sub_funmapER[a, :])

    #save new mapped and unmapped RFs arrays as .csv files
    datafile.write_data_file([header] + newmap, map_mdir + '/newRF_centers-' +
                             br + '.csv')  #MLB RF centers .csv
    datafile.write_data_file([header] + newunmap,
                             unmap_mdir + '/newRF_centers-' + br +
                             '.csv')  #MLB RF centers .csv
    datafile.write_data_file(
        [headerf] + newfmapER, f_mdir + '/mapped/newavg_flash-' + br +
        '-ER210.csv')  #ER210 flash responses .csv (mapped RFs)
    datafile.write_data_file(
        [headerf] + newfmapCYT, f_mdir + '/mapped/newavg_flash-' + br +
        '-RGECO.csv')  #RGECO flash responses .csv (mapped RFs)
    datafile.write_data_file(
        [headerf] + newfunmapER, f_mdir + '/unmapped/newavg_flash-' + br +
        '-ER210.csv')  #ER210 flash responses .csv (unmapped RFs)
    datafile.write_data_file([headerf] + newfunmapCYT, f_mdir +
                             '/unmapped/newavg_flash-' + br + '-RGECO.csv')
Example #8
            pylab.xticks(numpy.arange(0, T[-1], 5), fontsize=12)
            # pylab.yticks(numpy.arange(ytmin, ytmax, yti), fontsize=12)
            pylab.xlabel('seconds', fontsize=14)
            pylab.ylabel('DF/F', fontsize=14)

            DFF = [row[1], br, n, centx, centy, 'light']
            roi_A = A
            roi_A = numpy.asarray(roi_A)
            # roi_A = smooth(roi_A)
            # roi_A = numpy.asarray(roi_A[:-4])
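            # DF/F: response relative to the baseline (median of the first 10 samples)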
            baseline = numpy.median(roi_A[:10])
            dff = (roi_A - baseline) / baseline
            DFF.extend(dff)
            pylab.plot(T, dff, color=color, alpha=alphas[1])
            OUT_DFF.append(DFF)
            O_DFF.append(DFF)

            pylab.savefig(plot_dir + '/' + br + '-ROI-' + str(n) +
                          '-ER210.png',
                          dpi=300,
                          bbox_inches='tight')
            #pylab.show()
            pylab.close()
            n = n + 1

        datafile.write_data_file([header_out] + OUT_DFF, m_dir +
                                 'average_responses-' + br + '-ER210.csv')

    datafile.write_data_file([header_out] + O_DFF, parent_m_dir +
                             'average_responses-' + br + '-ER210.csv')
Example #9
            plt.savefig(unmap_pdir + br + '/' + str(count_unmap).zfill(4) +
                        '-' + str(R[n, 0]) + '-' + br + '-ROI_' +
                        str(R[n, 2]) + '.png',
                        dpi=300,
                        bbox_inches='tight')
            count_unmap = count_unmap + 1

        # plt.show()
        plt.close()

        f = f + 2
        g = g + 1
        n = n + 4

    #Write and save .csv files
    datafile.write_data_file([header_out] + OUT, map_mdir + '/RF_centers-' +
                             br + '.csv')  #MLB RF centers .csv
    datafile.write_data_file([flash_header] + norm_cytf,
                             flash_mdir + '/mapped/average_responses-' + br +
                             '-RGECO.csv')  #flash responses .csv RGECO
    datafile.write_data_file([flash_header] + norm_ERf,
                             flash_mdir + '/mapped/average_responses-' + br +
                             '-ER210.csv')  #flash responses .csv ER210

    #Write up .csv files for unmappable RF centers
    datafile.write_data_file([header_out] + OUT2, unmap_mdir + '/RF_centers-' +
                             br + '.csv')  #MLB RF centers .csv
    datafile.write_data_file([flash_header] + abn_cytf,
                             flash_mdir + '/unmapped/average_responses-' + br +
                             '-RGECO.csv')  #flash responses .csv RGECO
    datafile.write_data_file([flash_header] + abn_ERf,
                             flash_mdir + '/unmapped/average_responses-' + br +
                             '-ER210.csv')  #flash responses .csv ER210
Example #10
            plt.figure(figsize=(fs, fs))
            ax = plt.subplot(111)  
            ax.spines["top"].set_visible(False)
            ax.spines["right"].set_visible(False)
            ax.get_xaxis().tick_bottom()
            ax.get_yaxis().tick_left()
            # plt.ylim(ymin,ymax)
            plt.xlim(0,T[-1])
            plt.xticks(numpy.arange(0,T[-1],1), fontsize=12)
            # plt.yticks(numpy.arange(ytmin, ytmax, yti), fontsize=12)
            plt.yticks(fontsize=12)
            plt.xlabel('seconds',fontsize = 14)
            plt.ylabel('DF/F',fontsize = 14)
            for i,c,epoch_num in zip(indices,colors,ENUM):
                DFF=[row[1],br,n,centx,centy,epoch_num]
                roi_A = A[i[0]:i[1]]
                baseline = numpy.median(roi_A[:10])
                dff = (roi_A-baseline)/baseline
                DFF.extend(dff)
                ax.plot(T,dff,color = c)
                OUT_DFF.append(DFF)
            plt.savefig(plot_dir+'/'+br+'-RGECO-'+str(n)+'.png',dpi=300,bbox_inches = 'tight')
            # plt.show()
            plt.close()
            
            n=n+1
    
    print(br+' MLB count: '+str(len(OUT_DFF)/4))
    
    datafile.write_data_file([header_out]+OUT_DFF,mdir+'/average_responses-'+br+'.csv')