示例#1
0
def main(args):
    """Ward-cluster temporally filtered neural-weighted-behavior responses.

    Args:
        args: dict with key 'logfile' — path handed to flow.Printlog.
    """
    logfile = args['logfile']
    # Bound print_to_log method doubles as a simple logging callable.
    printlog = getattr(flow.Printlog(logfile=logfile), 'print_to_log')

    main_path = "/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20201221_neural_weighted_behavior/"

    # Grab every '*responses*' file and sort alphanumerically so the stacked
    # array order matches slice order.
    response_files = [
        os.path.join(main_path, file) for file in os.listdir(main_path)
        if 'responses' in file
    ]
    bbb.sort_nicely(response_files)

    responses = []
    for file in response_files:
        responses.append(np.load(file))
    responses = np.asarray(responses)

    # Assumes 49 slices x 2000 clusters x 3 behaviors x 500 timepoints
    # — TODO confirm against the upstream response files.
    responses_split = np.reshape(responses, (49, 2000, 3, 500))
    # Zero two frequency bands (the 475:485 band presumably being the mirrored
    # negative frequencies) before inverting; only the real part is kept below.
    responses_fft = fft(responses_split, axis=-1)
    responses_fft[:, :, :, 15:23] = 0
    responses_fft[:, :, :, 475:485] = 0
    responses_filtered = ifft(responses_fft, axis=-1)

    # Flatten to (slice*cluster, behavior*time) feature rows for clustering.
    to_fit = np.reshape(responses_filtered[:, :, :, :].real,
                        (49 * 2000, 3 * 500))
    printlog('to_fit shape: {}'.format(to_fit.shape))

    printlog('clustering.........')
    # distance_threshold=0 with n_clusters=None builds the full merge tree;
    # memory=main_path caches the tree computation on disk.
    model = AgglomerativeClustering(distance_threshold=0,
                                    n_clusters=None,
                                    memory=main_path,
                                    linkage='ward')
    model = model.fit(to_fit)
    printlog('complete!')
示例#2
0
def main():
    """Pop the oldest folder off the build queue and hand it to build_fly.sh.

    Looks in the build_queue directory, takes the first folder after
    alphanumeric sorting, submits it via sbatch, then removes the queue entry.
    Raises SystemExit when the queue is empty.
    """
    imports_path = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/imports/build_queue'
    print('Checking build queue.')
    print('Time: {}'.format(datetime.datetime.now()))

    queued_folders = []
    for entry in os.listdir(imports_path):
        print('Found queued folder {}'.format(entry))
        queued_folders.append(entry)

    # Nothing queued: bail out of the whole process.
    if not queued_folders:
        print('No queued folders found. Raising SystemExit.')
        raise SystemExit

    bbb.sort_nicely(queued_folders)
    oldest = queued_folders[0]
    # The build path lives one level above the queue directory.
    parent_dir = os.path.split(imports_path)[0]
    folder_to_build = os.path.join(parent_dir, oldest)
    print('Passing control to build_fly.sh for folder {}'.format(
        folder_to_build))
    os.system('sbatch build_fly.sh {}'.format(folder_to_build))
    # Remove the queue entry now that the job has been submitted.
    os.remove(os.path.join(imports_path, oldest))
示例#3
0
def main(args):
    """Log the build queue contents and announce the next folder to process.

    Args:
        args: dict with keys 'logfile' (flow.Printlog target) and
            'imports_path' (queue directory to scan).
    """
    logfile = args['logfile']
    imports_path = args['imports_path']
    printlog = getattr(flow.Printlog(logfile=logfile), 'print_to_log')

    width = 120  # column width for the dot-padded log lines
    printlog(F"{'Checking build queue':.<{width}}")

    queued_folders = list(os.listdir(imports_path))
    printlog(F"Found queued folders{str(queued_folders):.>{width-20}}")

    # Empty queue: nothing to do, stop the process.
    if not queued_folders:
        printlog('No queued folders found. Raising SystemExit.')
        raise SystemExit

    bbb.sort_nicely(queued_folders)
    oldest = queued_folders[0]
    # The folder to build lives one level above the queue directory.
    folder_to_build = os.path.join(os.path.split(imports_path)[0], oldest)
    print(folder_to_build)
    printlog(F"Commencing processing of{folder_to_build:.>{width-24}}")
    printlog(f"{'>   '+str(oldest)+'   <':-^{width}}")
示例#4
0
def main(args):
    """Build each flagged 'fly*' folder into the dataset as the next fly_<N>.

    Args:
        args: dict with keys:
            'logfile'      — path for flow.Printlog output.
            'flagged_dir'  — import directory containing fly subfolders.
            'dataset_path' — destination dataset root.
            'fly_dirs'     — optional explicit list of folder names; when not
                             None, only those folders are built.
    """
    ### Move folders from imports to fly dataset - need to restructure folders ###

    logfile = args['logfile']
    flagged_dir = args['flagged_dir']
    target_path = args['dataset_path']
    fly_dirs = args['fly_dirs']
    printlog = getattr(flow.Printlog(logfile=logfile), 'print_to_log')
    #printlog('\nBuilding flies from directory {}'.format(flagged_dir))
    width = 120
    #printlog(F"\n{'#' * width}\n"
    #         F"{'   Building flies from directory ' + os.path.split(flagged_dir)[-1] + '   ':#^{width}}\n"
    #         F"{'#' * width}")

    # Assume this folder contains fly1 etc
    # This folder may (or may not) contain separate areas # False, now enforcing experiment subfolders
    # Each area will have a T and a Z
    # Avoid grabbing other weird xml files, reference folder etc.
    # Need to move into fly_X folder that reflects it's date

    # get fly folders in flagged directory and sort to ensure correct fly order
    likely_fly_folders = os.listdir(flagged_dir)
    bbb.sort_nicely(likely_fly_folders)
    likely_fly_folders = [i for i in likely_fly_folders if 'fly' in i]
    printlog(F"Found fly folders{str(likely_fly_folders):.>{width-17}}")

    if fly_dirs is not None:
        # Caller restricted the build to an explicit subset of folders.
        likely_fly_folders = fly_dirs
        printlog(F"Continuing with only{str(likely_fly_folders):.>{width-20}}")

    for likely_fly_folder in likely_fly_folders:
        if 'fly' in likely_fly_folder:

            # Next sequential fly number in the destination dataset.
            new_fly_number = get_new_fly_number(target_path)
            #printlog(f'\n*Building {likely_fly_folder} as fly number {new_fly_number}*')
            printlog(
                f"\n{'   Building '+likely_fly_folder+' as fly_'+ str(new_fly_number) + '   ':-^{width}}"
            )

            # Define source fly directory
            source_fly = os.path.join(flagged_dir, likely_fly_folder)

            # Define destination fly directory
            #fly_time = get_fly_time(source_fly)
            new_fly_folder = 'fly_' + str(new_fly_number)

            destination_fly = os.path.join(target_path, new_fly_folder)
            os.mkdir(destination_fly)
            printlog(F'Created fly directory:{destination_fly:.>{width-22}}')

            # Copy fly data
            copy_fly(source_fly, destination_fly, printlog)

            # Add date to fly.json file
            add_date_to_fly(destination_fly)

            # Add json metadata to master dataset
            add_fly_to_xlsx(destination_fly)
示例#5
0
def main(args):
    """Build flies from a flagged import directory into the walking dataset.

    Older stdout-printing variant of the fly builder: it computes the starting
    fly number once and increments it per built fly, instead of re-querying
    the dataset.

    Args:
        args: sequence whose first element is the flagged directory path.
    """
    ### Move folders from imports to fly dataset - need to restructure folders ###

    flagged_directory = args[0]
    print('Building fly from directory {}'.format(flagged_directory))
    sys.stdout.flush()  # flush so output appears promptly under batch logging
    # NOTE(review): imports_path appears unused in this function.
    imports_path = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/imports'
    target_path = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/'

    # Assume this folder contains fly_1 etc
    # This folder may (or may not) contain separate areas # False, now enforcing experiment subfolders
    # Each area will have a T and a Z
    # Avoid grabbing other weird xml files, reference folder etc.
    # Need to move into fly_X folder that reflects it's date

    # Get new destination fly number by looking at last 2 char of current flies
    current_fly_number = get_new_fly_number(target_path)

    # get fly folders in flagged directory and sort to ensure correct fly order
    likely_fly_folders = os.listdir(flagged_directory)
    print('Found fly folders: {}'.format(likely_fly_folders))
    sys.stdout.flush()
    bbb.sort_nicely(likely_fly_folders)

    for likely_fly_folder in likely_fly_folders:
        if 'fly' in likely_fly_folder:
            print('This fly will be number: {}'.format(current_fly_number))
            print('Creating fly from directory: {}'.format(likely_fly_folder))
            sys.stdout.flush()

            # Define source fly directory
            source_fly = os.path.join(flagged_directory, likely_fly_folder)

            # Define destination fly directory
            #fly_time = get_fly_time(source_fly)
            new_fly_folder = 'fly_' + str(current_fly_number)
            destination_fly = os.path.join(target_path, new_fly_folder)
            os.mkdir(destination_fly)
            print('Created fly directory: {}'.format(destination_fly))
            sys.stdout.flush()

            # Copy fly data
            copy_fly(source_fly, destination_fly)

            # Add date to fly.json file
            add_date_to_fly(destination_fly)

            # Add json metadata to master dataset
            add_fly_to_xlsx(destination_fly)

            # Get new fly number
            current_fly_number += 1
def main():
    """Regenerate imaging JSON for the functional experiments of flies 1-9."""
    root_directory = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/'
    wanted_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # Keep only fly_<n> folders whose trailing number is in the wanted set.
    fly_folders = []
    for name in os.listdir(root_directory):
        if 'fly' in name and int(name.split('_')[-1]) in wanted_numbers:
            fly_folders.append(os.path.join(root_directory, name))
    bbb.sort_nicely(fly_folders)
    #fly_folders = [os.path.join(root_directory, 'fly_1')]
    # Process in reverse alphanumeric order.
    for fly in reversed(fly_folders):
        expt_folders = [
            os.path.join(fly, name) for name in os.listdir(fly)
            if 'func' in name
        ]
        for expt_folder in expt_folders:
            create_imaging_json(
                os.path.join(expt_folder, 'imaging', 'functional.xml'))
def main():
    """Run create_imaging_json on every functional experiment of every fly."""
    print('Performing X on all flies')
    root_directory = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/'
    # Every folder whose name contains 'fly'.
    fly_folders = []
    for name in os.listdir(root_directory):
        if 'fly' in name:
            fly_folders.append(os.path.join(root_directory, name))
    bbb.sort_nicely(fly_folders)
    #fly_folders = [os.path.join(root_directory, 'fly_52')]
    # Process in reverse alphanumeric order.
    for fly in reversed(fly_folders):
        expt_folders = [
            os.path.join(fly, name) for name in os.listdir(fly)
            if 'func' in name
        ]
        for expt_folder in expt_folders:
            create_imaging_json(
                os.path.join(expt_folder, 'imaging', 'functional.xml'))
示例#8
0
def add_date_to_fly(destination_fly):
    '''Get date/time from an imaging xml file and record them in fly.json.

    Prefers the first 'func' folder's functional.xml; falls back to the first
    'anat' folder's anatomy.xml when no func folder exists. Raises IndexError
    when the fly has neither func nor anat folders.

    Fix over the original: the bare ``except:`` (driven by IndexError on an
    empty func list) could silently mask unrelated errors; replaced with an
    explicit empty-list check.
    '''
    ### Get date
    func_folders = [
        os.path.join(destination_fly, x)
        for x in os.listdir(destination_fly) if 'func' in x
    ]
    if func_folders:
        bbb.sort_nicely(func_folders)
        xml_file = os.path.join(func_folders[0], 'imaging', 'functional.xml')
    else:
        # No func folders: use the anatomy scan instead.
        anat_folders = [
            os.path.join(destination_fly, x)
            for x in os.listdir(destination_fly) if 'anat' in x
        ]
        bbb.sort_nicely(anat_folders)
        xml_file = os.path.join(anat_folders[0], 'imaging', 'anatomy.xml')

    # Extract datetime; the string is split on '-' into date and time parts
    # (assumes '<date>-<time>...' format — TODO confirm in get_datetime_from_xml).
    datetime_str, _, _ = get_datetime_from_xml(xml_file)
    date = datetime_str.split('-')[0]
    time = datetime_str.split('-')[1]

    ### Add to fly.json (read-modify-write in place)
    json_file = os.path.join(destination_fly, 'fly.json')
    with open(json_file, 'r+') as f:
        metadata = json.load(f)
        metadata['date'] = str(date)
        metadata['time'] = str(time)
        f.seek(0)
        json.dump(metadata, f, indent=4)
        f.truncate()  # drop any leftover bytes if the new JSON is shorter
示例#9
0
def main(args):
    """Append one row per functional experiment to the master_2P.xlsx workbook.

    Args:
        args: sequence whose first element is either the string 'all'
            (process every fly folder) or a fly number (process only that
            fly_<number> folder).
    """
    filename = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/master_2P.xlsx'
    wb = load_workbook(filename=filename, read_only=False)
    ws = wb.active  # rows are appended to the active sheet

    root_directory = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/'

    if args[0] == 'all':
        fly_folders = [
            os.path.join(root_directory, x) for x in os.listdir(root_directory)
            if 'fly' in x
        ]
        bbb.sort_nicely(fly_folders)
    else:
        fly_folders = [os.path.join(root_directory, 'fly_' + str(args[0]))]

    for fly_folder in fly_folders:
        print(fly_folder)
        # If no fly.json, just skip (at least one fly folder is empty for some reason)
        try:
            fly_data = load_json(os.path.join(fly_folder, 'fly.json'))
        except Exception:
            continue

        expt_folders = [
            os.path.join(fly_folder, x) for x in os.listdir(fly_folder)
            if 'func' in x
        ]
        bbb.sort_nicely(expt_folders)
        for expt_folder in expt_folders:

            expt_data = load_json(os.path.join(expt_folder, 'expt.json'))

            # Occasionally a fly may not have an imaging folder (if only
            # fictrac was recorded, for example). Start from an empty dict so
            # the except branch cannot raise NameError on the first experiment
            # (bug in the original: scan_data was referenced before
            # assignment) or silently reuse the previous experiment's data.
            scan_data = {}
            try:
                scan_file = os.path.join(expt_folder, 'imaging', 'scan.json')
                scan_data = load_json(scan_file)
                # Render voxel sizes with one decimal place for the sheet.
                for axis in ('x', 'y', 'z'):
                    key = axis + '_voxel_size'
                    scan_data[key] = '{:.1f}'.format(scan_data[key])
            except Exception:
                for key in ('laser_power', 'PMT_green', 'PMT_red',
                            'x_dim', 'y_dim', 'z_dim',
                            'x_voxel_size', 'y_voxel_size', 'z_voxel_size'):
                    scan_data[key] = None

            visual_file = os.path.join(expt_folder, 'visual', 'visual.json')
            try:
                visual_data = load_json(visual_file)
                visual_input = visual_data[0]['name'] + ' ({})'.format(
                    len(visual_data))
            except Exception:
                visual_input = None

            # fly_id / expt_id come from the trailing '_<id>' of the fly and
            # func directory names.
            fly_dir_name = os.path.split(os.path.split(expt_folder)[0])[-1]
            fly_id = fly_dir_name.split('_')[-1]
            expt_id = expt_folder.split('_')[-1]

            # Append the new row
            new_row = [
                int(fly_id),
                int(expt_id), fly_data['date'], expt_data['brain_area'],
                fly_data['genotype'], visual_input, None, fly_data['notes'],
                expt_data['notes'], expt_data['time'],
                fly_data['circadian_on'], fly_data['circadian_off'],
                fly_data['gender'], fly_data['age'], fly_data['temp'],
                scan_data['laser_power'], scan_data['PMT_green'],
                scan_data['PMT_red'], scan_data['x_dim'], scan_data['y_dim'],
                scan_data['z_dim'], scan_data['x_voxel_size'],
                scan_data['y_voxel_size'], scan_data['z_voxel_size']
            ]

            ws.append(new_row)

    # Save the workbook back to the same path.
    wb.save(filename)
示例#10
0
def main(args):
    """Cluster normalized, filtered behavior-response signals and build a dendrogram.

    Args:
        args: dict with key 'logfile' — path handed to flow.Printlog.
    """
    logfile = args['logfile']
    printlog = getattr(flow.Printlog(logfile=logfile), 'print_to_log')

    # NOTE(review): n_clusters and cluster_model_labels appear unused in this
    # function — possibly leftovers from a related script.
    n_clusters = 2000
    labels_file = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20201129_super_slices/cluster_labels.npy'
    cluster_model_labels = np.load(labels_file)

    ################################
    ### Load and correct filters ###
    ################################
    main_path = "/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20210316_neural_weighted_behavior"

    # Alphanumerically sorted '*responses*' files, stacked into one array.
    response_files = [
        os.path.join(main_path, file) for file in os.listdir(main_path)
        if 'responses' in file
    ]
    bbb.sort_nicely(response_files)

    responses = []
    for file in response_files:
        responses.append(np.load(file))
    responses = np.asarray(responses)
    responses.shape  # no-op; leftover from interactive exploration

    # Assumes 31 slices (49 - 18) x 2000 clusters x 4 behaviors x 500
    # timepoints — TODO confirm against the upstream response files.
    responses_split = np.reshape(responses, (49 - 18, 2000, 4, 500))
    # Zero two frequency bands (the 475:485 band presumably being the mirrored
    # negative frequencies), then keep only the real part of the inverse FFT.
    responses_fft = fft(responses_split, axis=-1)
    responses_fft[:, :, :, 15:23] = 0
    responses_fft[:, :, :, 475:485] = 0
    responses_filtered = ifft(responses_fft, axis=-1)
    responses_filtered = responses_filtered.real

    #################
    ### NORMALIZE ###
    #################
    all_signals = np.reshape(responses_filtered[:, :, :, :],
                             (31 * 2000, 4, 500))
    # (62000,4,500)
    # want to exclude signals that have no response to any behavior
    # each behavior type will have a different distribution of sum(abs(signal))
    all_sums = np.sum(np.abs(all_signals), axis=-1)

    # (62000,4)
    # how to pick the threshold for each behavior?
    # lets aim to use corr map sig threshold,
    # but for now take top 25%
    thresholds = np.percentile(all_sums, 75, axis=0)
    # Zero any signal whose summed magnitude is below its behavior's threshold.
    all_signals[(all_sums < thresholds[np.newaxis, :])] = 0

    # Scale each remaining signal to unit peak magnitude; nan_to_num cleans up
    # the 0/0 rows produced by the signals zeroed above.
    all_maxs = np.max(np.abs(all_signals), axis=-1)
    all_signals_normalized = all_signals / all_maxs[:, :, np.newaxis]
    all_signals_normalized = np.nan_to_num(all_signals_normalized)

    # Flatten behaviors and time into one feature axis: (62000, 4*500=2000).
    to_fit = np.reshape(all_signals_normalized, (62000, 2000))

    ###############
    ### CLUSTER ###
    ###############
    t0 = time.time()
    printlog('clustering.........')
    # distance_threshold=0 with n_clusters=None builds the full merge tree;
    # memory=main_path caches the tree computation on disk.
    model = AgglomerativeClustering(distance_threshold=0,
                                    n_clusters=None,
                                    memory=main_path,
                                    linkage='ward')
    model = model.fit(to_fit)
    printlog('complete!')
    printlog(str(time.time() - t0))

    ### Create linkage matrix for dendrogram
    # counts[i] = number of original samples under merge i; scipy's linkage
    # format needs this as the fourth column.
    counts = np.zeros(model.children_.shape[0])
    n_samples = len(model.labels_)
    for i, merge in enumerate(model.children_):
        current_count = 0
        for child_idx in merge:
            if child_idx < n_samples:
                current_count += 1  # leaf node
            else:
                current_count += counts[child_idx - n_samples]
        counts[i] = current_count

    linkage_matrix = np.column_stack(
        [model.children_, model.distances_, counts]).astype(float)

    # Dendrogram recursion can exceed Python's default limit for ~62k leaves.
    sys.setrecursionlimit(70000)
    test = dendrogram(
        linkage_matrix,
        truncate_mode=None,
        p=10,
        color_threshold=None,
        #link_color_func=lambda x: colors[x],
        no_labels=True,
        distance_sort=True,
        no_plot=True)

    printlog('did not fail!')
示例#11
0
def main(args):
    """Stitch partial motion-corrected brain volumes (and moco params) together.

    Concatenates motcorr_green_x.nii / motcorr_red_x.nii pieces along the time
    axis, saves stitched_brain_<color>.nii, deletes the partials, stitches the
    .npy motion-correction parameter files, then kicks off z-scoring.

    Args:
        args: argparse namespace; uses .directory (folder holding the partial
            volumes), .datadir (flag — prepend the data directory via
            datadir_appender) and .channels ('rg', 'r', 'g' or None).
    """
    print('Stitcher started.')
    directory = args.directory
    print('directory: {}'.format(directory))

    if args.datadir:
        directory = datadir_appender.datadir_appender(directory)

    # directory will contain motcorr_green_x.nii and motcorr_red_x.nii
    # get list of reds and greens
    reds = []
    greens = []
    for item in os.listdir(directory):
        # sanity check that it is .nii
        if '.nii' in item:
            if 'red' in item:
                reds.append(item)
            elif 'green' in item:
                greens.append(item)

    # need to order correctly for correct stitching
    bbb.sort_nicely(greens)
    bbb.sort_nicely(reds)

    # add directory path
    reds = [os.path.join(directory, x) for x in reds]
    greens = [os.path.join(directory, x) for x in greens]

    # NOTE(review): any .channels value other than 'rg'/'r'/'g'/None would
    # leave colors/channels unbound and raise NameError below.
    if args.channels == 'rg':
        colors = ['red', 'green']
        channels = [reds, greens]
        print('Using red and green channels.')
    elif args.channels == 'r':
        colors = ['red']
        channels = [reds]
        print('Using red channel.')
    elif args.channels == 'g':
        colors = ['green']
        channels = [greens]
        print('Using green channel.')
    elif args.channels is None:
        colors = ['red', 'green']
        channels = [reds, greens]
        print('Using red and green channels.')

    ### load brains ###
    # This part in based on the input argparse
    for i, channel in enumerate(channels):
        brains = []
        for brain_file in channel:
            brain = bbb.load_numpy_brain(brain_file)

            # Handle edgecase of single volume brain
            if len(np.shape(brain)) == 3:
                brain = brain[:, :, :, np.newaxis]
            print('shape of partial brain: {}'.format(np.shape(brain)))
            brains.append(brain)

        print('brains len: {}'.format(len(brains)))
        # Concatenate along the last (time) axis and write the stitched file.
        stitched_brain = np.concatenate(brains, axis=-1)
        print('stitched_brain shape: {}'.format(np.shape(stitched_brain)))
        save_file = os.path.join(directory,
                                 'stitched_brain_{}.nii'.format(colors[i]))
        bbb.save_brain(save_file, stitched_brain)
        stitched_brain = None  # release the large array before the next channel

        # delete partial brains
        [os.remove(file) for file in channel]

    ### Stitch motcorr params and create motcorr graph
    # get motcorr param files
    motcorr_param_files = []
    for item in os.listdir(directory):
        if '.npy' in item:
            file = os.path.join(directory, item)
            motcorr_param_files.append(file)
    bbb.sort_nicely(motcorr_param_files)

    # Load motcorr param files (needed to sort first)
    motcorr_params = []
    for file in motcorr_param_files:
        motcorr_params.append(np.load(file))

    if len(motcorr_params) > 0:
        stitched_params = np.concatenate(motcorr_params, axis=0)
        save_file = os.path.join(directory, 'motcorr_params_stitched')
        np.save(save_file, stitched_params)
        # Partials are no longer needed once the stitched params are saved.
        [os.remove(file) for file in motcorr_param_files]
        xml_dir = os.path.join(os.path.split(directory)[0], 'imaging')
        print('directory: {}'.format(directory))
        print('xml_dir: {}'.format(xml_dir))
        sys.stdout.flush()
        bbb.save_motion_figure(stitched_params, xml_dir, directory)
    else:
        print('Empty motcorr params - skipping saving moco figure.')

    ### START Z-SCORING ###
    os.system("sbatch zscore.sh {}".format(directory))
示例#12
0
def add_fly_to_xlsx(fly_folder):
    """Append one row per functional experiment in fly_folder to master_2P.xlsx.

    Silently returns when the fly has no readable fly.json (at least one fly
    folder is empty for some reason).

    Fixes over the original: the except branch set scan_data['laser_power']
    while the appended row reads scan_data['laser_power_max'], so a missing
    imaging/scan.json raised KeyError; scan_data could also be referenced
    before assignment (NameError) on the first experiment.
    """
    # Load xlsx
    xlsx_path = '/oak/stanford/groups/trc/data/Brezovec/2P_Imaging/20190101_walking_dataset/master_2P.xlsx'
    wb = load_workbook(filename=xlsx_path, read_only=False)
    ws = wb.active  # rows are appended to the active sheet

    # If no fly.json, just skip
    try:
        fly_data = load_json(os.path.join(fly_folder, 'fly.json'))
    except Exception:
        return

    expt_folders = [
        os.path.join(fly_folder, x) for x in os.listdir(fly_folder)
        if 'func' in x
    ]
    bbb.sort_nicely(expt_folders)
    for expt_folder in expt_folders:

        expt_data = load_json(os.path.join(expt_folder, 'expt.json'))

        # Occasionally a fly may not have an imaging folder (if only fictrac
        # was recorded, for example). Start from an empty dict so the except
        # branch cannot hit NameError on the first iteration.
        scan_data = {}
        try:
            scan_file = os.path.join(expt_folder, 'imaging', 'scan.json')
            scan_data = load_json(scan_file)
            # Render voxel sizes with one decimal place for the sheet.
            for axis in ('x', 'y', 'z'):
                key = axis + '_voxel_size'
                scan_data[key] = '{:.1f}'.format(scan_data[key])
        except Exception:
            # Use 'laser_power_max' here — the key the row below actually
            # reads (the original set 'laser_power' and raised KeyError).
            for key in ('laser_power_max', 'PMT_green', 'PMT_red',
                        'x_dim', 'y_dim', 'z_dim',
                        'x_voxel_size', 'y_voxel_size', 'z_voxel_size'):
                scan_data[key] = None

        visual_file = os.path.join(expt_folder, 'visual', 'visual.json')
        try:
            visual_data = load_json(visual_file)
            visual_input = visual_data[0]['name'] + ' ({})'.format(
                len(visual_data))
        except Exception:
            visual_input = None

        # fly_id / expt_id come from the trailing '_<id>' of the fly and func
        # directory names.
        fly_dir_name = os.path.split(os.path.split(expt_folder)[0])[-1]
        fly_id = fly_dir_name.split('_')[-1]
        expt_id = expt_folder.split('_')[-1]

        # Append the new row
        new_row = [
            int(fly_id),
            int(expt_id), fly_data['date'], expt_data['brain_area'],
            fly_data['genotype'], visual_input, None, fly_data['notes'],
            expt_data['notes'], expt_data['time'], fly_data['circadian_on'],
            fly_data['circadian_off'], fly_data['gender'], fly_data['age'],
            fly_data['temp'], scan_data['laser_power_max'],
            scan_data['PMT_green'], scan_data['PMT_red'], scan_data['x_dim'],
            scan_data['y_dim'], scan_data['z_dim'], scan_data['x_voxel_size'],
            scan_data['y_voxel_size'], scan_data['z_voxel_size']
        ]

        ws.append(new_row)

    # Save the file
    wb.save(xlsx_path)