def run_cropper(input_path, parameters):
    """
    This function takes in a decoded analysis state and crops it according to
    specified cropping points.

    Args:
        input_path the path of the decoding file

    """

    # Determine output .tif file path
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,input,home_path FROM Analysis WHERE decoding_main=?"
    val = [input_path, ]
    mycursor.execute(sql, val)
    myresult = mycursor.fetchall()
    # Keep only the last matching row and flatten it into a list
    data = list(myresult[-1]) if myresult else []

    # update the database
    if data[5] == 0:
        data[5] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}"
        output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"
        sql1 = "UPDATE Analysis SET cropping_main=?,cropping_v=? WHERE decoding_main=? "
        val1 = [output_tif_file_path, data[5], input_path]
        mycursor.execute(sql1, val1)

    else:
        data[5] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}"
        output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"
        sql2 = "INSERT INTO Analysis (cropping_main,cropping_v) VALUES (?,?)"
        val2 = [output_tif_file_path, data[5]]
        mycursor.execute(sql2, val2)
        database.commit()
        sql3 = "UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,input=?,home_path=? WHERE cropping_main=? AND cropping_v=?"
        val3 = [input_path, data[4], data[0], data[1], data[2], data[3], data[6], data[7], output_tif_file_path,
                data[5]]
        mycursor.execute(sql3, val3)
    database.commit()

    # Spatial cropping
    input_path = os.path.join(os.environ['DATA_DIR_LOCAL'], input_path)
    logging.info('Loading movie')
    m = cm.load(input_path)
    logging.info('Loaded movie')

    # cropping_points_spatial is stored as [y1, y2, x1, x2], so axis 1 (rows)
    # is sliced by the y limits and axis 2 (columns) by the x limits
    [x_, _x, y_, _y] = parameters['cropping_points_spatial']

    logging.info('Performing spatial cropping')
    m = m[:, x_:_x, y_:_y]
    logging.info(' Spatial cropping finished')
    output_tif_file_path_full = os.path.join(os.environ['DATA_DIR_LOCAL'], output_tif_file_path)
    # Save the movie
    m.save(output_tif_file_path_full)

    return output_tif_file_path, data[5]

def cropping_interval(mouse):
    """
    This function asks the user for spatial cropping limits, stores them in
    the database, and returns them as a parameters dict.
    """
    # Choose crop parameters
    x1 = int(input("Limit X1 : "))
    x2 = int(input("Limit X2 : "))
    y1 = int(input("Limit Y1 : "))
    y2 = int(input("Limit Y2 : "))
    sql = "UPDATE Analysis SET crop_spatial=?,cropping_points_spatial_x1=?,cropping_points_spatial_x2=?,cropping_points_spatial_y1=?,cropping_points_spatial_y2=?,crop_temporal=?,cropping_points_temporal=? WHERE mouse = ?"
    val = [True, x1, x2, y1, y2, False, None, mouse]
    mycursor.execute(sql, val)
    database.commit()
    # the spatial points are stored as [y1, y2, x1, x2] to match the
    # (frames, rows, cols) slicing in run_cropper
    parameters_cropping = {'crop_spatial': True, 'cropping_points_spatial': [y1, y2, x1, x2],
                           'crop_temporal': False, 'cropping_points_temporal': [], 'segmentation': True}
    return parameters_cropping
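
A minimal usage sketch for the two helpers above. It assumes the module-level `database`/`mycursor` globals already point at a database with the Analysis table and that DATA_DIR_LOCAL is set; the mouse id and input path are placeholders, not values from the original project.

# Hypothetical driver for the cropping step; see the assumptions above.
import os

os.environ['DATA_DIR_LOCAL'] = '/data/calcium_imaging/'  # placeholder

params = cropping_interval(mouse=32)  # prompts for the X/Y limits
tif_path, cropping_version = run_cropper(
    'data/interim/decoding/main/mouse_32_session_1_trial_1_0.v1.tif',  # placeholder
    params)
print(tif_path, cropping_version)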
Example No. 3
def run_component_evaluation(input_file,
                             session_wise=False,
                             equalization=False):
    """
    This function evaluates the components found by source extraction, using
    the min_SNR, rval_thr and use_cnn criteria stored in the database, and
    saves the evaluated CNMF object as an .hdf5 file.
    """

    sql = "SELECT source_extraction_session_wise,min_SNR,alignment_main,equalization_main,motion_correction_main,rval_thr,use_cnn FROM Analysis WHERE source_extraction_main=?"
    val = [
        input_file,
    ]
    mycursor.execute(sql, val)
    myresult = mycursor.fetchall()
    # Keep only the last matching row and flatten it into a list
    data = list(myresult[-1]) if myresult else []

    if session_wise:
        input_mmap_file_path = data[2]
    elif equalization:
        input_mmap_file_path = data[3]
    else:
        input_mmap_file_path = data[4]

    parameters = {'min_SNR': data[1], 'rval_thr': data[5], 'use_cnn': data[6]}

    data_dir = os.environ['DATA_DIR_LOCAL'] + 'data/interim/component_evaluation/session_wise/' \
        if data[0] else os.environ['DATA_DIR_LOCAL'] + 'data/interim/component_evaluation/trial_wise/'

    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,equalization_v,source_extraction_v,component_evaluation_v,input,home_path,decoding_main FROM Analysis WHERE source_extraction_main=?"
    val = [
        input_file,
    ]
    mycursor.execute(sql, val)
    result = mycursor.fetchall()
    data = list(result[-1]) if result else []

    # Update the database

    if data[10] == 0:
        data[10] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}.{data[10]}"
        output_file_path = f'main/{file_name}.hdf5'
        sql1 = "UPDATE Analysis SET component_evaluation_main=?,component_evaluation_v=? WHERE source_extraction_main=? "
        val1 = [output_file_path, data[10], input_file]
        mycursor.execute(sql1, val1)

    else:
        data[10] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}.{data[10]}"
        output_file_path = f'main/{file_name}.hdf5'
        sql2 = "INSERT INTO Analysis (component_evaluation_main,component_evaluation_v) VALUES (?,?)"
        val2 = [output_file_path, data[10]]
        mycursor.execute(sql2, val2)
        database.commit()

    output_file_path_full = data_dir + output_file_path

    # Load the CNMF object; load_CNMF expects the .hdf5 file produced by
    # source extraction (input_file), not the memmapped movie
    cnm = load_CNMF(input_file)

    # Load the original movie
    Yr, dims, T = cm.load_memmap(input_mmap_file_path)
    images = Yr.T.reshape((T, ) + dims, order='F')

    # Set the quality-evaluation parameters
    cnm.params.set('quality', parameters)

    # Stop the cluster if one exists
    n_processes = psutil.cpu_count()
    try:
        cm.cluster.stop_server()
    except Exception:
        pass

    # Start a new cluster
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',
        n_processes=n_processes,
        # number of process to use, if you go out of memory try to reduce this one
        single_thread=False)
    # Evaluate components
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    logging.debug('Number of total components: %d', len(cnm.estimates.C))
    logging.debug('Number of accepted components: %d',
                  len(cnm.estimates.idx_components))

    # Stop the cluster
    dview.terminate()

    # Save CNMF object
    cnm.save(output_file_path_full)

    return output_file_path
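
A sketch of how this evaluation step might be invoked after source extraction; the .hdf5 path is a placeholder and the database globals are assumed to be configured as above.

# Hypothetical call; the Analysis row for this source extraction output is
# assumed to already hold min_SNR, rval_thr and use_cnn.
evaluated_path = run_component_evaluation(
    'data/interim/source_extraction/trial_wise/main/'
    'mouse_32_session_1_trial_1.0.v1.1.1.0.0.1.hdf5',  # placeholder
    session_wise=False)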
Example No. 4
def run_alignment(mouse, sessions, motion_correction_v, cropping_v, dview):
    """
    This is the main function for the alignment step. It applies methods
    from the CaImAn package used originally in motion correction
    to do alignment.

    """
    for session in sessions:
        # Update the database

        file_name = f"mouse_{mouse}_session_{session}_alignment"
        sql1 = "UPDATE Analysis SET alignment_main=? WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=? "
        val1 = [file_name, mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql1, val1)

        # Determine the output .mmap file name
        output_mmap_file_path = os.environ[
            'DATA_DIR_LOCAL'] + f'data/interim/alignment/main/{file_name}.mmap'
        sql = "SELECT motion_correction_main  FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        # Flatten all returned rows into a single list of file paths
        input_mmap_file_list = [y for row in result for y in row]

        sql = "SELECT motion_correction_cropping_points_x1 FROM Analysis WHERE mouse = ? AND session=?AND motion_correction_v =? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        x_ = [value for row in result for value in row]

        sql = "SELECT motion_correction_cropping_points_x2 FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        _x = [value for row in result for value in row]

        sql = "SELECT motion_correction_cropping_points_y1 FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?"
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        # these are the y1 points, so they belong in y_ (the original code
        # swapped y_ and _y)
        y_ = [value for row in result for value in row]

        sql = "SELECT motion_correction_cropping_points_y2 FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?"
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        # ...and the y2 points belong in _y
        _y = [value for row in result for value in row]

        # Each trial may have been cropped differently; align them all to the
        # largest cropping points
        new_x1 = max(x_)
        new_x2 = max(_x)
        new_y1 = max(y_)
        new_y2 = max(_y)
        m_list = []
        for i in range(len(input_mmap_file_list)):
            m = cm.load(input_mmap_file_list[i])
            m = m.crop(new_x1 - x_[i], new_x2 - _x[i], new_y1 - y_[i],
                       new_y2 - _y[i], 0, 0)
            m_list.append(m)

        # Concatenate them using the concat function
        m_concat = cm.concatenate(m_list, axis=0)
        fname = m_concat.save(output_mmap_file_path, order='C')

        # MOTION CORRECTING EACH INDIVIDUAL MOVIE WITH RESPECT TO A TEMPLATE MADE OF THE FIRST MOVIE
        logging.info(
            'Performing motion correction on all movies with respect to a template made of the first movie.'
        )
        t0 = datetime.datetime.today()
        # parameters alignment
        sql5 = "SELECT make_template_from_trial,gSig_filt,max_shifts,niter_rig,strides,overlaps,upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,use_conda,nonneg_movie, border_nan  FROM Analysis WHERE alignment_main=? "
        val5 = [
            file_name,
        ]
        cursor.execute(sql5, val5)
        myresult = cursor.fetchall()
        para = list(myresult[-1]) if myresult else []
        parameters = {
            'make_template_from_trial': para[0],
            'gSig_filt': (para[1], para[1]),
            'max_shifts': (para[2], para[2]),
            'niter_rig': para[3],
            'strides': (para[4], para[4]),
            'overlaps': (para[5], para[5]),
            'upsample_factor_grid': para[6],
            'num_frames_split': para[7],
            'max_deviation_rigid': para[8],
            'shifts_opencv': para[9],
            'use_cuda': para[10],  # stored in the use_conda column
            'nonneg_movie': para[11],
            'border_nan': para[12]
        }
        # Create a template from the trial specified in the parameters
        # (the original code always loaded index 1 and re-cropped the same
        # movie in a loop)
        template_index = parameters['make_template_from_trial']
        m0 = cm.load(input_mmap_file_list[template_index])
        m0 = m0.crop(new_x1 - x_[template_index], new_x2 - _x[template_index],
                     new_y1 - y_[template_index], new_y2 - _y[template_index],
                     0, 0)
        m0_filt = cm.movie(
            np.array([
                high_pass_filter_space(m_, parameters['gSig_filt'])
                for m_ in m0
            ]))
        template0 = cm.motion_correction.bin_median(
            m0_filt.motion_correct(
                5, 5, template=None)[0])  # may be improved in the future

        # Setting the parameters
        opts = params.CNMFParams(params_dict=parameters)

        # Create a motion correction object
        mc = MotionCorrect(fname, dview=dview, **opts.get_group('motion'))

        # Perform non-rigid motion correction
        mc.motion_correct(template=template0, save_movie=True)

        # Cropping borders
        x_ = math.ceil(
            abs(np.array(mc.shifts_rig)[:, 1].max()
                ) if np.array(mc.shifts_rig)[:, 1].max() > 0 else 0)
        _x = math.ceil(
            abs(np.array(mc.shifts_rig)[:, 1].min()
                ) if np.array(mc.shifts_rig)[:, 1].min() < 0 else 0)
        y_ = math.ceil(
            abs(np.array(mc.shifts_rig)[:, 0].max()
                ) if np.array(mc.shifts_rig)[:, 0].max() > 0 else 0)
        _y = math.ceil(
            abs(np.array(mc.shifts_rig)[:, 0].min()
                ) if np.array(mc.shifts_rig)[:, 0].min() < 0 else 0)

        # Load the motion corrected movie into memory
        movie = cm.load(mc.fname_tot_rig[0])
        # Crop all movies to those border pixels
        # crop() returns a new movie, so the result must be reassigned
        movie = movie.crop(x_, _x, y_, _y, 0, 0)
        sql1 = "UPDATE Analysis SET alignment_x1=?, alignment_x2 =?, alignment_y1=?, alignment_y2=? WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?"
        val1 = [
            x_, _x, y_, _y, mouse, session, motion_correction_v, cropping_v
        ]
        cursor.execute(sql1, val1)

        # save motion corrected and cropped movie
        output_mmap_file_path_tot = movie.save(
            os.environ['DATA_DIR_LOCAL'] +
            f'data/interim/alignment/main/{file_name}.mmap',
            order='C')
        logging.info(
            f' Cropped and saved rigid movie as {output_mmap_file_path_tot}')
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_rig[0])

        # Create a timeline and store it
        sql = "SELECT trial FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?"
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        trial_index_list = []
        inter = []
        for i in result:
            inter += i
        for j in range(0, len(inter)):
            trial_index_list.append(inter[j])

        timeline = [[trial_index_list[0], 0]]
        timepoints = [0]
        for i in range(1, len(m_list)):
            m = m_list[i]
            timeline.append(
                [trial_index_list[i], timeline[i - 1][1] + m.shape[0]])
            timepoints.append(timepoints[i - 1] + m.shape[0])
        # store the timeline once, after it is complete (the original wrote
        # the pickle inside the loop and crashed for single-trial sessions,
        # because the path was never defined)
        timeline_pkl_file_path = os.environ[
            'DATA_DIR'] + f'data/interim/alignment/meta/timeline/{file_name}.pkl'
        with open(timeline_pkl_file_path, 'wb') as f:
            pickle.dump(timeline, f)
        sql1 = "UPDATE Analysis SET alignment_timeline=? WHERE mouse = ? AND session=?AND motion_correction_v =? AND cropping_v=? "
        val1 = [
            timeline_pkl_file_path, mouse, session, motion_correction_v,
            cropping_v
        ]
        cursor.execute(sql1, val1)
        timepoints.append(movie.shape[0])

        dt = int((datetime.datetime.today() - t0).seconds /
                 60)  # timedelta in minutes
        sql1 = "UPDATE Analysis SET alignment_duration_concatenation=? WHERE mouse = ? AND session=?AND motion_correction_v =? AND cropping_v=? "
        val1 = [dt, mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql1, val1)
        logging.info(f' Performed concatenation. dt = {dt} min.')

        ## point each motion correction file to its aligned version
        data_dir = os.environ[
            'DATA_DIR'] + 'data/interim/motion_correction/main/'
        for i in range(len(input_mmap_file_list)):
            aligned_movie = movie[timepoints[i]:timepoints[i + 1]]
            # include the trial index so successive saves do not overwrite
            # one another
            motion_correction_output_aligned = aligned_movie.save(
                data_dir + file_name + f'_trial_{i}_els' + '.mmap', order='C')
            sql1 = "UPDATE Analysis SET motion_correct_align=? WHERE motion_correction_meta=? AND motion_correction_v"
            val1 = [
                motion_correction_output_aligned, input_mmap_file_list[i],
                motion_correction_v
            ]
            cursor.execute(sql1, val1)

    database.commit()
    return
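
Because alignment reuses CaImAn's motion correction machinery, it needs a running cluster. A sketch under that assumption, with placeholder mouse/session values:

# Hypothetical driver for the alignment step.
import caiman as cm

c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
try:
    run_alignment(mouse=32, sessions=[1, 2], motion_correction_v=1,
                  cropping_v=1, dview=dview)
finally:
    cm.cluster.stop_server(dview=dview)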
def run_equalizer(input_file, session_wise=False):
    """
    This function is meant to help with differences in contrast between
    trials and sessions, either to equalize general brightness or to reduce
    photobleaching. It corrects the videos and saves the corrected versions.
    It can be run on the already aligned videos or trial by trial; for
    trial-by-trial use, a template is required.
    """
    # Take all the parameters needed for equalization
    sql5 = "SELECT make_template_from_trial,equalizer,histogram_step FROM Analysis WHERE alignment_main=? OR motion_correction_main =? "
    val5 = [input_file, input_file]
    cursor.execute(sql5, val5)
    myresult = cursor.fetchall()
    para = list(myresult[-1]) if myresult else []
    parameters = {
        'make_template_from_trial': para[0],
        'equalizer': para[1],
        'histogram_step': para[2]
    }

    # determine the output directory
    output_tif_file_path = os.environ['DATA_DIR'] + 'data/interim/equalizer/main/'
    # determine the file name
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,equalization_v,input,home_path,decoding_main FROM Analysis WHERE alignment_main =? Or motion_correction_main =?"
    val = [input_file, input_file]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = list(result[-1]) if result else []

    # Update the database

    if data[8] == 0:
        data[8] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}"
        sql1 = "UPDATE Analysis SET equalization_main=?,equalization_v=? WHERE alignment_main=? "
        val1 = [file_name, data[8], input_file]
        cursor.execute(sql1, val1)

    else:
        data[8] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}"
        sql2 = "INSERT INTO Analysis (motion_correction_meta,motion_correction_v) VALUES (?,?)"
        val2 = [file_name, data[8]]
        cursor.execute(sql2, val2)
        database.commit()
    database.commit()

    input_tif_file_list = os.path.join(os.environ['DATA_DIR_LOCAL'],
                                       input_file)
    movie_original = cm.load(
        input_tif_file_list)  # load video as 3d array already concatenated
    if parameters['make_template_from_trial'] == 0:
        movie_equalized = do_equalization(movie_original)
    else:
        movie_equalized = np.empty_like(movie_original)
        source = movie_original[0:100, :, :]
        # equalize each 100-frame chunk against the histogram of the source
        # template
        for j in range(int(movie_original.shape[0] / 100)):
            want_to_equalize = movie_original[j * 100:(j + 1) * 100, :, :]
            movie_equalized[j * 100:(j + 1) *
                            100, :, :] = do_equalization_from_template(
                                reference=want_to_equalize, source=source)
    # Save the movie
    equalized_path = movie_equalized.save(output_tif_file_path + file_name +
                                          '.mmap',
                                          order='C')

    return equalized_path
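
A sketch of the equalization call; the input is a placeholder for an alignment (or motion correction) output already registered in the database.

# Hypothetical call.
equalized_path = run_equalizer(
    'data/interim/alignment/main/mouse_32_session_1_alignment.mmap',  # placeholder
    session_wise=True)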
Example No. 6
def run_motion_correction(cropping_file, dview):
    """
    This is the function for motion correction. Its goal is to take in a decoded and
    cropped .tif file, perform motion correction, and save the result as a .mmap file.

    This function is only runnable on the cn76 server because it requires parallel processing.

    Args:
        cropping_file: tif file after cropping
        dview: cluster

    Returns:
        fname_tot_els: path of the motion corrected .mmap file
        data[6]: the motion correction version
    """
    # Get output file paths

    data_dir = os.environ['DATA_DIR_LOCAL'] + 'data/interim/motion_correction/'
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,input,home_path,decoding_main FROM Analysis WHERE cropping_main=? ORDER BY motion_correction_v"
    val = [
        cropping_file,
    ]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = list(result[-1]) if result else []

    # Update the database

    if data[6] == 0:
        data[6] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql1 = "UPDATE Analysis SET motion_correction_meta=?,motion_correction_v=? WHERE cropping_main=? "
        val1 = [output_meta_pkl_file_path, data[6], cropping_file]
        cursor.execute(sql1, val1)

    else:
        data[6] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql2 = "INSERT INTO Analysis (motion_correction_meta,motion_correction_v) VALUES (?,?)"
        val2 = [output_meta_pkl_file_path, data[6]]
        cursor.execute(sql2, val2)
        database.commit()
        sql3 = "UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,input=?,home_path=?,cropping_v=?,cropping_main=? WHERE motion_correction_meta=? AND motion_correction_v=?"
        val3 = [
            data[9], data[4], data[0], data[1], data[2], data[3], data[7],
            data[8], data[5], cropping_file, output_meta_pkl_file_path, data[6]
        ]
        cursor.execute(sql3, val3)
    database.commit()
    output_meta_pkl_file_path_full = data_dir + output_meta_pkl_file_path

    # Calculate movie minimum to subtract from movie
    cropping_file_full = os.environ['DATA_DIR_LOCAL'] + cropping_file
    min_mov = np.min(cm.load(cropping_file_full))

    # Apply the parameters to the CaImAn algorithm

    sql5 = "SELECT motion_correct,pw_rigid,save_movie_rig,gSig_filt,max_shifts,niter_rig,strides,overlaps,upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,use_conda,nonneg_movie, border_nan  FROM Analysis WHERE cropping_main=? "
    val5 = [
        cropping_file,
    ]
    cursor.execute(sql5, val5)
    myresult = cursor.fetchall()
    para = list(myresult[-1]) if myresult else []
    parameters = {
        'motion_correct': para[0],
        'pw_rigid': para[1],
        'save_movie_rig': para[2],
        'gSig_filt': (para[3], para[3]),
        'max_shifts': (para[4], para[4]),
        'niter_rig': para[5],
        'strides': (para[6], para[6]),
        'overlaps': (para[7], para[7]),
        'upsample_factor_grid': para[8],
        'num_frames_split': para[9],
        'max_deviation_rigid': para[10],
        'shifts_opencv': para[11],
        'use_cuda': para[12],  # stored in the use_conda column
        'nonneg_movie': para[13],
        'border_nan': para[14]
    }
    caiman_parameters = parameters.copy()
    caiman_parameters['min_mov'] = min_mov
    opts = params.CNMFParams(params_dict=caiman_parameters)

    # Rigid motion correction (in both cases)

    logging.info('Performing rigid motion correction')
    t0 = datetime.datetime.today()

    # Create a MotionCorrect object

    mc = MotionCorrect([cropping_file_full],
                       dview=dview,
                       **opts.get_group('motion'))

    # Perform rigid motion correction

    mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'],
                            template=None)
    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f' Rigid motion correction finished. dt = {dt} min')

    # Obtain template, rigid shifts and border pixels

    total_template_rig = mc.total_template_rig
    shifts_rig = mc.shifts_rig

    # Save template, rigid shifts and border pixels in a dictionary

    meta_pkl_dict = {
        'rigid': {
            'template': total_template_rig,
            'shifts': shifts_rig,
        }
    }
    sql = "UPDATE Analysis SET duration_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? "
    val = [dt, output_meta_pkl_file_path, data[6]]
    cursor.execute(sql, val)

    if parameters['save_movie_rig'] == 1:
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped

        logging.info(f' Loading rigid movie for cropping')
        m_rig = cm.load(mc.fname_tot_rig[0])
        logging.info(f' Loaded rigid movie for cropping')

        # Get the cropping points determined by the maximal rigid shifts

        x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig)

        # Crop the movie

        logging.info(
            f' Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y]
        sql = "UPDATE Analysis SET motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [x_, _x, y_, _y, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql, val)

        # Save the movie

        rig_role = 'alternate' if parameters['pw_rigid'] else 'main'
        fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name +
                                   '_rig' + '.mmap',
                                   order='C')
        logging.info(f' Cropped and saved rigid movie as {fname_tot_rig}')

        # Remove the remaining non-cropped movie

        os.remove(mc.fname_tot_rig[0])

        sql = "UPDATE Analysis SET motion_correction_rig_role=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [fname_tot_rig, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql, val)
        database.commit()

    # If specified in the parameters, apply piecewise-rigid motion correction
    if parameters['pw_rigid'] == 1:
        logging.info(f' Performing piecewise-rigid motion correction')
        t0 = datetime.datetime.today()
        # Perform non-rigid (piecewise rigid) motion correction. Use the rigid result as a template.
        mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)
        # Obtain template and filename
        total_template_els = mc.total_template_els
        fname_tot_els = mc.fname_tot_els[0]

        dt = int((datetime.datetime.today() - t0).seconds /
                 60)  # timedelta in minutes
        meta_pkl_dict['pw_rigid'] = {
            'template': total_template_els,
            'x_shifts': mc.x_shifts_els,
            # the y shifts take up space but are kept for completeness
            'y_shifts': mc.y_shifts_els
        }

        logging.info(
            f' Piecewise-rigid motion correction finished. dt = {dt} min')

        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped

        logging.info(f' Loading pw-rigid movie for cropping')
        m_els = cm.load(fname_tot_els)
        logging.info(f' Loaded pw-rigid movie for cropping')

        # Get the cropping points determined by the maximal rigid shifts

        x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(
            np.array(mc.x_shifts_els), np.array(mc.y_shifts_els))
        # Crop the movie

        logging.info(
            f' Cropping and saving pw-rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_els = m_els.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]

        # Save the movie

        fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' +
                                   '.mmap',
                                   order='C')
        logging.info(f'Cropped and saved pw-rigid movie as {fname_tot_els}')

        # Remove the remaining non-cropped movie

        os.remove(mc.fname_tot_els[0])

        sql = "UPDATE Analysis SET  motion_correction_main=?, motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=?,duration_pw_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [
            fname_tot_els, x_, _x, y_, _y, dt, output_meta_pkl_file_path,
            data[6]
        ]
        cursor.execute(sql, val)
        database.commit()

    # Write meta results dictionary to the pkl file

    with open(output_meta_pkl_file_path_full, 'wb') as pkl_file:
        pickle.dump(meta_pkl_dict, pkl_file)

    # NOTE: fname_tot_els is only defined when pw_rigid is enabled above;
    # this return assumes piecewise-rigid correction was run
    return fname_tot_els, data[6]
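
As the docstring notes, this step needs parallel processing; a minimal sketch using the usual CaImAn cluster setup (the cropped .tif path is a placeholder):

# Hypothetical driver for the motion correction step.
import caiman as cm

c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
try:
    mmap_path, mc_version = run_motion_correction(
        'data/interim/cropping/main/mouse_32_session_1_trial_1_0.v1.1.tif',  # placeholder
        dview)
finally:
    cm.cluster.stop_server(dview=dview)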
Example No. 7
def run_registration(input_file, parameters):
    """
    This is the main registration function. It is supposed to be run after
    trial-wise component evaluation. Registration takes the contours of the
    trial-wise source-extracted components and matches cells across trials.
    It can use two different methods: the Hungarian matching algorithm
    (RegisterMulti, as implemented in Giovannucci et al., 2019) or cell
    registration (CellReg) using centroid distance and spatial correlation
    (as implemented in Sheintuch et al., 2017). The default is registration
    with no modeling of the distributions of centroids and spatial
    correlation.

    Note: the `parameters` argument is assumed here because the body reads
    keys such as 'session_wise', 'min_cell_size' and 'cost_threshold'; in
    the original source it presumably came from module-level state.
    """
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,source_extraction_v,equalization_v,component_evaluation_v,registration_v FROM Analysis WHERE component_evaluation_main=?"
    val = [
        input_file,
    ]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = list(result[-1]) if result else []

    # Update the database

    if data[11] == 0:
        data[11] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[9]}.{data[8]}.{data[10]}.{data[11]}"
        # the original updated the motion correction columns here; the
        # registration columns are assumed to be the intended target
        sql1 = "UPDATE Analysis SET registration_main=?,registration_v=? WHERE component_evaluation_main=? "
        val1 = [file_name, data[11], input_file]
        cursor.execute(sql1, val1)

    else:
        data[11] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[9]}.{data[8]}.{data[10]}.{data[11]}"
        sql2 = "INSERT INTO Analysis (registration_main,registration_v) VALUES (?,?)"
        val2 = [file_name, data[11]]
        cursor.execute(sql2, val2)
        database.commit()

    database.commit()

    if parameters['session_wise'] == False:
        data_dir = os.environ[
            'DATA_DIR'] + 'data/interim/registration/trial_wise/main/'
    else:
        data_dir = os.environ[
            'DATA_DIR'] + 'data/interim/registration/session_wise/main/'

    # `db`, `step_index` and `row_new` are leftovers from an earlier
    # DataFrame-based version of this pipeline and are assumed to exist at
    # module level; note this overwrites the file_name built above
    file_name = db.create_file_name(step_index, row_new.name)
    output_file_path = data_dir + f'{file_name}.pkl'

    ##create the dictionary with metadata information
    output = {
        'main': output_file_path,
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    ## take alignment data for the alignment timeline (`df` is likewise
    ## assumed to be a module-level trials DataFrame)
    first_row = df.iloc[0]
    alignment_output = eval(first_row['alignment_output'])
    alignment_timeline_file = alignment_output['meta']['timeline']

    ## multiple lists created to append the relevant information for the
    ## registration and the creation of a unique time trace matrix
    ## (cnm.estimates.A and cnm.estimates.C), both taken after component
    ## evaluation
    A_list = []  ## list of contour matrices over multiple trials
    #A_size = []  ## list for the size of A (just to verify it is always the same size)
    FOV_size = []  ## list of cn filter dims (to verify they are always the same)
    A_number_components = []  ## total number of components extracted per trial
    C_dims = []  ## dimensions of C, to keep track of the timeline
    C_list = []  ## list of traces for each trial
    evaluated_trials = []
    evaluated_session = []
    typical_size = []
    for i in range(len(df)):
        row = df.iloc[i]
        component_evaluation_hdf5_file_path = eval(
            row['component_evaluation_output'])['main']
        corr_path = eval(
            row['source_extraction_output'])['meta']['corr']['main']
        cnm = load_CNMF(component_evaluation_hdf5_file_path)
        cn_filter = np.load(db.get_file(corr_path))

        FOV_size.append(cn_filter.shape)
        #A_size.append(cnm.estimates.A.shape[0])
        A_number_components.append(cnm.estimates.idx_components.shape[0])
        A_list.append(cnm.estimates.A[:, cnm.estimates.idx_components])
        C_dims.append(cnm.estimates.C.shape)
        size = cnm.estimates.A[:, cnm.estimates.idx_components].sum(axis=0)
        for j in range(len(cnm.estimates.idx_components)):
            typical_size.append(size[0, j])
        if cnm.estimates.bl is None:
            C_list.append(cnm.estimates.C[cnm.estimates.idx_components, :])
        else:
            C_list.append(cnm.estimates.C[cnm.estimates.idx_components, :] -
                          cnm.estimates.bl[cnm.estimates.idx_components,
                                           np.newaxis])
        evaluated_trials.append(
            (df.iloc[i].name[2] - 1) * 2 +
            df.iloc[i].name[3])  ## number that goes from 0 to 42
        evaluated_session.append(df.iloc[i].name[1])

    ## add a size restriction on the neurons that will be processed further.
    ## The restriction bounds are based on the histogram of typical neuronal
    ## sizes
    min_size = parameters['min_cell_size']
    max_size = parameters['max_cell_size']
    new_A_list = []
    new_C_list = []
    A_components = []
    C_dims_new = []
    new_evaluated_trials = []
    new_evaluated_session = []
    for i in range(len(A_list)):
        accepted_size = []
        size = A_list[i].sum(axis=0)
        for j in range(size.shape[1]):
            # use the configured bounds instead of the hardcoded 10 and 25
            if min_size < size[0, j] < max_size:
                accepted_size.append(j)
        if len(accepted_size) > 1:
            new_A_list.append(A_list[i][:, accepted_size])
            new_C_list.append(C_list[i][accepted_size, :])
            A_components.append(A_number_components[i])
            C_dims_new.append(new_C_list[-1].shape)
            new_evaluated_trials.append(evaluated_trials[i])
            new_evaluated_session.append(evaluated_session[i])
    A_list = new_A_list
    C_list = new_C_list

    ## run the CaImAn registration routine, which uses the Hungarian matching
    ## algorithm on the contour list
    spatial_union, assignments, match = register_multisession(
        A=A_list,
        dims=FOV_size[0],
        thresh_cost=parameters['cost_threshold'],
        max_dist=parameters['max_dist'])

    ## open the timeline and create the new traces matrix C_matrix
    with open(alignment_timeline_file, 'rb') as f:
        timeline = pickle.load(f)
    total_time = timeline[-1][1] + C_list[-1].shape[1]
    timeline.append(['End', total_time])
    C_matrix = np.zeros((spatial_union.shape[1], total_time))

    new_assignments = np.zeros((spatial_union.shape[1], len(timeline)))
    for i in range(spatial_union.shape[1]):
        for j in range(assignments.shape[1]):
            trial = new_evaluated_trials[j]
            if not math.isnan(assignments[i, j]):
                new_assignments[i][trial] = assignments[i, j] + 1

    unique_session = []
    for x in evaluated_session:
        if x not in unique_session:
            unique_session.append(x)
    session_vector = np.arange(0, len(unique_session))
    final_evaluated_session = []
    for i in range(assignments.shape[1]):
        for j in range(len(unique_session)):
            if new_evaluated_session[i] == unique_session[j]:
                final_evaluated_session.append(session_vector[j])

    for i in range(spatial_union.shape[1]):
        for j in range(assignments.shape[1]):
            trial = (final_evaluated_session[j] + 1) * new_evaluated_trials[j]
            if not math.isnan(assignments[i, j]):
                C_matrix[i][timeline[trial][1]:timeline[trial][1] +
                            C_dims_new[j][1]] = (
                                C_list[j])[int(assignments[i, j]), :]

    cnm_registration = estimates(A=spatial_union, C=C_matrix)
    with open(output_file_path, 'wb') as output_file:
        pickle.dump(cnm_registration, output_file, pickle.HIGHEST_PROTOCOL)

    return
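
The function reads several keys from its parameters dict; below is a hypothetical dict with placeholder values, inferred from the keys used in the body rather than from any documented defaults.

# Placeholder values only; tune per dataset.
registration_parameters = {
    'session_wise': False,   # choose trial-wise vs session-wise output dirs
    'min_cell_size': 10,     # size-restriction bounds (pixels)
    'max_cell_size': 25,
    'cost_threshold': 0.9,   # passed to register_multisession as thresh_cost
    'max_dist': 10,          # passed to register_multisession as max_dist
}
component_evaluation_file = 'main/mouse_32_component_evaluation.hdf5'  # placeholder
run_registration(component_evaluation_file, registration_parameters)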
def run_decoder(mouse, session, trial):
    """

    This is the function for the decoding step. In the decoding step
    files are converted from .raw files to .tif files.

    This function is only usable on the Sebastian's account on the pastiera pc.

    Args:
        mouse, session, trial, is_rest: parameters of interest
    Returns:
        path of the decoded file

    """

    sql = "SELECT input, home_path, is_rest FROM Analysis WHERE mouse =? AND session = ? AND trial = ?"
    val = (mouse, session, trial)
    mycursor.execute(sql, val)
    result = mycursor.fetchall()

    input_raw_file = []
    for row in result:
        input_raw_file += row

    input_raw_file_paths = input_raw_file[1] + input_raw_file[0] + '.raw'

    # create the correct name for the file
    file_name = f"mouse_{mouse}_session_{session}_trial_{trial}_{input_raw_file[2]}.v1"
    output_tif_file_path = f"data/interim/decoding/main/{file_name}.tif"

    # Decoder paths
    py_inscopix = '/home/morgane/anaconda3/envs/inscopix_reader/bin/python'
    decoder = "/home/morgane/src/inscopix_reader_linux/python/downsampler.py"

    # Decoding
    print('Performing decoding on raw file')

    # Convert the output tif file path to the full path such that the downsampler.py script can use them.
    output_tif_file_path_full = os.path.join(os.environ['DATA_DIR_LOCAL'],
                                             output_tif_file_path)

    # Make a command usable by the decoder script (downsampler.py, see the script for more info)

    input_xml_file_path = input_raw_file[1] + input_raw_file[0] + '.xml'

    cmd = ' '.join([
        py_inscopix, decoder, '"' + input_raw_file_paths + '"',
        output_tif_file_path_full, '"' + input_xml_file_path + '"'
    ])

    # Run the command
    subprocess.check_output(cmd, shell=True)

    print('Decoding finished')

    sql1 = "UPDATE Analysis SET decoding_v = ?, decoding_main= ? WHERE mouse = ? AND session = ? AND trial = ? AND is_rest = ?"
    val1 = (1, output_tif_file_path, mouse, session, trial, input_raw_file[2])
    mycursor.execute(sql1, val1)
    database.commit()

    return output_tif_file_path
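
A sketch of the decoding call; the identifiers are placeholders, and the hard-coded Inscopix reader paths above must exist on the machine running it.

# Hypothetical call; the Analysis table is assumed to already hold the
# raw-file input and home_path for this mouse/session/trial.
decoded_tif = run_decoder(mouse=32, session=1, trial=1)
print(decoded_tif)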
Example No. 9
def run_source_extraction(input_file, dview):
    """
    This is the function for source extraction.
    Its goal is to take in a .mmap file,
    perform source extraction on it using cnmf-e and save the cnmf object as a .pkl file.
    """

    sql = "SELECT equalization,source_extraction_session_wise,fr,decay_time,min_corr,min_pnr,p,K,gSig,merge_thr,rf,stride,tsub,ssub,p_tsub,p_ssub,low_rank_background,nb,nb_patch,ssub_B,init_iter,ring_size_factor,method_init,method_deconvolution,update_background_components,center_psf,border_pix,normalize_init,del_duplicates,only_init  FROM Analysis WHERE motion_correction_main =?  OR alignment_main = ? OR equalization_main =?"
    val = [input_file, input_file, input_file]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    para = list(result[-1]) if result else []
    gSiz = 4 * para[8] + 1
    parameters = {
        'equalization': para[0],
        'session_wise': para[1],
        'fr': para[2],
        'decay_time': para[3],
        'min_corr': para[4],
        'min_pnr': para[5],
        'p': para[6],
        'K': para[7],
        'gSig': (para[8], para[8]),
        'gSiz': (gSiz, gSiz),
        'merge_thr': para[9],
        'rf': para[10],
        'stride': para[11],
        'tsub': para[12],
        'ssub': para[13],
        'p_tsub': para[14],
        'p_ssub': para[15],
        'low_rank_background': para[16],
        'nb': para[17],
        'nb_patch': para[18],
        'ssub_B': para[19],
        'init_iter': para[20],
        'ring_size_factor': para[21],
        'method_init': para[22],
        'method_deconvolution': para[23],
        'update_background_components': para[24],
        'center_psf': para[25],
        'border_pix': para[26],
        'normalize_init': para[27],
        'del_duplicates': para[28],
        'only_init': para[29]
    }
    # Determine output paths

    if parameters['session_wise']:
        data_dir = os.environ[
            'DATA_DIR_LOCAL'] + 'data/interim/source_extraction/session_wise/'
    else:
        data_dir = os.environ[
            'DATA_DIR_LOCAL'] + 'data/interim/source_extraction/trial_wise/'

    sql1 = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,equalization_v,source_extraction_v,input,home_path,decoding_main FROM Analysis WHERE  motion_correction_main =?  OR alignment_main = ? OR equalization_main =?"
    val1 = [input_file, input_file, input_file]
    cursor.execute(sql1, val1)
    result = cursor.fetchall()
    data = list(result[-1]) if result else []

    # Update the database

    if data[9] == 0:
        data[9] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}"
        output_file_path = data_dir + f'main/{file_name}.hdf5'
        sql1 = "UPDATE Analysis SET source_extraction_main=?,source_extraction_v=? WHERE  motion_correction_main =?  OR alignment_main = ? OR equalization_main =? "
        val1 = [output_file_path, data[9], input_file, input_file, input_file]
        cursor.execute(sql1, val1)

    else:
        data[9] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}"
        output_file_path = data_dir + f'main/{file_name}.hdf5'
        sql2 = "INSERT INTO Analysis (source_extraction_main,source_extraction_v) VALUES (?,?)"
        # data[9] is source_extraction_v (data[10] is the input column)
        val2 = [output_file_path, data[9]]
        cursor.execute(sql2, val2)
        database.commit()

    database.commit()

    # Load memory mappable input file
    if os.path.isfile(input_file):
        Yr, dims, T = cm.load_memmap(input_file)
        images = Yr.T.reshape((T, ) + dims, order='F')
    else:
        # without an early return, the code below would crash on an
        # undefined `images`
        logging.warning(' .mmap file does not exist. Cancelling')
        return

    # SOURCE EXTRACTION
    # Check if the summary images are already there
    corr_npy_file_path, pnr_npy_file_path = get_corr_pnr_path(
        gSig_abs=parameters['gSig'][0])

    if corr_npy_file_path is not None and os.path.isfile(corr_npy_file_path):
        # Already computed summary images
        logging.info(' Already computed summary images')
        cn_filter = np.load(corr_npy_file_path)
        pnr = np.load(pnr_npy_file_path)
        dt_summary = 0  # nothing had to be computed
    else:
        # Compute summary images; track their duration separately from the
        # source extraction duration
        t0 = datetime.datetime.today()
        logging.info(' Computing summary images')
        cn_filter, pnr = cm.summary_images.correlation_pnr(
            images, gSig=parameters['gSig'][0], swap_dim=False)
        dt_summary = int((datetime.datetime.today() - t0).seconds /
                         60)  # timedelta in minutes
        logging.info(f' Computed summary images. dt = {dt_summary} min')
        # Saving summary images as npy files
        gSig = parameters['gSig'][0]
        corr_npy_file_path = data_dir + f'/meta/corr/{file_name}_gSig_{gSig}.npy'
        pnr_npy_file_path = data_dir + f'/meta/pnr/{file_name}_gSig_{gSig}.npy'
        with open(corr_npy_file_path, 'wb') as f:
            np.save(f, cn_filter)
        with open(pnr_npy_file_path, 'wb') as f:
            np.save(f, pnr)

    # Calculate min, mean, max value for cn_filter and pnr
    corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()
    pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()

    # If min_corr and min_pnr are specified via a linear equation, calculate
    # this value
    if isinstance(parameters['min_corr'], list):
        min_corr = parameters['min_corr'][0] * corr_mean + parameters['min_corr'][1]
        parameters['min_corr'] = min_corr
        logging.info(f' Automatically setting min_corr = {min_corr}')
    if isinstance(parameters['min_pnr'], list):
        min_pnr = parameters['min_pnr'][0] * pnr_mean + parameters['min_pnr'][1]
        parameters['min_pnr'] = min_pnr
        logging.info(f' Automatically setting min_pnr = {min_pnr}')

    # Set the parameters for caiman
    opts = params.CNMFParams(params_dict=parameters)

    # SOURCE EXTRACTION
    logging.info(f' Performing source extraction')
    t0 = datetime.datetime.today()
    n_processes = psutil.cpu_count()
    logging.info(f' n_processes: {n_processes}')
    cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, params=opts)
    cnm.fit(images)
    cnm.estimates.dims = dims

    # Calculate the centers of mass (assumes `caiman` itself is imported at
    # module level, alongside the usual `import caiman as cm`)
    cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A,
                                                images.shape[1],
                                                images.shape[2])

    # Save the cnmf object as a hdf5 file
    logging.info(f' Saving cnmf object')
    cnm.save(output_file_path)
    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f' Source extraction finished. dt = {dt} min')

    sql1 = "UPDATE Analysis SET duration_summary_images=?,source_extraction_corr=?, source_extraction_pnr=?, source_extraction_corr_min =?, source_extraction_corr_mean=?, source_extraction_corr_max=?, source_extraction_pnr_min=?,source_extraction_pnr_mean=?,source_extraction_pnr_max=?,source_extraction_k=?,source_extraction_duration=?,min_corr=?,min_pnr=? WHERE source_extraction_main= ? AND source_extraction_v=? "
    # the statement has 15 placeholders: 13 SET values (including min_corr
    # and min_pnr, which the original val1 omitted) plus the 2 WHERE values
    val1 = [
        dt_summary, corr_npy_file_path, pnr_npy_file_path, corr_min,
        corr_mean, corr_max, pnr_min, pnr_mean, pnr_max,
        len(cnm.estimates.C), dt, parameters['min_corr'],
        parameters['min_pnr'], output_file_path, data[9]
    ]
    cursor.execute(sql1, val1)
    database.commit()

    return output_file_path, data[9]
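
Source extraction also expects a cluster handle; a sketch reusing a dview set up as in the motion correction example above, with a placeholder motion-corrected .mmap path:

# Hypothetical call; `dview` comes from cm.cluster.setup_cluster(...).
hdf5_path, se_version = run_source_extraction(
    'data/interim/motion_correction/main/'
    'mouse_32_session_1_trial_1_0.v1.1.1_els.mmap',  # placeholder
    dview)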
Example No. 10
def run_cropper(input_path):
    '''
    This function takes in a decoded analysis state and crops it according to
    specified cropping points.

    Args:
        input_path: the path of the decoded file

    Returns:
        output_tif_file_path: the path of the cropped .tif file
    '''

    # Determine output .tif file path
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v FROM Analysis WHERE decoding_main=%s ORDER BY cropping_v"
    val = [
        input_path,
    ]
    mycursor.execute(sql, val)
    myresult = mycursor.fetchall()
    data = []
    for x in myresult:
        data += x

    file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}"
    output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"

    # update the database
    if data[4] == 0:
        data[4] = 1
        sql1 = "UPDATE Analysis SET cropping_main=%s,cropping_v=%s WHERE decoding_main=%s "
        val1 = [output_tif_file_path, data[4], input_path]
        mycursor.execute(sql1, val1)
        database.commit()
    else:
        data[4] += 1
        sql2 = "INSERT INTO Analysis (cropping_main,cropping_v) VALUES (%s,%s)"
        val2 = [output_tif_file_path, data[4]]
        mycursor.execute(sql2, val2)
        database.commit()
        sql3 = "UPDATE Analysis SET decoding_main=%s,decoding_v=%s,mouse=%s,session=%s,trial=%s,is_rest=%s WHERE cropping_main=%s AND cropping_v=%s"
        val3 = [
            input_path, data[5], data[0], data[1], data[2], data[3],
            output_tif_file_path, data[4]
        ]
        mycursor.execute(sql3, val3)
        database.commit()
    # Spatial cropping
    logging.info('Loading movie')
    m = cm.load(input_path)
    logging.info('Loaded movie')

    # Choose crop parameters
    x1 = int(input("Limit X1 : "))
    x2 = int(input("Limit X2 : "))
    y1 = int(input("Limit Y1 : "))
    y2 = int(input("Limit Y2 : "))
    sql = "INSERT INTO Analysis (crop_spatial,cropping_points_spatial,crop_temporal,cropping_points_temporal) VALUES (%s,%s,%s,%s) WHERE cropping_main=%s AND cropping_v=%s"
    val = [True, [y1, y2, x1, x2], False, [], output_tif_file_path, data[4]]
    mycursor.execute(sql, val)
    database.commit()

    # the [y1, y2, x1, x2] order matches the (frames, rows, cols) slicing below
    [x_, _x, y_, _y] = [y1, y2, x1, x2]

    logging.info('Performing spatial cropping')
    m = m[:, x_:_x, y_:_y]
    logging.info(' Spatial cropping finished')

    # Save the movie
    m.save(output_tif_file_path)

    return output_tif_file_path
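
This earlier variant prompts for the cropping limits itself and uses %s-style placeholders, suggesting a different database driver than the ?-style examples above; a call sketch with a placeholder decoded path:

# Hypothetical call; prompts for the X/Y limits interactively.
cropped_tif = run_cropper(
    'data/interim/decoding/main/mouse_32_session_1_trial_1_0.v1.tif')  # placeholder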