Example #1
0
def raj_cropper(csv_file):
    """Crop square regions around localised particle centroids.

    Reads a localisation table from ``csv_file``, filters it by area,
    then for each (file, centroid) row loads the image and extracts a
    7-pixel square cutout centred on the centroid.  The result is
    written to 'particle_position_crops.csv' and returned.

    Parameters
    ----------
    csv_file : str
        Path to a csv with columns 'file_name', 'centroid-0' (y),
        'centroid-1' (x), plus an 'Unnamed: 0' export-artifact column.

    Returns
    -------
    pandas.DataFrame
        Columns: 'frame', 'X', 'Y', 'filename', 'cutout_array'.
    """
    ### IMPORT ###
    data = pd.read_csv(csv_file)

    ### BOUNDING ###
    # Remove the excess first column, an artifact from exporting a csv.
    data = data.drop('Unnamed: 0', axis=1)

    # Filter by area to remove detections above or below the threshold
    # limits, then renumber the rows.  drop=True stops the old index
    # from being inserted as a column (the original did this in two
    # steps: reset_index() followed by drop('index', axis=1)).
    data = area_filter(data, 1, 2)
    data = data.reset_index(drop=True)

    ### PROCESSING ###
    # Unique file names in order of first appearance.  The original
    # adjacent-comparison loop only removed *consecutive* repeats (and
    # crashed on an empty table); pd.unique covers both cases.
    file_list = list(pd.unique(data["file_name"]))

    # Accumulate single-row frames and concatenate once at the end:
    # calling pd.concat inside the loop is quadratic in the row count.
    rows = []
    for file_name in file_list:
        img = genr.load_img(file_name)  # load each image exactly once
        print(file_name)
        # Rows of the table belonging to this image.
        for j in data.index[data["file_name"] == file_name]:
            y = data["centroid-0"][j]
            x = data["centroid-1"][j]
            print(x, y)

            cutouts = np.array(square_area(img, x, y, 7))
            rows.append(pd.DataFrame(
                {
                    'frame': [j],
                    'X': [x],
                    'Y': [y],
                    'filename': [file_name],
                    'cutout_array': [cutouts]
                },
                index=["{}".format(j)]))

    # Handle an empty table gracefully (pd.concat rejects an empty list).
    if rows:
        cutout_dataframe = pd.concat(rows, axis=0)
    else:
        cutout_dataframe = pd.DataFrame(
            columns=['frame', 'X', 'Y', 'filename', 'cutout_array'])

    # NOTE(review): positions are offset by a rounding issue in the
    # localisation; data needs adjusting to match — TODO confirm upstream.
    cutout_dataframe.to_csv('particle_position_crops.csv')
    return cutout_dataframe
Example #2
0
def matt_cropper(csv_pathname='{}/panda_data_molecule_locs{}'.format(
    os.getcwd(), '.csv')):
    """Crop a square region around each localised molecule and save it.

    Parameters
    ----------
    csv_pathname : str, optional
        Path to the molecule-locations csv.  NOTE(review): the default
        is built with os.getcwd() when the module is imported, not when
        the function is called — confirm this is intended.

    Returns
    -------
    pandas.DataFrame
        One row per molecule with 'file_name', 'x-coord', 'y-coord' and
        'cutout_square'; also written to 'molecules_positions_crops.csv'.
    """
    # Iterate through indices in imported locations dataframe
    molecules, save_data = load_csv(csv_pathname)

    # Track the *name* of the currently loaded image separately from the
    # image itself.  The original stored the loaded image in `file` and
    # then compared it to the file-name string with `is not`, which is
    # both an identity (not equality) test and a comparison of an image
    # against a string — always true, so every row reloaded the file
    # from disk.
    current_name = None
    img = None

    for index in molecules.index:
        file_name = "{}".format(molecules.loc[index, "file_name"])

        # Only (re)load the image when the file name changes.
        if file_name != current_name:
            img = genr.load_img(file_name)
            current_name = file_name

        # Crop centre from the centre-of-mass columns; centroid-0 is the
        # row (y) and centroid-1 the column (x).  Floor to pixel indices.
        centre_y = int(np.floor(molecules.loc[index, "centroid-0"]))
        centre_x = int(np.floor(molecules.loc[index, "centroid-1"]))

        # Extract a 7-pixel square cutout around the centre.
        molecule_crop = point_crop(img, centre_y, centre_x, 7)

        # Format this molecule's metadata and crop into a pandas series
        # so it can be appended to the output dataframe.
        molecule_crop_id = pd.Series({
            "file_name": molecules.loc[index, "file_name"],
            "x-coord": molecules.loc[index, "centroid-1"],
            "y-coord": molecules.loc[index, "centroid-0"],
            "cutout_square": molecule_crop
        })

        # Convert the series to a one-row dataframe indexed by the
        # current table index.
        save_row = pd.DataFrame([molecule_crop_id], index=["{}".format(index)])

        # Prepend, matching the original concatenation order.
        save_data = pd.concat([save_row, save_data], axis=0)

    # sort_index returns a NEW dataframe; the original discarded the
    # result, so the saved csv was never actually sorted.
    save_data = save_data.sort_index(axis=0)

    # Save finished dataframe to .csv in working directory
    save_data.to_csv("molecules_positions_crops.csv")
    return save_data
Example #3
0
# For each file in the above list, execute the chosen filter and threshold and then save it out as a numpy array

a = 0  # Counter numbering the files processed (used by the per-file saves below).

# Accumulator for localisation results across all files.
localised_data = pd.DataFrame(
    columns=['area', 'centroid-0', 'centroid-1', 'file_name'])

folder = "{}/storm_output_data".format(params["directory:"])
# makedirs with exist_ok avoids the race between an exists() check and
# mkdir(), and is a no-op when the directory is already there.
os.makedirs(folder, exist_ok=True)

for name in file_list:
    a += 1  # per-file counter; only consumed by the commented-out np.save below
    # Build the full path from the configured directory and the bare name.
    file_name = "{}/{}".format(params["directory:"], name)
    print(file_name)
    img = genr.load_img(file_name)

    ### FILTERING ###
    # This takes the data and the filter params information and pulls out the relevant information to choose which
    # function to run. Based on the "filter type:", and uses the parameters a and b as required.
    # Matt, at the moment it does not account for your above if statement.

    filtered_data = filters.filter_switcher(img, params)

    ### THRESHOLDING ###
    # As above, with switcher adapted to thresholds
    thresholded_data = thresholds.threshold_switcher(filtered_data, params)

    # np.save('{}/thresholded_img_{}_{}'.format(folder, a, datetime.datetime.now()), thresholded_data)

    ### LOCALISATION ###