Example #1
    def __init__(self,
                 imageId=None,
                 datasetsPath=None,
                 model=None,
                 gaussian=False,
                 rollingSize=0) -> None:
        if not os.path.exists(PREDS_BASE_PATH):
            os.makedirs(PREDS_BASE_PATH)
        if not model:
            args = sys.argv[1:]
            model = args[args.index('--model') +
                         1] if '--model' in args else DEFAULT_MODEL

        self._datasetsPath = datasetsPath
        self._imageReader = ImageReader(imageId=imageId,
                                        datasetsPath=datasetsPath,
                                        rollingSize=rollingSize)
        self._modelPath = model
        self._useGaussian = gaussian  # whether to apply a Gaussian filter to the images
        self._rollingSize = rollingSize

        self._X = self._Y = self._model = None

        self.load_X_Y()
        self.load_model()
Example #2
def load_data(classesFromFile=False, imageId=IMAGE_ID, datasetsPath=DATASETS_PATH, gaussianFilter=False, rolledImages=False):
    imageReader = ImageReader(imageId=imageId, datasetsPath=datasetsPath)
    images = imageReader.get_full_padded_images(gaussianFilter=gaussianFilter)

    if rolledImages:
        images = get_rolled_images(images)

    imageIdxs = list(range(images.shape[0]))

    classes = imageReader.get_frame_wise_class_gmm(imageIdxs, classesFromFile=classesFromFile)

    # Transform targets to a Keras-compatible one-hot format
    num_classes = 2
    Y = np_utils.to_categorical(classes, num_classes)

    # preprocess data
    X = images.astype('float32')
    X = X / 255.0

    # Split train / test data
    X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, test_size=0.2, random_state=1)
    print(f"Prop of sparks in train dataset: {round(Y_train.sum(axis=0)[1]/Y_train.shape[0]*100, 2)}, "
          f"and in validation dataset: {round(Y_valid.sum(axis=0)[1]/Y_valid.shape[0]*100, 2)}")

    return X_train, Y_train, X_valid, Y_valid
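A minimal usage sketch for the split returned by load_data above, assuming the same InceptionV3 frame-wise classifier that is loaded in Example #9; the optimizer, epoch count and batch size here are placeholders, not values from the original project.

import keras

X_train, Y_train, X_valid, Y_valid = load_data(classesFromFile=True,
                                               gaussianFilter=True)
# binary frame-wise classifier (spark / no spark), mirroring Example #9
model = keras.applications.InceptionV3(include_top=True,
                                       weights=None,
                                       classes=2,
                                       input_shape=X_train[0].shape)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          validation_data=(X_valid, Y_valid),
          epochs=10,
          batch_size=32)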
Example #3
def load_data(imageId,
              datasetsPath=DATASETS_PATH,
              gaussianFilter=False,
              rollingSize=0):
    imageReader = ImageReader(imageId=imageId,
                              datasetsPath=datasetsPath,
                              rollingSize=rollingSize)
    images = imageReader.get_full_images(gaussianFilter=gaussianFilter)
    classes = imageReader.get_pixel_wise_classification()

    # Transform targets to a Keras-compatible one-hot format
    num_classes = 2
    Y = np_utils.to_categorical(classes, num_classes)

    # preprocess data
    X = images.astype('float32')
    X = X / 255.0

    # Split train / test data
    X_train, X_valid, Y_train, Y_valid = train_test_split(X,
                                                          Y,
                                                          test_size=0.2,
                                                          random_state=1)
    # print(f"Prop of sparks in train dataset: {round(Y_train.sum(axis=0)[1]/Y_train.shape[0]*100, 2)}, "
    #       f"and in validation dataset: {round(Y_valid.sum(axis=0)[1]/Y_valid.shape[0]*100, 2)}")

    return X_train, Y_train, X_valid, Y_valid
Example #4
import pandas as pd
import matplotlib.pyplot as plt
import os

from deepcardio_utils import ImageReader, get_mask, plot_cell

if __name__ == '__main__':
    imageReader = ImageReader()
    images = imageReader.get_full_images()
    confsDF, detSparksDF = imageReader.get_spark_simple_data()

    plotsFolderPath = os.path.join(imageReader.get_image_folder(),
                                   imageReader.get_image_id() + '_xyt_size')
    if not os.path.exists(plotsFolderPath):
        os.makedirs(plotsFolderPath)

    xytList = []
    for sparkIdx in range(int(confsDF.loc[0, 'Surviving sparks'])):
        # 51-frame window around the spark's frame index (column 3 of the detections table)
        frameIni = int(detSparksDF.loc[sparkIdx, :].tolist()[3]) - 25
        frameFin = int(detSparksDF.loc[sparkIdx, :].tolist()[3]) + 25
        sparkX = int(detSparksDF.loc[sparkIdx, 'Xpix'])
        sparkY = int(detSparksDF.loc[sparkIdx, 'Ypix'])
        maskSize = detSparksDF.loc[sparkIdx, 'FWHM'] / float(
            confsDF.loc[:, 'Pixel size(um)'])
        mask = get_mask(images.shape[1], images.shape[2], sparkX, sparkY,
                        maskSize)

        res = []
        for i in range(frameIni, frameFin + 1):
            res.append(images[i][:, :, 2][mask].mean())
Example #5
    # mean intensity inside the first predicted spark mask over frames 45-59
    m = sparkPredMasksL[0]
    ints = []
    for ii in range(45, 60):
        ints.append(images[ii][:, :, 2][m].mean())
    plt.figure(figsize=(20, 5))
    plt.plot(ints)
    plt.show()

    # plot spark images
    images = imageReader.get_full_images()
    classes = imageReader.get_frame_wise_class_gmm()
    idx = np.random.choice(np.arange(classes.shape[0])[classes == 1], 1)[0]
    plot_cell(images[idx])

    ir1 = ImageReader(imageId='2021-01-23_02-02-14_gen_images')
    ir2 = ImageReader(imageId='2021-03-24_00-35-00_TLeif__synthetic')

    images1 = ir1.get_full_images()
    plot_cell(images1[40])
    plot_cell(cv2.resize(images1[40], (images1.shape[2], 75)))

    images2 = ir2.get_full_images()
    plot_cell(images2[1])
    plot_cell(cv2.resize(images2[1], (images1.shape[2], images1.shape[1])))

    # classes = imageReader.get_frame_wise_class_gmm()
    # pixelClass = imageReader.get_pixel_wise_classification()
    #
    # idx = 17
    # im = images[idx].copy()
Example #6
def image_id_selected(imageId, bPath):
    GLOB_DICT['imageReader'] = ImageReader(imageId=imageId, datasetsPath=bPath)
    return len(GLOB_DICT['imageReader'].get_images_names())
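A sketch of how image_id_selected could be wired into the Dash app shown in the next example; the app object comes from Example #7, and the component ids used here ('image-id-dropdown', 'datasets-path', 'n-frames') are placeholders rather than ids from the original layout.

@app.callback(Output('n-frames', 'children'),
              [Input('image-id-dropdown', 'value')],
              [State('datasets-path', 'value')])
def on_image_id_selected(imageId, bPath):
    # reload the global ImageReader and report how many frames it exposes
    nImages = image_id_selected(imageId, bPath)
    return f"{nImages} frames loaded"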
Example #7
# We import all of the necessary Plotly and Dash libraries
import plotly.graph_objects as go

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_daq as daq
from dash.dependencies import Input, Output, State
import plotly.express as px

from deepcardio_utils import ImageReader, get_plottable_image
from pred.utils import FrameWisePredictor, PixelWisePredictor, get_clustered_pred_sparks, get_intensity_heatmap

GLOB_IMG_READER = ImageReader('example_dataset')
GLOB_DICT = {}


###############################################################################

app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP, "assets/custom-style.css"])


# Modal
with open("dash_app/readme.md", "r") as f:
    howto_md = f.read()

modal_overlay = dbc.Modal(
    [
        dbc.ModalBody(html.Div([dcc.Markdown(howto_md)], id="howto-md")),
Example #8
class BasePredictor(ABC):
    def __init__(self,
                 imageId=None,
                 datasetsPath=None,
                 model=None,
                 gaussian=False,
                 rollingSize=0) -> None:
        if not os.path.exists(PREDS_BASE_PATH):
            os.makedirs(PREDS_BASE_PATH)
        if not model:
            args = sys.argv[1:]
            model = args[args.index('--model') +
                         1] if '--model' in args else DEFAULT_MODEL

        self._datasetsPath = datasetsPath
        self._imageReader = ImageReader(imageId=imageId,
                                        datasetsPath=datasetsPath,
                                        rollingSize=rollingSize)
        self._modelPath = model
        self._useGaussian = gaussian  # whether to apply a Gaussian filter to the images
        self._rollingSize = rollingSize

        self._X = self._Y = self._model = None

        self.load_X_Y()
        self.load_model()

    @abstractmethod
    def load_X_Y(self):
        pass

    @abstractmethod
    def load_model(self):
        pass

    @abstractmethod
    def get_preds_filename(self):
        pass

    def get_model_id(self):
        return os.path.splitext(os.path.basename(self._modelPath))[0]

    def get_X_Y(self):
        return self._X, self._Y

    def get_image_reader(self):
        return self._imageReader

    def get_preds_dirname(self):
        dirn = os.path.join(PREDS_BASE_PATH, self._imageReader.get_image_id(),
                            self.get_model_id())
        if not os.path.exists(dirn):
            os.makedirs(dirn)
        return dirn

    def get_preds_file_path(self):
        return os.path.join(self.get_preds_dirname(),
                            self.get_preds_filename())

    @abstractmethod
    def predict(self, forcePrediction=False):
        pass

    @abstractmethod
    def get_prediction_frame(self, idx, image, Y_pred):
        pass

    def generate_prediction_frames(self, forcePrediction=False, videoSize=1):
        Y_pred = self.predict(forcePrediction=forcePrediction)
        images = self._imageReader.get_full_images(
            gaussianFilter=self._useGaussian)

        if not os.path.exists(os.path.join(self.get_preds_dirname(),
                                           'figures')):
            os.makedirs(os.path.join(self.get_preds_dirname(), 'figures'))
        video_path = os.path.join(self.get_preds_dirname(), 'pred_labels.avi')
        height, width, _ = self._imageReader.get_shape()
        videoWidth = width * videoSize
        videoHeight = (height * 2 + 1) * videoSize
        video = cv2.VideoWriter(video_path, 0, 30, (videoWidth, videoHeight))

        for idx, image in enumerate(images):
            if idx % 100 == 0:
                print(f"labeling frame {idx}/{len(images)}")

            image = self.get_prediction_frame(idx, image, Y_pred)

            fig = plt.figure(figsize=(20, 3))
            plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            plt.axis('off')
            plt.savefig(os.path.join(self.get_preds_dirname(), 'figures',
                                     str(idx).zfill(5)),
                        bbox_inches='tight',
                        pad_inches=0)
            plt.close(fig)
            video.write(cv2.resize(image, (videoWidth, videoHeight)))

        cv2.destroyAllWindows()
        video.release()
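To make the abstract hooks above concrete, here is an illustrative subclass sketch; it is not the real FrameWisePredictor or PixelWisePredictor from pred.utils, it assumes numpy (np) and keras are imported in the surrounding module, and the predictions filename is a placeholder.

class DummyFrameWisePredictor(BasePredictor):
    def load_X_Y(self):
        # frame-wise features and GMM labels, mirroring the load_data helpers above
        images = self._imageReader.get_full_padded_images(
            gaussianFilter=self._useGaussian)
        self._X = images.astype('float32') / 255.0
        self._Y = self._imageReader.get_frame_wise_class_gmm(classesFromFile=True)

    def load_model(self):
        self._model = keras.models.load_model(self._modelPath)

    def get_preds_filename(self):
        return 'frame_wise_preds.npy'

    def predict(self, forcePrediction=False):
        # reuse cached predictions unless a fresh prediction is forced
        predsPath = self.get_preds_file_path()
        if not forcePrediction and os.path.exists(predsPath):
            return np.load(predsPath)
        Y_pred = self._model.predict(self._X)
        np.save(predsPath, Y_pred)
        return Y_pred

    def get_prediction_frame(self, idx, image, Y_pred):
        # placeholder: return the frame unchanged; a real subclass would draw the label
        return image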
Example #9
import cv2
import sys
# from tensorflow import keras
import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras import backend as K
import tensorflow as tf
import matplotlib.cm as cm

from deepcardio_utils import ImageReader

if __name__ == '__main__':
    args = sys.argv[1:]
    imageReader = ImageReader()
    assert '--model' in args, 'USAGE: --model path_to_model.h5 [--imageid imageid]'
    pathToModel = args[args.index('--model') + 1]

    X = imageReader.get_full_padded_images()
    # preprocess data
    X = X.astype('float32')
    X = X / 255.0

    Y = imageReader.get_frame_wise_class_gmm(classesFromFile=True)

    inceptionv3 = keras.applications.InceptionV3(include_top=True,
                                                 weights=None,
                                                 classes=2,
                                                 input_shape=X[0].shape)
    inceptionv3.load_weights(pathToModel)
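    # Hypothetical continuation (the original example is truncated here): with the
    # weights loaded, frame-wise predictions can be checked against the GMM labels.
    Y_pred = inceptionv3.predict(X).argmax(axis=1)
    print(f"frame-wise accuracy: {(Y_pred == Y).mean():.4f}")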
Example #10
import cv2
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from deepcardio_utils import ImageReader

MX = 120
MY = 800
BASE_PATH = '../_datasets/deepcardio/Blas_data'

if __name__ == '__main__':
    imageReader = ImageReader('170215_RyR-GFP30_RO_01_Serie2_SPARKS-calcium')
    imagesShape = imageReader.get_full_images()[0].shape

    folder = sys.argv[1]
    outputFolderPath = os.path.join('../_datasets/deepcardio',
                                    f"Blas_data_{folder}")
    if not os.path.exists(outputFolderPath):
        os.makedirs(outputFolderPath)

    for f in sorted(os.listdir(os.path.join(BASE_PATH, folder))):
        s = pd.read_csv(os.path.join(BASE_PATH, folder, f),
                        header=None)  # , sep='       ')
        im = s.to_numpy().reshape(MX, MY, order='F')
        res_im = np.zeros(imagesShape)
        res_im[:, :, 2] = cv2.resize(im,
                                     dsize=(imagesShape[1], imagesShape[0]))
        res_im = (res_im * 255.).astype(np.uint8)
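        # Hypothetical continuation (the original snippet is truncated here; the
        # '.tif' filename pattern is a placeholder): write each converted frame
        # into the output dataset folder.
        outName = os.path.splitext(f)[0] + '.tif'
        cv2.imwrite(os.path.join(outputFolderPath, outName), res_im)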
Example #11
import datetime
import pytz

from skimage.filters import gaussian
from math import sqrt, pi, e

from deepcardio_utils import ImageReader, get_mask

N_FRAMES = 2000
SPARKS_N_FRAMES = (2, 10)
SPARKS_SIZE_SIGMA = (0.2, 0.5)
SPARKS_NOISE_SIGMA = (0.3, 2)
SPARK_PROP = 0.15 / 2.  # 0.146 / 10.
PEPPER_THRESHOLD = (0.25, 0.7)

VERBOSE_SPARKS_FILE = 'verboseSparks.csv'

if __name__ == '__main__':
    imageReader = ImageReader()
    images = imageReader.get_full_images()
    shp = images[0].shape
    isLeif = 'Leif' in imageReader.get_image_id()

    # cell mask
    cellMask = imageReader.get_cellmask(images)

    # synthetic no-spark gen
    noisyGen = imageReader.background_noise_images_generator(multichannel=True)
    # synthetic spark
    sparkGen = imageReader.spark_images_generator(multichannel=True,
                                                  saltAndPepper=isLeif)

    timeID = datetime.datetime.now(
        pytz.timezone('Europe/Madrid')).strftime('%Y-%m-%d_%H-%M-%S')
Example #12
from deepcardio_utils import ImageReader, plot_cell

if __name__ == '__main__':
    imageReader = ImageReader(rollingSize=3)
    images = imageReader.get_full_images()
    classes = imageReader.get_frame_wise_class_gmm()
    pixelClass = imageReader.get_pixel_wise_classification()

    idx = 3
    im = images[idx].copy()
    im[:, :, 1] = pixelClass[idx] * 50
    plot_cell(im)
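    # Hypothetical extension of the overlay above: repeat it for every frame that
    # the frame-wise GMM classification marks as containing a spark.
    for idx in range(classes.shape[0]):
        if not classes[idx]:
            continue
        im = images[idx].copy()
        im[:, :, 1] = pixelClass[idx] * 50
        plot_cell(im)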