Example #1
0
def data_augmentation(input_image, output_image):
    """Randomly augment an image/label pair in lockstep.

    Crops both images to (args.crop_height, args.crop_width), then
    optionally applies a horizontal flip, a vertical flip, a brightness
    adjustment (input only — the label image must keep exact class
    values), and a random rotation.

    Args:
        input_image: source image (numpy array as used by cv2).
        output_image: corresponding label image; receives the same
            geometric transforms as the input.

    Returns:
        Tuple (input_image, output_image) after augmentation.
    """
    input_image, output_image = utils.random_crop(input_image, output_image,
                                                  args.crop_height,
                                                  args.crop_width)

    # Each flip fires with probability 1/2 when enabled.
    if args.h_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 1)
        output_image = cv2.flip(output_image, 1)
    if args.v_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 0)
        output_image = cv2.flip(output_image, 0)
    if args.brightness:
        # Scale pixel intensities via a 256-entry lookup table; only the
        # input is touched so label values stay intact.
        factor = 1.0 + random.uniform(-1.0 * args.brightness, args.brightness)
        table = np.array([((i / 255.0) * factor) * 255
                          for i in np.arange(0, 256)]).astype(np.uint8)
        input_image = cv2.LUT(input_image, table)
    if args.rotation:
        # FIX: the original had two back-to-back `if args.rotation:`
        # guards (one computing `angle`, one using it); merged into one.
        angle = random.uniform(-1 * args.rotation, args.rotation)
        M = cv2.getRotationMatrix2D(
            (input_image.shape[1] // 2, input_image.shape[0] // 2), angle, 1.0)
        # INTER_NEAREST avoids blending label values at rotated edges.
        input_image = cv2.warpAffine(
            input_image,
            M, (input_image.shape[1], input_image.shape[0]),
            flags=cv2.INTER_NEAREST)
        output_image = cv2.warpAffine(
            output_image,
            M, (output_image.shape[1], output_image.shape[0]),
            flags=cv2.INTER_NEAREST)

    return input_image, output_image
Example #2
0
    def __data_generator(self, batch_samples):
        """Build one batch of preprocessed images and normalized labels."""
        batch_size = len(batch_samples)
        # Pre-allocate the batch tensors up front for speed.
        X = np.empty((batch_size, *self.img_crop_dims, 3))
        y = np.empty((batch_size, self.n_classes))

        for idx, record in enumerate(batch_samples):
            # Resolve the on-disk path for this sample's image.
            path = os.path.join(
                self.img_dir, '{}.{}'.format(record['image_id'],
                                             self.img_format))

            loaded = utils.load_image(path, self.img_load_dims)
            if loaded is not None:
                # Train-time augmentation: random crop + horizontal flip.
                loaded = utils.random_crop(loaded, self.img_crop_dims)
                loaded = utils.random_horizontal_flip(loaded)
                X[idx, ] = loaded

            # Labels are normalized whether or not the image loaded.
            y[idx, ] = utils.normalize_labels(record['label'])

        # Base-network specific preprocessing on the whole 4D batch
        # (RGB values within [0, 255]).
        X = self.basenet_preprocess(X)

        return X, y
    def test_random_crop(self, mock_np_random_randint):
        """Cropping 1x1 with randint mocked to 1 should select pixel 255."""
        # Pin the random crop offset to 1 on both axes.
        mock_np_random_randint.return_value = 1

        image = np.array([[0, 255], [0, 255]])[:, :, np.newaxis]
        result = utils.random_crop(image, (1, 1))
        self.assertEqual([255], result)
Example #4
0
def data_augmentation(input_image, output_image):
    """Randomly augment an image/label pair, then crop.

    Applies optional horizontal/vertical flips, a brightness adjustment
    (input only), and a random rotation, then takes a biased random crop.
    Augmentation options (h_flip, v_flip, brightness, rotation,
    crop_height, crop_width) are read as module-level globals.

    Returns:
        Tuple (input_image, output_image) after augmentation.
    """
    # crop a patch sampling cell bodies with a sampling pdf
    # (cell==1 has weight 10000 and cell==0 has weight 1)

    if h_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 1)
        output_image = cv2.flip(output_image, 1)
    if v_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 0)
        output_image = cv2.flip(output_image, 0)
    if brightness:
        # FIX: the original read args.brightness here even though the
        # guard (and every other option in this variant) is the bare
        # global `brightness`; use the global consistently.
        factor = 1.0 + random.uniform(-1.0 * brightness, brightness)
        table = np.array([((i / 255.0) * factor) * 255
                          for i in np.arange(0, 256)]).astype(np.uint8)
        input_image = cv2.LUT(input_image, table)
    if rotation:
        # FIX: merged the original's two consecutive `if rotation:` guards.
        angle = random.uniform(-1 * rotation, rotation)
        M = cv2.getRotationMatrix2D(
            (input_image.shape[1] // 2, input_image.shape[0] // 2), angle, 1.0)
        # INTER_NEAREST avoids blending label values at rotated edges.
        input_image = cv2.warpAffine(
            input_image,
            M, (input_image.shape[1], input_image.shape[0]),
            flags=cv2.INTER_NEAREST)
        output_image = cv2.warpAffine(
            output_image,
            M, (output_image.shape[1], output_image.shape[0]),
            flags=cv2.INTER_NEAREST)
    input_image, output_image = utils.random_crop(input_image, output_image,
                                                  crop_height, crop_width)
    return input_image, output_image
Example #5
0
    def test_random_crop(self, mock_np_random_randint):
        """random_crop with randint fixed at 1 should yield the 255 pixel."""
        mock_np_random_randint.return_value = 1

        # 2x2 single-channel image: column 0 is 0, column 1 is 255.
        source = np.expand_dims(np.array([[0, 255], [0, 255]]), axis=2)
        dims = (1, 1)
        cropped = utils.random_crop(source, dims)
        self.assertEqual([255], cropped)
Example #6
0
def data_augmentation(input_image, output_image, args, backgroundValue=None):
    """Randomly augment an image/label pair according to `args`.

    Optional steps, in order: downscale, biased random crop, horizontal
    and vertical flips, brightness adjustment (input only), arbitrary
    rotation, and 90-degree ("perpendicular") rotation.

    Args:
        input_image: source image (numpy array as used by cv2).
        output_image: corresponding label image; receives the same
            geometric transforms as the input.
        args: namespace with the augmentation options read below.
        backgroundValue: passed through to utils.random_crop for the
            biased crop (semantics defined by that helper).

    Returns:
        Tuple (input_image, output_image) after augmentation.
    """
    if args.downscale_factor and args.downscale_factor != 1:
        # Downscale image. cv2.resize takes dsize as (width, height);
        # FIX: the original built dim as (shape[0]*f, shape[1]*f), i.e.
        # (height, width), which swapped the axes for non-square images.
        dim = (int(input_image.shape[1] * args.downscale_factor),
               int(input_image.shape[0] * args.downscale_factor))
        input_image = cv2.resize(input_image,
                                 dim,
                                 interpolation=cv2.INTER_CUBIC)
        output_image = cv2.resize(output_image,
                                  dim,
                                  interpolation=cv2.INTER_NEAREST)
        # These interpolations are the same as used by Darea in matlab
        # when preparing for prediction.

    input_image, output_image = utils.random_crop(input_image, output_image,
                                                  args.crop_height,
                                                  args.crop_width,
                                                  args.biased_crop,
                                                  backgroundValue)

    # Each flip fires with probability 1/2 when enabled.
    if args.h_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 1)
        output_image = cv2.flip(output_image, 1)
    if args.v_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 0)
        output_image = cv2.flip(output_image, 0)
    if args.brightness:
        # Brightness only touches the input so label values stay intact.
        factor = 1.0 + random.uniform(-1.0 * args.brightness, args.brightness)
        table = np.array([((i / 255.0) * factor) * 255
                          for i in np.arange(0, 256)]).astype(np.uint8)
        input_image = cv2.LUT(input_image, table)
    if args.rotation:
        # Does not work: network does not improve during training when
        # specified. FIX ME!  (kept from the original author's note)
        angle = random.uniform(-1 * args.rotation, args.rotation)
        M = cv2.getRotationMatrix2D(
            (input_image.shape[1] // 2, input_image.shape[0] // 2), angle, 1.0)
        input_image = cv2.warpAffine(
            input_image,
            M, (input_image.shape[1], input_image.shape[0]),
            flags=cv2.INTER_NEAREST)
        output_image = cv2.warpAffine(
            output_image,
            M, (output_image.shape[1], output_image.shape[0]),
            flags=cv2.INTER_NEAREST)
    if args.rotation_perpendicular:
        # Random number of quarter turns in {0, 1, 2, 3}; np.rot90 with
        # k quarter turns replaces the original's rot90-in-a-loop.
        quarter_turns = math.floor(random.uniform(0, 4))
        input_image = np.rot90(input_image, quarter_turns)
        output_image = np.rot90(output_image, quarter_turns)

    return input_image, output_image
Example #7
0
def data_augmentation(input_image, output_image):
    """Randomly augment an image/label pair in lockstep.

    Crops to (args.crop_height, args.crop_width), then optionally: moves
    the dark foreground to a random position on a white canvas, applies
    horizontal/vertical flips, a brightness adjustment (input only), and
    a random rotation.

    Returns:
        Tuple (input_image, output_image) after augmentation.
    """
    input_image, output_image = utils.random_crop(input_image, output_image,
                                                  args.crop_height,
                                                  args.crop_width)

    if args.move and random.randint(0, 1):
        # Find the bounding box of dark (<128 gray) content and paste it
        # at a random location on a fresh white input canvas.
        gray = cv2.cvtColor(cv2.UMat(input_image), cv2.COLOR_BGR2GRAY)
        gray = 255 * (gray.get() < 128).astype(np.uint8)
        coords = cv2.findNonZero(gray)
        x, y, w, h = cv2.boundingRect(coords)
        crop_input = input_image[y:y + h, x:x + w]
        crop_output = output_image[y:y + h, x:x + w]
        # NOTE(review): np.full defaults to int64 here, not the source
        # image's dtype — confirm downstream code tolerates that.
        input_image = np.full(input_image.shape, 255)
        # NOTE(review): output_image is NOT reset to a background value,
        # so its original content outside the pasted box survives —
        # verify this is intended.
        new_coordinate = (random.randint(
            0, input_image.shape[0] - crop_input.shape[0]),
                          random.randint(
                              0, input_image.shape[1] - crop_input.shape[1]))
        input_image[new_coordinate[0]:new_coordinate[0] + crop_input.shape[0],
                    new_coordinate[1]:new_coordinate[1] +
                    crop_input.shape[1]] = crop_input
        output_image[new_coordinate[0]:new_coordinate[0] +
                     crop_output.shape[0],
                     new_coordinate[1]:new_coordinate[1] +
                     crop_output.shape[1]] = crop_output
    if args.h_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 1)
        output_image = cv2.flip(output_image, 1)
    if args.v_flip and random.randint(0, 1):
        input_image = cv2.flip(input_image, 0)
        output_image = cv2.flip(output_image, 0)

    if args.brightness:
        # Brightness only touches the input so label values stay intact.
        factor = 1.0 + random.uniform(-1.0 * args.brightness, args.brightness)
        table = np.array([((i / 255.0) * factor) * 255
                          for i in np.arange(0, 256)]).astype(np.uint8)
        input_image = cv2.LUT(input_image, table)
    if args.rotation:
        # FIX: merged the original's two consecutive `if args.rotation:`
        # guards (one computing `angle`, one using it).
        angle = random.uniform(-1 * args.rotation, args.rotation)
        M = cv2.getRotationMatrix2D(
            (input_image.shape[1] // 2, input_image.shape[0] // 2), angle, 1.0)
        input_image = cv2.warpAffine(
            input_image,
            M, (input_image.shape[1], input_image.shape[0]),
            flags=cv2.INTER_NEAREST)
        output_image = cv2.warpAffine(
            output_image,
            M, (output_image.shape[1], output_image.shape[0]),
            flags=cv2.INTER_NEAREST)

    return input_image, output_image
    def prepare(self):
        """Load the PTB-XL dataset, split it into train/val/test folds,
        augment/cut the signals, preprocess them, and persist the labels.

        Reads: self.datafolder, self.sampling_frequency, self.task,
        self.min_samples, self.outputfolder, self.experiment_name,
        self.test_fold, self.val_fold, self.train_fold.
        Sets: self.data, self.raw_labels, self.labels, self.Y,
        self.input_shape, self.X_train/X_val/X_test,
        self.y_train/y_val/y_test, self.pid_val, self.pid_test,
        self.n_classes.
        """
        # Load PTB-XL data
        self.data, self.raw_labels = utils.load_dataset(
            self.datafolder, self.sampling_frequency)

        # Preprocess label data
        self.labels = utils.compute_label_aggregations(self.raw_labels,
                                                       self.datafolder,
                                                       self.task)

        # Select relevant data and convert to one-hot
        self.data, self.labels, self.Y, _ = utils.select_data(
            self.data, self.labels, self.task, self.min_samples,
            self.outputfolder + self.experiment_name + '/data/')
        self.input_shape = self.data[0].shape

        # 10th fold for testing (9th for now)
        self.X_test = self.data[self.labels.strat_fold == self.test_fold]
        self.y_test = self.Y[self.labels.strat_fold == self.test_fold]
        # 9th fold for validation (8th for now)
        self.X_val = self.data[self.labels.strat_fold == self.val_fold]
        self.y_val = self.Y[self.labels.strat_fold == self.val_fold]
        # rest for training
        # NOTE: training uses <= train_fold, so it is every fold up to
        # and including train_fold (val/test use exact equality above).
        self.X_train = self.data[self.labels.strat_fold <= self.train_fold]
        self.y_train = self.Y[self.labels.strat_fold <= self.train_fold]

        print('val>>', self.X_val.shape)
        print('test>>', self.X_test.shape)

        # random crop trainset and slide cut validation/test
        self.X_train = utils.random_crop(self.X_train, fs=100, crops=1)
        self.y_train = utils.remark_label(self.y_train, crops=1)

        # Val/test are cut into fixed windows (size 250, stride 125);
        # the helpers also return per-window patient ids.
        self.X_val, self.y_val, self.pid_val = utils.slide_and_cut(
            self.X_val, self.y_val, window_size=250, stride=125)
        self.X_test, self.y_test, self.pid_test = utils.slide_and_cut(
            self.X_test, self.y_test, window_size=250, stride=125)

        # Preprocess signal data
        self.X_train, self.X_val, self.X_test = utils.preprocess_signals(
            self.X_train, self.X_val, self.X_test,
            self.outputfolder + self.experiment_name + '/data/')
        self.n_classes = self.y_train.shape[1]

        # save train and test labels
        # (numpy ndarray.dump pickles the array to the given path)
        self.y_train.dump(self.outputfolder + self.experiment_name +
                          '/data/y_train.npy')
        self.y_val.dump(self.outputfolder + self.experiment_name +
                        '/data/y_val.npy')
        self.y_test.dump(self.outputfolder + self.experiment_name +
                         '/data/y_test.npy')
    def __data_generator(self, batch_samples):
        """Assemble one batch: augmented images X and normalized labels y."""
        count = len(batch_samples)
        # Allocate the output tensors once, up front.
        X = np.empty((count, *self.img_crop_dims, 3))
        y = np.empty((count, self.n_classes))

        for pos, entry in enumerate(batch_samples):
            # Load and randomly augment this sample's image.
            filename = '{}.{}'.format(entry['image_id'], self.img_format)
            image = utils.load_image(os.path.join(self.img_dir, filename),
                                     self.img_load_dims)
            if image is not None:
                image = utils.random_crop(image, self.img_crop_dims)
                image = utils.random_horizontal_flip(image)
                X[pos, ] = image

            # Normalize labels (done even when the image failed to load).
            y[pos, ] = utils.normalize_labels(entry['label'])

        # Apply basenet-specific preprocessing to the 4D batch of
        # RGB values within [0, 255].
        X = self.basenet_preprocess(X)

        return X, y
Example #10
0
# Per-image evaluation accumulators, filled by the test loop below:
# per-class scores, precision, recall, F1, IoU, and wall-clock inference
# time for each test image.
class_scores_list = []
precision_list = []
recall_list = []
f1_list = []
iou_list = []
run_times_list = []
# Run testing on ALL test images
for ind in range(len(test_input_names)):
    sys.stdout.write("\rRunning test image %d / %d"%(ind+1, len(test_input_names)))
    sys.stdout.flush()

    input_image = utils.load_image(test_input_names[ind])
    gt = utils.load_image(test_output_names[ind])
    if args.crop_height and args.crop_width:
        input_image,gt = utils.random_crop(input_image,gt,args.crop_height,args.crop_width)
    input_image,gt = utils.input_resize(input_image,gt,args.input_height,args.input_width)
    input_image = np.expand_dims(np.float32(input_image), axis=0) / 255.0
    gt = helpers.reverse_one_hot(helpers.one_hot_it(gt, label_values))

    # input_image = np.expand_dims(np.float32(utils.load_image(test_input_names[ind])[:args.crop_height, :args.crop_width]),axis=0)/255.0
    # gt = utils.load_image(test_output_names[ind])[:args.crop_height, :args.crop_width]
    # gt = helpers.reverse_one_hot(helpers.one_hot_it(gt, label_values))

    st = time.time()
    output_image = sess.run(network,feed_dict={net_input:input_image})

    run_times_list.append(time.time()-st)

    output_image = np.array(output_image[0,:,:,:])
    output_image = helpers.reverse_one_hot(output_image)