Example #1
def get_data_training(img_hdf5, msk_hdf5, patch_h, patch_w, N_subimgs,
                      inside_FOV):
    """Load training images/masks from HDF5 and extract N_subimgs random patches."""
    train_img = load_hdf5(img_hdf5)
    train_msk = load_hdf5(msk_hdf5)
    consistency_check(train_img, train_msk)

    patches_train_img, patches_train_msk = extract_random(train_img,
                                                          train_msk,
                                                          patch_h,
                                                          patch_w,
                                                          N_subimgs,
                                                          inside=inside_FOV)
    consistency_check(patches_train_img, patches_train_msk)
    #patches_train_msk = mask_transform(patches_train_msk)
    return patches_train_img, patches_train_msk
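A minimal usage sketch for the function above; the patch size, patch count and
ground-truth path are illustrative assumptions (only the DRIVE images path
appears elsewhere in these examples):

patches_img, patches_msk = get_data_training(
    img_hdf5='./hdf5/DRIVE_dataset_imgs_train.hdf5',
    msk_hdf5='./hdf5/DRIVE_dataset_groundTruth_train.hdf5',  # assumed file name
    patch_h=48,
    patch_w=48,
    N_subimgs=190000,
    inside_FOV=True)
print(patches_img.shape, patches_msk.shape)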
Example #2
def visualize_predicts(predicts, images_path, labels, masks, height, width,
                       name):
    """
    right: green
    error: red
    missing: blue
    """
    if not path.exists('./logs/' + name + '/predicts/'):
        system('mkdir ./logs/' + name + '/predicts/')

    images = load_hdf5(images_path + 'test_images.hdf5')

    pad_height = labels.shape[1]
    pad_width = labels.shape[2]

    for i in range(len(predicts)):
        vis = np.zeros((pad_height, pad_width, 3))
        # True positives: predicted foreground pixels that match the label inside the mask.
        right = predicts[i] * np.squeeze(labels[i]) * np.squeeze(masks[i])
        # False positives: predicted pixels that are not true positives.
        error = predicts[i] - right
        # False negatives: labelled pixels the prediction missed.
        missing = np.squeeze(labels[i]) - right
        vis[:, :, 0] = error    # red channel
        vis[:, :, 1] = right    # green channel
        vis[:, :, 2] = missing  # blue channel

        vis = np.concatenate((images[i], vis[:height, :width] * 255), axis=0)

        visualize(vis, './logs/' + name + '/predicts/' + str(i + 1) + '.png')
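The overlay arithmetic above assumes predicts, labels and masks are binary
{0, 1} arrays of shape (N, H, W); a short sketch of how raw probability maps
might be binarized first (the 0.5 threshold and the stand-in array are illustrative):

import numpy as np

prob_maps = np.random.rand(2, 565, 565)           # stand-in probability maps
predicts = (prob_maps > 0.5).astype(np.float32)   # binary {0, 1} predictions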
Example #3
    def __call__(self):
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        images = pre_processing(images)
        if np.max(labels) > 1:
            labels = labels / 255.
        masks = masks / 255.

        if self.train_test == 'train':
            return self.extract_ordered(images, labels)

        if self.train_test == 'test':
            sub_images, sub_labels = self.extract_ordered(images, labels)
            return (sub_images, images, labels, masks)
Example #4
    def __call__(self):
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        assert images.shape[1] == self.height and images.shape[2] == self.width
        assert labels.shape[1] == self.height and labels.shape[2] == self.width
        assert masks.shape[1] == self.height and masks.shape[2] == self.width

        images = pre_processing(images)
        if np.max(labels) > 1:
            labels = labels / 255.
        masks = masks / 255.

        images, labels, masks = self.padding(images, labels, masks)

        print('images:', images.shape, images.dtype, np.min(images), np.max(images))
        print('labels:', labels.shape, labels.dtype, np.min(labels), np.max(labels))
        print('masks:', masks.shape, masks.dtype, np.min(masks), np.max(masks))

        return images, labels, masks
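The padding helper used above is not part of the excerpt; the sketch below shows
one plausible form, zero-padding height and width up to the next multiple of the
patch size (the function name and behaviour are assumptions, not the class's
actual method):

import numpy as np

def pad_to_multiple(images, labels, masks, patch_h, patch_w):
    # Zero-pad axes 1 (height) and 2 (width) so both become exact multiples
    # of the patch size; the batch and channel axes are left untouched.
    h, w = images.shape[1], images.shape[2]
    pad_h = (patch_h - h % patch_h) % patch_h
    pad_w = (patch_w - w % patch_w) % patch_w
    pad = ((0, 0), (0, pad_h), (0, pad_w), (0, 0))
    return np.pad(images, pad), np.pad(labels, pad), np.pad(masks, pad)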
Example #5
    def __call__(self):
        images = load_hdf5(self.images_path)
        labels = load_hdf5(self.labels_path)
        masks = load_hdf5(self.mask_path)

        images = pre_processing(images)
        if np.max(labels) > 1:
            labels = labels / 255.
        masks = masks / 255.

        #visualize(group_images(images, 4)).show()
        #visualize(group_images(labels, 4)).show()
        #visualize(group_images(masks, 4)).show()

        #print(images.shape, images.dtype, np.min(images), np.max(images))
        #print(labels.shape, labels.dtype, np.min(labels), np.max(labels))
        #print(masks.shape, masks.dtype, np.min(masks), np.max(masks))

        if self.train_test == 'train':
            return self.extract_ordered(images, labels)

        if self.train_test == 'test':
            sub_images, sub_labels = self.extract_ordered(images, labels)
            return (sub_images, images, labels, masks)
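extract_ordered itself is not shown in these excerpts; below is a sketch of the
kind of ordered, non-overlapping patch extraction such pipelines use, assuming
channel-last (N, H, W, C) arrays whose height and width divide evenly by the
patch size (names and details are assumptions):

import numpy as np

def extract_ordered_patches(images, labels, patch_h, patch_w):
    # Split every image and label map into non-overlapping patches,
    # scanning top-to-bottom, left-to-right.
    sub_images, sub_labels = [], []
    for i in range(images.shape[0]):
        for y in range(0, images.shape[1] - patch_h + 1, patch_h):
            for x in range(0, images.shape[2] - patch_w + 1, patch_w):
                sub_images.append(images[i, y:y + patch_h, x:x + patch_w])
                sub_labels.append(labels[i, y:y + patch_h, x:x + patch_w])
    return np.asarray(sub_images), np.asarray(sub_labels)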
Example #6
    # Standardize the whole set (zero mean, unit std), then rescale each image to 0-255.
    images_std = np.std(images)
    images_mean = np.mean(images)
    images_normalized = (images - images_mean) / images_std
    for i in range(images.shape[0]):
        minv = np.min(images_normalized[i])
        images_normalized[i] = ((images_normalized[i] - minv) /
                                (np.max(images_normalized[i]) - minv)) * 255

    return images_normalized


def adjust_gamma(images, gamma=1.0):
    assert (len(images.shape) == 4)
    assert (images.shape[3] == 1)

    # Build a 256-entry lookup table mapping each 8-bit value through the gamma curve.
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0)**invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    new_images = np.empty(images.shape)
    for i in range(images.shape[0]):
        new_images[i, :, :,
                   0] = cv2.LUT(np.array(images[i, :, :, 0], dtype=np.uint8),
                                table)

    return new_images


if __name__ == '__main__':
    from utils import load_hdf5
    images = load_hdf5('./datasets/CHASEDB/h5py/train_images.hdf5')
    images = pre_processing(images)
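Every example in this collection relies on a load_hdf5 helper from utils; a
minimal sketch of such a helper built on h5py is shown below, assuming the array
is stored under a single dataset key (the key name 'image' is an assumption):

import h5py

def load_hdf5(infile):
    # Open the file read-only and return the stored dataset as a NumPy array.
    with h5py.File(infile, 'r') as f:
        return f['image'][()]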
Example #7
from dataset import Dataset
import os
from PIL import Image
import numpy as np
from utils import load_hdf5
'''
pth = '/home/tianshu/unet/data/comb/label/'
#imgs = sorted(os.listdir(pth))
img_pth = pth+'21_manual1.gif'
label = Image.open(img_pth).convert('L')
label = np.array(label)[9:574, :]
print(label.shape)
label = label / 255
classes = np.unique(label)
print(classes)
'''

# Note: despite the variable name, this loads the DRIVE training *images* file.
msks = load_hdf5('./hdf5/DRIVE_dataset_imgs_train.hdf5')
print(np.max(msks), np.min(msks))
Example #8
def load_dataset(sketch_data_dir,
                 photo_data_dir,
                 model_params,
                 inference_mode=False):
    """Loads the .npz file, and splits the set into train/test."""

    # normalizes the x and y columns using the training set.
    # applies same scaling factor to test set.

    if isinstance(model_params.data_set, list):
        datasets = model_params.data_set
    else:
        datasets = [model_params.data_set]

    train_strokes = None
    test_strokes = None
    train_image_paths = []
    test_image_paths = []

    for dataset in datasets:
        if model_params.data_type == 'QMUL':
            train_data_filepath = os.path.join(sketch_data_dir, dataset,
                                               'train_svg_sim_spa_png.h5')
            test_data_filepath = os.path.join(sketch_data_dir, dataset,
                                              'test_svg_sim_spa_png.h5')

            train_data_dict = utils.load_hdf5(train_data_filepath)
            test_data_dict = utils.load_hdf5(test_data_filepath)

            train_sketch_data = utils.reassemble_data(
                train_data_dict['image_data'], train_data_dict['data_offset']
            )  # list of [N_sketches], each [N_points, 4]
            train_photo_names = train_data_dict[
                'image_base_name']  # [N_sketches, 1], byte
            train_photo_paths = [
                os.path.join(photo_data_dir,
                             train_photo_names[i, 0].decode() + '.png')
                for i in range(train_photo_names.shape[0])
            ]  # [N_sketches], str
            test_sketch_data = utils.reassemble_data(
                test_data_dict['image_data'], test_data_dict['data_offset']
            )  # list of [N_sketches], each [N_points, 4]
            test_photo_names = test_data_dict[
                'image_base_name']  # [N_sketches, 1], byte
            test_photo_paths = [
                os.path.join(photo_data_dir,
                             test_photo_names[i, 0].decode() + '.png')
                for i in range(test_photo_names.shape[0])
            ]  # [N_sketches], str

            # transfer stroke-4 to stroke-3
            train_sketch_data = utils.to_normal_strokes_4to3(train_sketch_data)
            test_sketch_data = utils.to_normal_strokes_4to3(
                test_sketch_data)  # [N_sketches,], each with [N_points, 3]

            if train_strokes is None:
                train_strokes = train_sketch_data
                test_strokes = test_sketch_data
            else:
                train_strokes = np.concatenate(
                    (train_strokes, train_sketch_data))
                test_strokes = np.concatenate((test_strokes, test_sketch_data))

        elif model_params.data_type == 'QuickDraw':
            data_filepath = os.path.join(sketch_data_dir, dataset, 'npz',
                                         'sketchrnn_' + dataset + '.npz')
            if six.PY3:
                data = np.load(data_filepath, encoding='latin1')
            else:
                data = np.load(data_filepath)

            if train_strokes is None:
                train_strokes = data[
                    'train']  # [N_sketches,], each with [N_points, 3]
                test_strokes = data['test']
            else:
                train_strokes = np.concatenate((train_strokes, data['train']))
                test_strokes = np.concatenate((test_strokes, data['test']))

            train_photo_paths = [
                os.path.join(
                    sketch_data_dir, dataset, 'png', 'train',
                    str(model_params.image_size) + 'x' +
                    str(model_params.image_size),
                    str(im_idx) + '.png')
                for im_idx in range(len(data['train']))
            ]
            test_photo_paths = [
                os.path.join(
                    sketch_data_dir, dataset, 'png', 'test',
                    str(model_params.image_size) + 'x' +
                    str(model_params.image_size),
                    str(im_idx) + '.png')
                for im_idx in range(len(data['test']))
            ]
        else:
            raise Exception('Unknown data type:', model_params.data_type)

        print('Loaded {}/{} from {} {}'.format(len(train_photo_paths),
                                               len(test_photo_paths),
                                               model_params.data_type,
                                               dataset))
        train_image_paths += train_photo_paths
        test_image_paths += test_photo_paths

    all_strokes = np.concatenate((train_strokes, test_strokes))
    num_points = 0
    for stroke in all_strokes:
        num_points += len(stroke)
    avg_len = num_points / len(all_strokes)
    print('Dataset combined: {} ({}/{}), avg len {}'.format(
        len(all_strokes), len(train_strokes), len(test_strokes), int(avg_len)))
    assert len(train_image_paths) == len(train_strokes)
    assert len(test_image_paths) == len(test_strokes)

    # calculate the max strokes we need.
    max_seq_len = utils.get_max_len(all_strokes)

    # overwrite the hps with this calculation.
    model_params.max_seq_len = max_seq_len
    print('model_params.max_seq_len %i.' % model_params.max_seq_len)

    eval_model_params = sketch_p2s_model.copy_hparams(model_params)
    eval_model_params.use_input_dropout = 0
    eval_model_params.use_recurrent_dropout = 0
    eval_model_params.use_output_dropout = 0
    eval_model_params.is_training = 1

    if inference_mode:
        eval_model_params.batch_size = 1
        eval_model_params.is_training = 0

    sample_model_params = sketch_p2s_model.copy_hparams(eval_model_params)
    sample_model_params.batch_size = 1  # only sample one at a time
    sample_model_params.max_seq_len = 1  # sample one point at a time

    train_set = utils.DataLoader(
        train_strokes,
        train_image_paths,
        model_params.image_size,
        model_params.image_size,
        model_params.batch_size,
        max_seq_length=model_params.max_seq_len,
        random_scale_factor=model_params.random_scale_factor,
        augment_stroke_prob=model_params.augment_stroke_prob)

    normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()
    train_set.normalize(normalizing_scale_factor)

    # valid_set = utils.DataLoader(
    #     valid_strokes,
    #     eval_model_params.batch_size,
    #     max_seq_length=eval_model_params.max_seq_len,
    #     random_scale_factor=0.0,
    #     augment_stroke_prob=0.0)
    # valid_set.normalize(normalizing_scale_factor)

    test_set = utils.DataLoader(test_strokes,
                                test_image_paths,
                                model_params.image_size,
                                model_params.image_size,
                                eval_model_params.batch_size,
                                max_seq_length=eval_model_params.max_seq_len,
                                random_scale_factor=0.0,
                                augment_stroke_prob=0.0)
    test_set.normalize(normalizing_scale_factor)

    print('normalizing_scale_factor %4.4f.' % normalizing_scale_factor)

    result = [
        train_set, None, test_set, model_params, eval_model_params,
        sample_model_params
    ]
    return result
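A usage sketch for load_dataset; everything below is illustrative, and
model_params stands in for the project's hparams object carrying the fields the
function reads (data_set, data_type, image_size, batch_size, random_scale_factor,
augment_stroke_prob):

from types import SimpleNamespace

# Stand-in hparams; real code would build this from the project's own
# hyper-parameter definition rather than a SimpleNamespace.
model_params = SimpleNamespace(
    data_set=['shoes'], data_type='QMUL', image_size=256, batch_size=100,
    max_seq_len=0, random_scale_factor=0.0, augment_stroke_prob=0.0)

train_set, _, test_set, hps, eval_hps, sample_hps = load_dataset(
    './data/sketches', './data/photos', model_params, inference_mode=False)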