Example #1
def get_results():
    test_lab = np.zeros((test.shape[0], 128, 128, 3))
    train_lab = np.zeros((75, 128, 128, 3))  # sized to match the train[:75] slice used below

    for i, image in enumerate(train[:75]):
        train_lab[i] = rgb_to_lab(image)

    for i, image in enumerate(test):
        test_lab[i] = rgb_to_lab(image)

    Xtrain_lab = train_lab[:, :, :, :1]
    Ytrain_lab = train_lab[:, :, :, 1:]

    Xtest_lab = test_lab[:, :, :, :1]
    Ytest_lab = test_lab[:, :, :, 1:]

    print("Loss on test set of 75 images")
    print(model.evaluate(Xtest_lab, Ytest_lab))
    predict(Xtest_lab, 'test')
    predict(Xtrain_lab, 'train')
    def predict(self, path):
        rgb = img_to_array(load_img(path, target_size=settings.SIZE)).astype(np.uint8) # shape (224, 224, 3)
        x_lab = [rgb_to_lab(rgb)] 
        lab = np.stack(x_lab) # shape (1, 224, 224, 3)
        l = lab[:, :, :, 0:1] # shape (1, 224, 224, 1)
        preds = self.autoencoder.predict(l, verbose=0)
        preds_lab = np.concatenate((l, preds), 3).astype(np.uint8)
        preds_rgb = lab_to_rgb(preds_lab[0, :, :, :])

        gray_image = img_to_array(ImageOps.grayscale(array_to_img(rgb)))
        auto_colored_image = preds_rgb
        np_img_list = [gray_image, auto_colored_image, rgb]
        self.save(datetime.now().strftime('%Y%m%d%H%M%S'), np_img_list)
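All of the snippets on this page assume a module-level rgb_to_lab helper (and, in the method above, a matching lab_to_rgb). A minimal sketch of one plausible pair, built on skimage.color and scaling every channel to [0, 1]; the exact scaling used by each project may differ:

import numpy as np
from skimage import color

def rgb_to_lab(rgb):
    # convert an RGB uint8 image to LAB, scaling all channels to [0, 1]
    lab = color.rgb2lab(rgb)  # L in [0, 100], a/b roughly in [-128, 127]
    lab[:, :, 0] /= 100.0
    lab[:, :, 1:] = (lab[:, :, 1:] + 128.0) / 255.0
    return lab

def lab_to_rgb(lab):
    # inverse of the sketch above: undo the scaling, then convert back to RGB uint8
    lab = lab.astype(np.float64)
    lab[:, :, 0] *= 100.0
    lab[:, :, 1:] = lab[:, :, 1:] * 255.0 - 128.0
    return (color.lab2rgb(lab) * 255).astype(np.uint8)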
def get_results():
    Xtest_lab = np.zeros((test.shape[0], 128, 128, 1))
    Ytest_lab = np.zeros((test.shape[0], 2, 1))
    for i, image in enumerate(test):
        lab_image = rgb_to_lab(image.astype(np.uint8))
        l, a, b = lab_image[:, :, :1], lab_image[:, :, 1:2], lab_image[:, :, 2:3]
        Xtest_lab[i] = l
        Ytest_lab[i, 0] = np.mean(a)
        Ytest_lab[i, 1] = np.mean(b)

    print("Loss on test set of 75 images",
          model.evaluate(Xtest_lab, Ytest_lab))
    predict(Xtest_lab, 'test')
    def _manipulate_tone(self):
        lab = utils.rgb_to_lab(self.img)
        L = lab[..., 0]

        L_mean = np.mean(L)

        if self.sequence is None:
            self.sequence = self.decompose(L)

        lab = tone_manipulation(lab,
                                self.sequence,
                                self.a_values,
                                exposure=self.exposure,
                                saturation=self.saturation,
                                mask=self.mask)
        return lab
    def reader():
        while True:
            for data_path in data_paths:
                raw_image = load_image(
                    file=data_path
                )  # raw_image has type: <type 'numpy.ndarray'>
                # split raw_image to input and target according to a.which_direction
                assert raw_image.shape[2] == 3

                if lab_colorization:
                    # load color and brightness from image, no B image exists here
                    lab = utils.rgb_to_lab(raw_image)
                    L_chan, a_chan, b_chan = utils.preprocess_lab(lab)
                    a_image = L_chan  # brightness (L) channel is the input image
                    b_image = np.stack([a_chan, b_chan], axis=2)
                else:
                    # break apart image pair and move to range [-1, 1]
                    width = raw_image.shape[1]  # [height, width, channels]
                    a_image = utils.preprocess(raw_image[:, :width // 2, :])
                    b_image = utils.preprocess(raw_image[:, width // 2:, :])

                if which_direction == "AtoB":
                    input, target = [a_image, b_image]
                elif which_direction == "BtoA":
                    input, target = [b_image, a_image]
                else:
                    raise Exception("invalid direction")

                input_image = simple_transform(im=input,
                                               resize_size=resize_size,
                                               crop_size=crop_size,
                                               is_train=is_train)
                target_image = simple_transform(im=target,
                                                resize_size=resize_size,
                                                crop_size=crop_size,
                                                is_train=is_train)

                # flip the images randomly
                r = random.random()
                if r > 0.5:
                    input_image = left_right_flip(input_image)
                    target_image = left_right_flip(target_image)

                yield input_image, target_image

            if not cycle:
                break
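A hypothetical way to drive this generator from inside the enclosing data-loading function (data_paths, which_direction, resize_size, crop_size, is_train and cycle are all captured from that scope):

pairs = reader()
for step, (input_image, target_image) in enumerate(pairs):
    # inspect a few input/target pairs, then stop
    print(step, input_image.shape, target_image.shape)
    if step == 3:
        break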
Example #6
def augment(batches, activation_last):
    batch_num = 0
    while True:
        batch = next(batches)
        augmented_batch = np.zeros((batch.shape[0], 128, 128, 3))
        for i, image in enumerate(batch):
            scalar = np.random.uniform(0.6, 1.0)
            image = image * scalar
            cv2.imwrite(
                augmented_path + str(batch_num) + "_" + str(i) + ".jpg", image)
            augmented_batch[i] = rgb_to_lab(image.astype(np.uint8))
        batch_num += 1
        X_batch = augmented_batch[:, :, :, :1]
        if activation_last == 'relu':
            Y_batch = augmented_batch[:, :, :, 1:]
        else:
            Y_batch = 2 * augmented_batch[:, :, :, 1:] - 1  # [0,1]->[-1,1]
        yield (X_batch, Y_batch)
def augment(batches):
    batch_num = 0
    while True:
        batch = next(batches)
        X_batch = np.zeros((batch.shape[0], 128, 128, 1))
        Y_batch = np.zeros((batch.shape[0], 2, 1))
        for i, image in enumerate(batch):
            scalar = np.random.uniform(0.6, 1.0)
            image = image * scalar
            cv2.imwrite(
                augmented_path + str(batch_num) + "_" + str(i) + ".jpg", image)
            lab_image = rgb_to_lab(image.astype(np.uint8))
            l, a, b = lab_image[:, :, :1], lab_image[:, :, 1:2], lab_image[:, :, 2:3]
            X_batch[i] = l
            Y_batch[i, 0] = np.mean(a)
            Y_batch[i, 1] = np.mean(b)
        batch_num += 1
        yield (X_batch, Y_batch)
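A hypothetical way to wire either augment generator into Keras training; batches, model, num_train_images and batch_size stand in for objects defined elsewhere in these projects:

# placeholders: `batches` is an image generator, `model` is the colorization network
train_gen = augment(batches)  # or augment(batches, 'relu') for the first variant
model.fit(train_gen,
          steps_per_epoch=num_train_images // batch_size,
          epochs=20)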
def get_gray_and_ab(image: dict) -> Tuple[dict, tf.Tensor]:
    """
    Get grayscale and ab channels from an rgb image
    Parameters
    ----------
    image: tf.Tensor
        image in the LAB color space

    Returns
    -------
    ab channels: tf.Tensor
    """
    img = image['input_1']
    gray = rgb_to_gray(img)
    lab = rgb_to_lab(img)
    ab = lab[:, :, 1:]
    image['input_1'] = gray
    image['input_2'] = gray
    return image, ab
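A hypothetical place for this function in a tf.data pipeline, assuming dataset already yields dicts with an RGB image tensor under 'input_1':

dataset = dataset.map(get_gray_and_ab, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(16).prefetch(tf.data.AUTOTUNE)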
Example #9
    def get(self, query_string):
        """
        query_string : string
            5 colour input palette as hex format string
            no #s, 5 colours of len  6, total len 30, no delimiters
            eg 'bbafa557534d383530726b63958b81'
        """
        query_palette = [query_string[i:i + 6] for i in range(0, 30, 6)]
        rgb_palette = np.array(
            [hex_to_rgb(colour) for colour in query_palette])
        palette = rgb_to_lab(rgb_palette)

        distances = (np.linalg.norm(all_possible_palettes - palette,
                                    axis=3).sum(axis=2).min(axis=1))

        palette_distances = pd.Series(dict(zip(image_ids, distances)))
        # take the 2nd through 13th closest palettes (12 results)
        most_similar_ids = (
            palette_distances.sort_values()[1:13].index.values.tolist())

        return jsonify({
            'request': query_palette,
            'response': ids_to_urls(most_similar_ids)
        })
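The handler assumes a hex_to_rgb helper; a minimal sketch matching the docstring's palette format (the project may define its own):

def hex_to_rgb(colour):
    # 'bbafa5' -> [187, 175, 165]
    return [int(colour[i:i + 2], 16) for i in (0, 2, 4)]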
batch_input_1 = []
batch_input_2 = []
batch_l_channel = []
# prepare the input
for file in image_files[4:]:
    img_contents = tf.io.read_file(file)
    rgb = tf.image.decode_jpeg(img_contents)
    # crop or pad to the two fixed input sizes
    input_1 = tf.image.resize_with_crop_or_pad(rgb,
                                               target_width=224,
                                               target_height=224)
    input_2 = tf.image.resize_with_crop_or_pad(rgb,
                                               target_width=229,
                                               target_height=229)
    # convert input_1 to LAB
    lab_img = rgb_to_lab(input_1)

    l_channel = lab_img[:, :, 0]
    batch_l_channel.append(l_channel)

    # convert to grayscale
    input_1 = tf.image.rgb_to_grayscale(input_1)
    input_2 = tf.image.rgb_to_grayscale(input_2)
    input_2 = tf.stack([input_2[:, :, 0]] * 3, axis=2)

    # normalize the images to [-1, 1]
    input_1 = normalize_rgb(input_1)
    input_2 = normalize_rgb(input_2)

    # add batch dimension
    input_1 = tf.expand_dims(input_1, axis=0)
Example #11
    def get_lab_from_data_list(self, data_list):
        # load each image at settings.SIZE, convert it to LAB, and stack into a batch
        x_lab = []
        for f in data_list:
            rgb = img_to_array(load_img(f, target_size=settings.SIZE)).astype(np.uint8)
            x_lab.append(rgb_to_lab(rgb))
        return np.stack(x_lab)
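A hypothetical call, assuming settings.SIZE is something like (224, 224) and loader is an instance of the class this method belongs to:

lab_batch = loader.get_lab_from_data_list(['cat.jpg', 'dog.jpg'])
print(lab_batch.shape)  # e.g. (2, 224, 224, 3)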