Example No. 1
 def test_yuv_roundtrip(self):
     img_rgb = img_as_float(self.img_rgb)[::16, ::16]
     assert_array_almost_equal(yuv2rgb(rgb2yuv(img_rgb)), img_rgb)
     assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb)
     assert_array_almost_equal(ypbpr2rgb(rgb2ypbpr(img_rgb)), img_rgb)
     assert_array_almost_equal(ycbcr2rgb(rgb2ycbcr(img_rgb)), img_rgb)
     assert_array_almost_equal(ydbdr2rgb(rgb2ydbdr(img_rgb)), img_rgb)
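As a standalone illustration of the same check, here is a minimal sketch outside the test class (the synthetic image is an assumption, not part of the original suite):

import numpy as np
from skimage.color import rgb2yiq, yiq2rgb

rng = np.random.default_rng(0)
img_rgb = rng.random((32, 32, 3))  # hypothetical RGB float image in [0, 1]

# RGB -> YIQ -> RGB should be the identity up to floating-point error
np.testing.assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb)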
Example No. 3
def match_luminance(content, style):
    content = content / 255
    style = style / 255
    content = color.rgb2yiq(content)
    style = color.rgb2yiq(style)
    mean_c = np.mean(content)
    mean_s = np.mean(style)
    stddev_c = np.std(content)
    stddev_s = np.std(style)
    style = (stddev_c / stddev_s) * (style - mean_s) + mean_c
    style = np.clip(color.yiq2rgb(style), 0, 1) * 255
    return style
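A usage sketch, assuming `skimage.color` is imported as `color` (as the function body requires) and both inputs are 0-255 RGB arrays of the same shape; the arrays here are illustrative:

import numpy as np
from skimage import color

content = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)
style = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)

# style comes back with its luminance statistics matched to the content's
matched = match_luminance(content, style)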
Example No. 4
def _equalize_luminance(im, new_mean):
    im_arr = np.asarray(im)
    ret_im = np.zeros_like(im_arr, dtype=np.uint8)  # np.asarray may return a read-only view, so build a fresh array
    ret_im[:, :, 3] = im_arr[:, :, 3]
    non_transparent_indices = (im_arr[:, :, 3] != 0)
    rgb = im_arr[:, :, 0:3] / 255  # turn to 0-1 image instead of 0-255
    yiq = rgb2yiq(rgb)
    # disregard pixels with alpha=0 when computing the mean luminance
    cur_mean = np.mean(yiq[non_transparent_indices, 0])
    yiq[non_transparent_indices, 0] -= (cur_mean - new_mean)
    rgb = np.round(yiq2rgb(yiq) * 255)  # convert back to a non-normalized RGB image
    rgb /= np.max(rgb)  # normalize
    rgb = np.round(rgb * 255)  # back to 0-255
    ret_im[:, :, 0:3] = np.clip(rgb, 0, 255)  # clip negatives before the uint8 cast
    ret_im = Image.fromarray(ret_im)
    return ret_im
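A usage sketch with a hypothetical RGBA input; fully transparent pixels are left out of the mean, as the helper intends:

import numpy as np
from PIL import Image
from skimage.color import rgb2yiq, yiq2rgb  # the helper assumes these are in scope

rgba = np.zeros((32, 32, 4), dtype=np.uint8)
rgba[..., :3] = 128   # mid-gray
rgba[..., 3] = 255    # fully opaque
im = Image.fromarray(rgba, mode='RGBA')

out = _equalize_luminance(im, new_mean=0.5)  # shift the mean Y of visible pixels to 0.5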
Example No. 5
 def test_yuv_roundtrip(self, channel_axis):
     img_rgb = img_as_float(self.img_rgb)[::16, ::16]
     img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis)
     assert_array_almost_equal(
         yuv2rgb(rgb2yuv(img_rgb, channel_axis=channel_axis),
                 channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         yiq2rgb(rgb2yiq(img_rgb, channel_axis=channel_axis),
                 channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ypbpr2rgb(rgb2ypbpr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ycbcr2rgb(rgb2ycbcr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ydbdr2rgb(rgb2ydbdr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
Example No. 6
def get_colorized(in_bw, in_marked, out_name):
    start = time.time()
    print(f"Processing {in_bw} + {in_marked} -> {out_name}")

    marks, im = preprocess(in_bw, in_marked)
    result = colorize(marks, im)

    # convert to scaled RGB
    result = yiq2rgb(result)
    result = np.clip(result, 0, 1)

    # write to outfile
    plt.imsave(out_name, result)

    # optionally display colorized result
    if DISPLAY:
        plt.imshow(result)
        plt.show()

    print("runtime in sec: ", (time.time() - start))
Example No. 7

def histogram_equalize(im_orig):
    """
    Performs histogram equalization on the image.
    :param im_orig: a grayscale or RGB float64 image with values in [0, 1]
    :return: [im_eq, hist_orig, cum_hist] - the equalized image, the original histogram,
             and the normalized cumulative histogram
    """
    if len(im_orig.shape) > 2:
        # rgb
        YIQim = rgb2yiq(im_orig) * 255
        hist_orig, bin_edges = np.histogram(YIQim[:, :, 0], 256)
        rows, columns, dim = im_orig.shape
        cum_hist = np.cumsum(hist_orig)
        cum_hist = cum_hist.astype(np.float64)
    else:
        # grayscale
        im_orig = im_orig * 255  # avoid mutating the caller's array in place
        hist_orig, bin_edges = np.histogram(im_orig, 256)
        cum_hist = np.cumsum(hist_orig)
        cum_hist = cum_hist.astype(np.float64)
        rows, columns = im_orig.shape

    tot_pixels = rows * columns
    cum_hist = (cum_hist / tot_pixels)
    minimum = min(np.nonzero(cum_hist)[0])
    maximum = np.nonzero(cum_hist)[0][-1]
    minVal = cum_hist[minimum]
    maxVal = cum_hist[maximum]
    cum_hist = (255 * ((cum_hist - minVal) / (maxVal - minVal)))
    cum_hist = np.around(cum_hist)
    if len(im_orig.shape) > 2:
        im_eq = np.copy(YIQim)
        y_values = cum_hist[YIQim[:, :, 0].astype(np.uint8)]  # uint8, not int8, to avoid wraparound above 127
        im_eq[:, :, 0] = y_values
        im_eq = yiq2rgb(im_eq / 255)
    else:
        im_eq = cum_hist[im_orig.astype(np.uint8)] / 255  # back to [0, 1], matching the RGB branch
    cum_hist /= 255
    cum_hist = np.clip(cum_hist, 0, 1)
    return [im_eq, hist_orig, cum_hist]
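A usage sketch, assuming the module-level imports (numpy, and skimage's rgb2yiq/yiq2rgb) that the function relies on; the input is synthetic:

import numpy as np

# hypothetical float64 RGB image in [0, 1]
im = np.random.random((100, 100, 3))
im_eq, hist_orig, cum_hist = histogram_equalize(im)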
Example No. 8
def yiq_blend(im1, im2, mask, max_levels, filter_size_im, filter_size_mask):
    """
    :param im1: input rgb image to be blended
    :param im2: input rgb image to be blended
    :param mask: boolean mask containing True and False representing which parts of im1 and im2 should
                appear in the resulting im_blend
    :param max_levels: max_levels parameter you should use when generating the Gaussian and Laplacian pyramids.
    :param filter_size_im: is the size of the Gaussian filter (an odd scalar that represents a squared filter)
                    which defining the filter used in the construction of the Laplacian pyramids of im1 and im2
    :param filter_size_mask: size of the Gaussian filter(an odd scalar that represents a squared filter) which
                defining the filter used in the construction of the Gaussian pyramid of mask.
    :return: the blended image
    """
    y_im1 = rgb2yiq(im1)
    y_im2 = rgb2yiq(im2)
    channel = 0
    y_im2[:, :, channel] = pyramid_blending(y_im1[:, :, channel],
                                            y_im2[:, :, channel],
                                            mask, max_levels,
                                            filter_size_im, filter_size_mask)

    blended_i = yiq2rgb(y_im2)
    return blended_i
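A usage sketch; `pyramid_blending` comes from the surrounding module, and the inputs below are illustrative placeholders:

import numpy as np

im1 = np.random.random((256, 256, 3))  # hypothetical RGB floats in [0, 1]
im2 = np.random.random((256, 256, 3))
mask = np.zeros((256, 256), dtype=bool)
mask[:, :128] = True                   # take the left half from im1

blended = yiq_blend(im1, im2, mask, max_levels=4,
                    filter_size_im=5, filter_size_mask=5)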
Example No. 9

def vidmag_fn(input_fullname, parameters):

    alpha = parameters['alpha']
    lambda_c = parameters['lambda_c']
    fl = parameters['fl']
    fh = parameters['fh']
    samplingRate = parameters['samplingRate']
    chromAttenuation = parameters['chromAttenuation']
    nlevels = parameters['nlevels']

    # Butterworth coefficients; scipy.signal.butter returns (b, a), so
    # low_a/high_a hold the numerator (b) and low_b/high_b the denominator (a)
    [low_a, low_b] = signal.butter(1, fl / samplingRate, 'low')
    [high_a, high_b] = signal.butter(1, fh / samplingRate, 'low')

    # output fullname format
    input_filename = os.path.splitext(os.path.basename(input_fullname))[0]
    output_fullname = c.PROCESSED_VIDEO_DIR+input_filename+'-butter-from-'+str(fl)+'-to-'+str(fh)+'Hz'+\
              '-alpha-'+str(alpha)+'-lambda_c-'+str(lambda_c)+\
              '-chromAtn-'+str(chromAttenuation)+'.mp4'

    input_video = cv2.VideoCapture(input_fullname)
    vidHeight = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vidWidth = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
    fr = int(input_video.get(cv2.CAP_PROP_FPS))
    length = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT))

    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    output_video = cv2.VideoWriter(output_fullname, fourcc, fr,
                                   (vidWidth, vidHeight), 1)

    # First frame
    temp_cdata = input_video.read()
    rgbframe = temp_cdata[1].astype('float') / 255.0

    # get desired sizes used in all the Laplacian pyramid levels
    dsizes = np.zeros((nlevels + 1, 2))
    for k in range(0, nlevels + 1):
        dsizes[k, :] = [
            np.floor(rgbframe.shape[0] / (2**k)),
            np.floor(rgbframe.shape[1] / (2**k))
        ]
    desired_sizes = tuple(map(tuple, dsizes))
    print(desired_sizes)

    # first frame processing (initial conditions)
    frame = color.rgb2yiq(rgbframe)
    lpyr = buildlpyr(frame, nlevels, desired_sizes)  # creates the Laplacian pyramid
    lowpass1 = lpyr
    lowpass2 = lpyr
    pyr_prev = lpyr

    output_frame = color.yiq2rgb(frame)  # back to RGB color space
    output_frame = output_frame * 255
    output_video.write(output_frame.astype('uint8'))

    # processing remaining frames
    counter = 1
    while input_video.isOpened():
        print("Processing: %.1f%%" % (100 * counter / length))

        temp_cdata = input_video.read()
        if not (temp_cdata[0]):
            break

        # from RGB to YIQ
        rgbframe = temp_cdata[1].astype('float') / 255
        frame = color.rgb2yiq(rgbframe)

        # Laplacian pyramid (expansion)
        lpyr = buildlpyr(frame, nlevels, desired_sizes)

        # Temporal filter
        lowpass1 = (-high_b[1] * lowpass1 + high_a[0] * lpyr +
                    high_a[1] * pyr_prev) / high_b[0]
        lowpass2 = (-low_b[1] * lowpass2 + low_a[0] * lpyr +
                    low_a[1] * pyr_prev) / low_b[0]
        filtered = lowpass1 - lowpass2
        pyr_prev = lpyr

        # Amplification
        delta = lambda_c / 8 / (1 + alpha)
        exaggeration_factor = 2
        lambda_ = (vidHeight**2 + vidWidth**2)**0.5 / 3
        filtered[0] = np.zeros_like(filtered[0])
        filtered[-1] = np.zeros_like(filtered[-1])

        for i in range(nlevels - 1, 1, -1):
            # equation 14 (paper vidmag, see references)
            currAlpha = lambda_ / delta / 8 - 1
            currAlpha = currAlpha * exaggeration_factor
            # from figure 6 (paper vidmag, see references)
            if currAlpha > alpha:
                filtered[i] = alpha * filtered[i]
            else:
                filtered[i] = currAlpha * filtered[i]
            lambda_ = lambda_ / 2

        # Laplacian pyramid (contraction): accumulate from the coarsest level up
        # (assumes buildlpyr returns levels coarsest-first, as the dstsize progression implies)
        pyr_contraida = filtered[0]
        for i in range(nlevels - 1):
            aux = cv2.pyrUp(pyr_contraida,
                            dstsize=(int(desired_sizes[nlevels - 2 - i][1]),
                                     int(desired_sizes[nlevels - 2 - i][0])))
            pyr_contraida = cv2.add(aux, filtered[i + 1])

        # chroma attenuation of the I and Q components
        pyr_contraida[:, :, 1] = pyr_contraida[:, :, 1] * chromAttenuation
        pyr_contraida[:, :, 2] = pyr_contraida[:, :, 2] * chromAttenuation

        # adding contracted pyramid to current frame
        output_frame = pyr_contraida + frame

        # recovering rgb frame
        output_frame = color.yiq2rgb(output_frame)
        output_frame = np.clip(output_frame, a_min=0, a_max=1)
        output_frame = output_frame * 255

        # saving processed frame to output video
        output_video.write(output_frame.astype('uint8'))
        counter = counter + 1

        # end of while loop

    # release video
    output_video.release()
    input_video.release()

    return output_fullname
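The per-frame update above is a first-order IIR difference equation applied level by level. A minimal sketch on a 1-D signal, checking the manual recursion against scipy.signal.lfilter (signal, rate, and cutoff are illustrative):

import numpy as np
from scipy import signal

fs, fl = 30.0, 0.5                       # hypothetical sampling rate and cutoff (Hz)
b, a = signal.butter(1, fl / fs, 'low')  # scipy returns (b, a): numerator first

x = np.random.random(100)                # hypothetical time series for one pixel/level
y = np.zeros_like(x)
y[0] = b[0] * x[0] / a[0]                # zero initial conditions, as in lfilter
for n in range(1, len(x)):
    # the same first-order difference equation as the per-frame update above
    y[n] = (-a[1] * y[n - 1] + b[0] * x[n] + b[1] * x[n - 1]) / a[0]

np.testing.assert_allclose(y, signal.lfilter(b, a, x))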
Example No. 10
            for j_b in range(B_prime_pyramid[l].shape[1]):
                i_a, j_a = best_match(A_pyramid, A_prime_pyramid, B_pyramid,
                                      B_prime_pyramid, s, l, i_b, j_b)
                B_prime_pyramid[l][i_b][j_b] = A_prime_pyramid[l][i_a][j_a]
                s[(i_b, j_b)] = (i_a, j_a)
    return B_prime_pyramid[0]


if __name__ == '__main__':
    A = plt.imread(INPUT + A_NAME)
    A_prime = plt.imread(INPUT + A_PRIME_NAME)
    B = plt.imread(INPUT + B_NAME)
    if USE_LUMINANCE:
        A, A_prime, B = rgb2yiq(A), rgb2yiq(A_prime), rgb2yiq(B)
        transform_func, inverse_transform_func = compute_luminance_transforms(
            A, B)
        A[:, :, 0] = transform_func(A[:, :, 0])
        A_prime[:, :, 0] = transform_func(A_prime[:, :, 0])
        B[:, :, 0] = transform_func(B[:, :, 0])

    B_prime = create_image_analogy(A, A_prime, B)
    if USE_LUMINANCE:
        B_prime[:, :, 0] = inverse_transform_func(B_prime[:, :, 0])
        B_prime = yiq2rgb(B_prime)

    os.makedirs(OUTPUT, exist_ok=True)
    plt.imsave(OUTPUT + B_PRIME_NAME, B_prime / 255.)
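`compute_luminance_transforms` is not shown in this fragment. A plausible reconstruction, following the luminance remapping from Hertzmann et al.'s Image Analogies (match the Y statistics of A to those of B), might look like this; treat it as an assumption, not the author's code:

import numpy as np

def compute_luminance_transforms(A, B):
    """Hypothetical reconstruction: remap A's luminance statistics onto B's."""
    mu_a, sigma_a = A[:, :, 0].mean(), A[:, :, 0].std()
    mu_b, sigma_b = B[:, :, 0].mean(), B[:, :, 0].std()

    def forward(y):
        return (sigma_b / sigma_a) * (y - mu_a) + mu_b

    def inverse(y):
        return (sigma_a / sigma_b) * (y - mu_b) + mu_a

    return forward, inverse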
Example No. 11
plt.subplot(221)
plt.imshow(imgRGB)
plt.title("Original Image")

imgRGB[:, :, 0] = cv2.equalizeHist(imgRGB[:, :, 0])
imgRGB[:, :, 1] = cv2.equalizeHist(imgRGB[:, :, 1])
imgRGB[:, :, 2] = cv2.equalizeHist(imgRGB[:, :, 2])
plt.subplot(222)
plt.imshow(imgRGB)
plt.title("RGB")

# imgHSV[:, :, 2] = cv2.equalizeHist(imgHSV[:, :, 2])
plt.subplot(223)
plt.imshow(scl.hsv2rgb(imgHSV))
plt.title("HSV")

# imgYIQ[:, :, 0] = cv2.normalize(cv2.equalizeHist(cv2.normalize(imgYIQ[:, :, 0], None, 0, 255, cv2.NORM_MINMAX)), None, 0.0, 1.0, cv2.NORM_MINMAX)
# imgYIQ[:, :, 0] = yiq1
plt.subplot(224)
plt.imshow(scl.yiq2rgb(imgYIQ))
print(imgYIQ[:, :, 0].max())  # .max() must be called; bare .max prints the method object
plt.title("YIQ")

plt.subplots_adjust(left=0.1,
                    bottom=0.1,
                    right=0.9,
                    top=0.9,
                    wspace=0.6,
                    hspace=0.6)
plt.show()
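The commented-out YIQ line above compresses several steps into one; split out (and with an explicit uint8 cast, which cv2.equalizeHist requires), an equivalent sketch is:

import cv2
import numpy as np

# equalize only the luminance (Y) channel of the YIQ image
y = imgYIQ[:, :, 0]
y_u8 = cv2.normalize(y, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
y_eq = cv2.equalizeHist(y_u8)
imgYIQ[:, :, 0] = cv2.normalize(y_eq.astype(np.float64), None, 0.0, 1.0, cv2.NORM_MINMAX)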
Example No. 12

def quantize(im_orig, n_quant, n_iter):
    """
    Performs optimal quantization of a given grayscale or RGB image.

    :param im_orig: the input grayscale or RGB image to be quantized (float64 image with values in [0, 1])
    :param n_quant: the number of intensities the output im_quant image should have
    :param n_iter: the maximum number of iterations of the optimization procedure
    :return: im_quant - the quantized output image.
            error - an array with shape (n_iter,) (or fewer) of the total intensity error for each
            iteration of the quantization procedure
    """
    error = []
    q = np.array([0] * n_quant, dtype=np.float64)
    z = [0] * (n_quant + 1)
    if len(im_orig.shape) > 2:
        # rgb
        YIQim = rgb2yiq(im_orig)
        hist = np.histogram(YIQim[:, :, 0] * 255, bins=256)[0]
    else:
        # grayscale
        hist = np.histogram(im_orig * 255, bins=256)[0]
    cum_hist = np.cumsum(hist)
    for i in range(1, n_quant):
        z[i] = np.where(cum_hist > (i / n_quant) * cum_hist[255])[0][0] - 1
    z[n_quant] = 255

    q_1 = 0
    q_2 = 0
    for i in range(n_iter):
        change = False  # boolean to see if z changed

        # calculate new q
        for j in range(n_quant):
            for x in range(z[j], z[j + 1] + 1):
                q_2 += hist[x]
                q_1 += (x * hist[x])
            q[j] = int(q_1 / q_2)
            q_1 = 0
            q_2 = 0

        # calculate new z
        for j in range(1, n_quant):
            z_1 = int((q[j - 1] + q[j]) / 2)
            if z_1 != z[j]:
                z[j] = z_1
                change = True

        error.append(compute_error(n_quant, z, q, hist))
        if not change:
            lut = create_lut(z, q)
            if len(im_orig.shape) > 2:
                YIQim *= 255
                im_quant = np.copy(YIQim)
                # uint8, not int8: Y values run up to 255 and would wrap in int8
                y_values = lut[YIQim[:, :, 0].astype(np.uint8)]
                im_quant[:, :, 0] = y_values
                im_quant = yiq2rgb(im_quant / 255)  # rescale to [0, 1] before converting back
            else:
                im_orig = im_orig * 255  # avoid mutating the caller's array
                im_quant = lut[im_orig.astype(np.uint8)]
            return [im_quant, error]

    lut = create_lut(z, q)
    if len(im_orig.shape) > 2:
        YIQim *= 255
        im_quant = np.copy(YIQim)
        y_values = lut[YIQim[:, :, 0].astype(np.uint8)]  # uint8, not int8
        im_quant[:, :, 0] = y_values
        im_quant = yiq2rgb(im_quant / 255)
    else:
        im_orig = im_orig * 255  # avoid mutating the caller's array
        im_quant = lut[im_orig.astype(np.uint8)]
    return [im_quant, error]
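A usage sketch, with a synthetic float64 image in [0, 1] as the docstring requires:

import numpy as np

im = np.random.random((128, 128))  # hypothetical grayscale input
im_quant, error = quantize(im, n_quant=4, n_iter=10)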
Example No. 13
def main(content_image_path, style_image_path, iterations, content_img_height,
         style_img_height, tv_weight,
         style_weight, content_weight, save_gif, preserve_color, learning_rate,
         beta_1, beta_2, epsilon):
    """Performs neural style transfer on a content image and style image."""
    model = get_model()

    # Load images
    content_image = load_and_process_img(content_image_path, content_img_height)
    style_image = load_and_process_img(style_image_path, style_img_height)
    content_image_yiq = rgb2yiq(
        load_img(content_image_path, content_img_height))

    # Compute content and style features
    style_outputs = model(style_image)
    content_outputs = model(content_image)

    # Get the style and content feature representations from our model
    style_features = [style_layer[0] for style_layer in
                      style_outputs[:NUM_STYLE_LAYERS]]
    content_features = [content_layer[0] for content_layer in
                        content_outputs[NUM_STYLE_LAYERS:]]
    gram_style_features = [gram_matrix(style_feature) for style_feature in
                           style_features]

    # Set initial image
    init_image = load_and_process_img(content_image_path, content_img_height,
                                      as_gray=preserve_color)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    # Create our optimizer
    optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1,
                                   beta_2=beta_2, epsilon=epsilon)

    # Create config dictionary
    loss_weights = (style_weight, content_weight, tv_weight)
    config = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'gram_style_features': gram_style_features,
        'content_features': content_features
    }

    images = []

    # Optimization loop
    for step in range(1, iterations + 1):
        start_time = time.time()
        grads, loss = compute_grads(config)
        optimizer.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, MIN_VALS, MAX_VALS)
        init_image.assign(clipped)
        img = deprocess_image(init_image.numpy())
        if preserve_color:
            img = rgb2yiq(img)
            img[:, :, 1:] = content_image_yiq[:, :, 1:]
            img = yiq2rgb(img)
            img = np.clip(img, 0, 1)
            img = (img * 255).astype('uint8')
        images.append(img)
        end_time = time.time()
        print('Finished step {} ({:.03} seconds)\nLoss: {}\n'.format(
            step, end_time - start_time, loss))

    # Save final image
    imsave('stylized.jpg', images[-1])

    if save_gif:
        create_gif(images, 'transformation.gif')
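The color-preservation branch above is a self-contained trick: keep the stylized luminance, reuse the content chroma. A minimal sketch of just that step, on illustrative float images in [0, 1]:

import numpy as np
from skimage.color import rgb2yiq, yiq2rgb

stylized = np.random.random((64, 64, 3))    # hypothetical stylized RGB
content = np.random.random((64, 64, 3))     # hypothetical content RGB

out = rgb2yiq(stylized)
out[:, :, 1:] = rgb2yiq(content)[:, :, 1:]  # replace I/Q (chroma) with the content's
out = np.clip(yiq2rgb(out), 0, 1)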
Example No. 14
def main(args):
    content_image = Image.open(FLAGS.content)
    content_image = resize(content_image)
    style_image = Image.open(FLAGS.style).resize(content_image.size)

    content_image = np.asarray(content_image, dtype=np.float32)
    style_image = np.asarray(style_image, dtype=np.float32)

    # match luminance between style and content image
    style_image = match_luminance(content_image, style_image)

    content_image = np.expand_dims(content_image, 0)
    style_image = np.expand_dims(style_image, 0)

    img_shape = content_image.shape
    with tf.name_scope('image'):
        random_image = tf.random_normal(mean=1, stddev=.01, shape=img_shape)
        image = tf.Variable(initial_value=random_image,
                            name='image',
                            dtype=tf.float32)
        tf.summary.image('img', tf.clip_by_value(image, 0, 255), max_outputs=1)
        # subtract mean
        inputs = image - IMAGENET_MEAN
        # convert to BGR, because VGG16 was trained on BGR images
        channels = tf.unstack(inputs, axis=-1)
        inputs = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
    _, endpoints = vgg_16(inputs, is_training=False, scope='vgg_16')

    saver = tf.train.Saver(var_list=tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_16'))

    style_tensors = [endpoints[l] for l in STYLE_LAYERS]
    content_tensors = [endpoints[l] for l in CONTENT_LAYERS]

    image_style_tensors = [endpoints[l] for l in STYLE_LAYERS]
    image_content_tensors = [endpoints[l] for l in CONTENT_LAYERS]

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.ckpt_file)
        style_features = sess.run(style_tensors,
                                  feed_dict={image: style_image})
        content_features = sess.run(content_tensors,
                                    feed_dict={image: content_image})

    # define style loss
    style_losses = []
    for image_layer, style_layer in zip(image_style_tensors, style_features):
        _, height, width, channels = image_layer.get_shape().as_list()
        size = height * width * channels

        # compute Gram matrices
        image_feats_reshape = tf.reshape(image_layer, [-1, channels])
        image_gram = tf.matmul(tf.transpose(image_feats_reshape),
                               image_feats_reshape) / size
        style_feats_reshape = tf.reshape(style_layer, [-1, channels])
        style_gram = tf.matmul(tf.transpose(style_feats_reshape),
                               style_feats_reshape) / size

        loss = tf.square(
            tf.norm(image_gram - style_gram, ord='fro', axis=(0, 1)))
        style_losses.append(loss)

    style_loss = STYLE_WEIGHT * tf.add_n(style_losses)

    # define content loss
    content_losses = []
    for image_layer, content_layer in zip(image_content_tensors,
                                          content_features):
        _, height, width, channels = image_layer.get_shape().as_list()
        size = height * width * channels
        loss = tf.nn.l2_loss(image_layer - content_layer) / size
        content_losses.append(loss)

    content_loss = CONTENT_WEIGHT * tf.add_n(content_losses)

    # total variation denoising loss
    tvd_loss = TVD_WEIGHT * tf.reduce_sum(tf.image.total_variation(image))

    loss = style_loss + content_loss + tvd_loss

    global_step = tf.train.get_or_create_global_step()
    optim = tf.train.AdamOptimizer(LEARNING_RATE)
    train_op = optim.minimize(loss, global_step=global_step, var_list=[image])

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.ckpt_file)

        noise = tf.random_normal(mean=1, stddev=.01, shape=img_shape)
        rand_init = tf.clip_by_value(content_image * noise, 0, 255)
        image.assign(rand_init).eval()
        for step in range(FLAGS.steps):
            t0 = time.time()
            _, style_loss_val, content_loss_val, tvd_loss_val, loss_val = sess.run(
                [train_op, style_loss, content_loss, tvd_loss, loss])
            t = time.time() - t0
            if step % 10 == 0:
                format_str = 'step: {}/{} loss: style: {}, content: {}, tvd: {}, total: {} | time: {:.2f} s/step'
                print(
                    format_str.format(step, FLAGS.steps, style_loss_val,
                                      content_loss_val, tvd_loss_val, loss_val,
                                      t))

        img = sess.run(image)[0]

    # transfer luminance
    img = np.clip(img, 0, 255) / 255
    content_image = content_image[0] / 255
    result_y = np.expand_dims(color.rgb2yiq(img)[:, :, 0], 2)
    content_iq = color.rgb2yiq(content_image)[:, :, 1:]
    img = np.dstack((result_y, content_iq))
    img = np.clip(color.yiq2rgb(img), 0, 1)
    imsave(FLAGS.result_file, img)
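The Gram matrix in the style loss is one reshape and one matrix product; the same arithmetic in plain NumPy (the feature-map shape is illustrative):

import numpy as np

feats = np.random.random((1, 32, 32, 64))  # hypothetical layer activations, NHWC
_, h, w, c = feats.shape
flat = feats.reshape(-1, c)                # (h*w, c)
gram = flat.T @ flat / (h * w * c)         # (c, c), normalized as in the loss above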
Example No. 15
    Cp = np.expand_dims(C, axis=0)

    # Initialize model
    params = init_param()
    vgg = VGG16(params)
    model = NSTModel(vgg)
    X = tf.Variable(np.random.random((1, 224, 224, 3)).astype('float64'),
                    name='GenImg',
                    trainable=True)
    Lt, Loss = model.train(
        X, Cp, Sp, weight_s=[3 / 64, 3 / 128, 3 / 256, 3 / 512, 3 / 512])

    I = Lt.numpy().reshape(224, 224, 3)
    I[:, :, 1] = C[:, :, 1]
    I[:, :, 2] = C[:, :, 2]
    I = yiq2rgb(I)
    C = yiq2rgb(tf.reshape(C, [224, 224, 3]).numpy())
    S = yiq2rgb(tf.reshape(S, [224, 224, 3]).numpy())

    plt.figure()
    plt.subplot(131)
    plt.imshow(C)
    plt.title('Context')
    plt.subplot(132)
    plt.imshow(S)
    plt.title('Style')
    plt.subplot(133)
    plt.imshow(I)
    plt.title('NST')
    plt.savefig('comparison.jpg')
    plt.show()