Example #1
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    #kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32) #sharpen
    print(generated.shape)
    #ima = Image.fromarray(generated)
    #dst = cv.filter2D(ima, -1, kernel=kernel)
    #dst.save("/content/drive/My Drive/5405_digitalMedia/result/e.png")
    #image_arr = np.array(dst)

    x_test = deprocess_image(x_test)
    '''
    img = generated[0, :, :, :]
    im = Image.fromarray(img.astype(np.uint8))
    im.save("/content/drive/My Drive/5405_digitalMedia/result/f.png")
    src = cv.imread("/content/drive/My Drive/5405_digitalMedia/result/f.png")
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], dtype=np.float32)
    sharpen_image = cv.filter2D(src, cv.CV_32F, kernel=kernel)
    sharpen_image = cv.convertScaleAbs(sharpen_image)
    cv.imwrite("/content/drive/My Drive/5405_digitalMedia/result/g.png",sharpen_image)
    '''

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save("result.jpg")  #('deblur'+image_path)
Example #2
def deblur_image(img_dir, batch_size):
    if os.path.exists('./result'):
        shutil.rmtree('./result')
    os.makedirs('./result')

    gen = generator_model()
    gen.load_weights('generator.h5')

    print('\tdeblur image')
    data = load_images(img_dir, batch_size)
    img_names = os.listdir(img_dir + '/A')
    print('Names', img_names)
    x_test = data['A']

    print('\tGenerator')
    generated_images = gen.predict(x=x_test, batch_size=batch_size)
    print('\tGenerated')
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        print("\tPredicting", i)
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        im = Image.fromarray(img.astype(np.uint8))
        print('saving result/{}'.format(img_names[i]))
        im.save('result/{}'.format(img_names[i]))
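A hypothetical invocation (the path and batch size are illustrative; load_images expects blurred inputs in an A subfolder):

deblur_image('./images', batch_size=4)  # results land in ./result/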
Example #3
def plot_gradcam(image, vgg_cam, res_cam, dense_cam):
    image = deprocess_image(image)
    name_dict = {
        'Original Image': image,
        'GradCAM (VGG-16)': apply_mask(image, vgg_cam),
        'GradCAM (ResNet-18)': apply_mask(image, res_cam),
        'GradCAM (DenseNet-121)': apply_mask(image, dense_cam)
    }

    plt.style.use('seaborn-notebook')
    fig = plt.figure(figsize=(20, 4))
    for i, (name, img) in enumerate(name_dict.items()):
        ax = fig.add_subplot(1, 4, i + 1, xticks=[], yticks=[])
        if i:
            img = img[:, :, ::-1]
        ax.imshow(img)
        ax.set_xlabel(name, fontweight='bold')

    fig.suptitle('Localization with Gradient based Class Activation Maps',
                 fontweight='bold',
                 fontsize=16)
    plt.tight_layout()
    fig.savefig('outputs/grad_cam.png')
    plt.show()
    plt.close()
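apply_mask is not shown; a plausible sketch, assuming an OpenCV JET-colormap overlay (which would also explain the BGR-to-RGB flip img[:, :, ::-1] above), is:

import cv2
import numpy as np

def apply_mask(image, cam, alpha=0.5):
    # Colorize the CAM with the JET colormap and alpha-blend it over the
    # uint8 HxWx3 image (assumed signature)
    heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
    return np.uint8(alpha * heatmap + (1 - alpha) * image)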
Example #4
def predict(options, img_read_path, img_write_path):
    # Read image
    content = process_image(img_read_path, -1, -1, resize=False)
    ori_height = content.shape[1]
    ori_width = content.shape[2]

    # Pad image
    content = get_padding(content)
    height = content.shape[1]
    width = content.shape[2]

    # Get eval model
    eval_model = get_evaluate_model(width, height)
    eval_model.load_weights(options['weights_read_path'])

    # If flag is set, print model summary and generate model description
    if options["plot_model"]:
        eval_model.summary()
        plot_model(eval_model, to_file='model.png')

    # Generate output and save image
    res = eval_model.predict([content])
    output = deprocess_image(res[0], width, height)
    output = remove_padding(output, ori_height, ori_width)
    imwrite(img_write_path, output)
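get_padding and remove_padding are assumed here to round the spatial dimensions up to a multiple of the network's total stride (taken as 4 below, an assumption) and to crop the output back afterwards:

import numpy as np

def get_padding(content, factor=4):
    # Pad H and W (axes 1 and 2 of the NHWC batch) to the next multiple of factor
    _, h, w, _ = content.shape
    pad_h = (factor - h % factor) % factor
    pad_w = (factor - w % factor) % factor
    return np.pad(content, ((0, 0), (0, pad_h), (0, pad_w), (0, 0)), mode='reflect')

def remove_padding(output, ori_height, ori_width):
    # Crop the deprocessed HxWx3 output back to the original size
    return output[:ori_height, :ori_width, :]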
Example #5
def guided_backprop(input_data, model, index, layer_name, n_classes):
    # Define the loss function
    @tf.custom_gradient
    def guidedRelu(x):
        def grad(dy):
            return tf.cast(dy > 0, "float32") * tf.cast(x > 0, "float32") * dy

        return tf.nn.relu(x), grad

    def category_loss(x):
        return categorical_crossentropy(tf.one_hot([index], n_classes), x)

    # Update with loss output
    loss_layer = Lambda(category_loss)(model.output)
    guidedModel = Model(inputs=model.input, outputs=loss_layer)

    # Replace relu activations with our custom activation function
    for layer in guidedModel.layers:
        if (hasattr(layer, "activation")):
            if layer.activation == relu:
                layer.activation = guidedRelu

    # Compute the gradient.
    with tf.GradientTape() as tape:
        indata = tf.cast(input_data, tf.float32)
        tape.watch(indata)
        loss = guidedModel(indata)

    gradients = tape.gradient(loss, indata)[0]

    # deprocess_image rescales the gradients for display; flipping the last
    # axis reverses the channel order (e.g., BGR -> RGB)
    gradients = np.flip(deprocess_image(np.array(gradients)), -1)

    return gradients
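For gradient visualizations like this one, deprocess_image is usually the classic normalize-and-clip recipe (a sketch, not necessarily this repo's exact code):

import numpy as np

def deprocess_image(x):
    # Zero-center, compress to std 0.1, shift into [0, 1], then convert to uint8
    x = x - x.mean()
    x = x / (x.std() + 1e-5)
    x = x * 0.1 + 0.5
    x = np.clip(x, 0, 1)
    return (x * 255).astype('uint8')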
Example #6
    def style(self):
        """
		Run L-BFGS over the pixels of the generated image so as to
		minimize the neural style loss.
		"""
        print('\nDone initializing... Ready to style!')

        # initialize white noise image
        if K.image_dim_ordering() == 'th':
            x = np.random.uniform(
                0, 255, (1, 3, self.img_nrows, self.img_ncols)) - 128.
        else:
            x = np.random.uniform(
                0, 255, (1, self.img_nrows, self.img_ncols, 3)) - 128.

        for i in range(self.iterations):
            print('\n\tIteration: {}'.format(i + 1))

            toc = time.time()
            x, min_val, info = fmin_l_bfgs_b(self.loss,
                                             x.flatten(),
                                             fprime=self.grads,
                                             maxfun=20)

            # save current generated image
            img = deprocess_image(x.copy(), self.img_nrows, self.img_ncols)
            fname = self.output_img_path + '_at_iteration_%d.png' % (i + 1)
            imsave(fname, img)

            tic = time.time()

            print('\t\tImage saved as', fname)
            print('\t\tLoss: {:.2e}, Time: {} seconds'.format(
                float(min_val), float(tic - toc)))
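For the neural-style examples, deprocess_image is assumed to invert VGG preprocessing: reshape the flat L-BFGS vector, add back the ImageNet channel means, and flip BGR to RGB (a sketch matching the signature used above):

import numpy as np

def deprocess_image(x, img_nrows, img_ncols):
    x = x.reshape((img_nrows, img_ncols, 3))
    # Undo the VGG mean subtraction (BGR order), then BGR -> RGB
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')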
Example #7
def dream_on(original_img, feature_extractor, output_dir, iterations=1000, save_every=10, downscale_factor=2):

    #processed_img = preprocess_image(original_img)
    processed_img = original_img
    processed_img = tf.image.resize(processed_img, 
            (int(processed_img.shape[1]/downscale_factor), int(processed_img.shape[2]/downscale_factor))
        )
    img = processed_img

    x_size, y_size = int(processed_img.shape[1]), int(processed_img.shape[2])
    print(f"x_size: {x_size}, y_size:{y_size}")

    for i in range(iterations):
        

        files = os.listdir(f"{output_dir}")
        files = sorted(files, key=lambda x: int(x.split("_")[3].split(".")[0]))
        print(f"recent saves: {files[-2:]}")
    
        if os.path.isfile(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg"):
            print(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg Exist")

        elif len(os.listdir(f"{output_dir}"))==0:
            img = processed_img
            #img = tf.keras.preprocessing.image.img_to_array(img)
            tf.keras.preprocessing.image.save_img(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg", deprocess_image(img.numpy()))
        else:
            lastfile = files[-1]
        
            img = tf.keras.preprocessing.image.load_img(f"{output_dir}/{lastfile}")
            img = tf.keras.preprocessing.image.img_to_array(img)
            
            x_trim = 2
            y_trim = 2

            print(img.shape)
            #img = img[0:x_size-x_trim, 0:y_size-y_trim]
            img = tf.image.central_crop(img, central_fraction=0.99)
            img = tf.image.resize(img, (x_size, y_size))
            print(img.shape)

            #kernel = np.ones((5,5),np.float32)/25
            #img = cv2.filter2D(np.array(img),-1,kernel)
            #img = cv2.GaussianBlur(np.array(img), (9, 9), 0)
            #img = cv2.resize(img, (y_size, x_size))

            print(img.shape)
            img = tf.expand_dims(img, axis=0)
            img = inception_v3.preprocess_input(img)
            print(i%save_every)

            img = gradient_ascent_loop(img, feature_extractor, optim_steps, step_size, max_loss=None)

            if save_every>0 and i%save_every==0:
                deproc_img = deprocess_image(img.numpy())

                deproc_img = cv2.GaussianBlur(deproc_img, (3, 3), 0)

                tf.keras.preprocessing.image.save_img(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg", deproc_img)
                print(f"-------dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg-------")
Example #8
def dream_on(original_img, feature_extractor, output_name="result.jpg"):
    #original_img = preprocess_image(base_image_path)
    original_shape = original_img.shape[1:3]

    successive_shapes = [original_shape]
    for i in range(1, num_octave):
        shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
        print(shape)
        successive_shapes.append(shape)
    successive_shapes = successive_shapes[::-1]
    shrunk_original_img = tf.image.resize(original_img, successive_shapes[0])

    img = tf.identity(original_img)  # Make a copy
    for i, shape in enumerate(successive_shapes):
        print("Processing octave %d with shape %s" % (i, shape))
        img = tf.image.resize(img, shape)
        img = gradient_ascent_loop(img,
                                   feature_extractor=feature_extractor,
                                   iterations=iterations,
                                   learning_rate=step,
                                   max_loss=max_loss)
        upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img,
                                                       shape)
        same_size_original = tf.image.resize(original_img, shape)
        lost_detail = same_size_original - upscaled_shrunk_original_img

        img += lost_detail
        shrunk_original_img = tf.image.resize(original_img, shape)

    keras.preprocessing.image.save_img(output_name,
                                       deprocess_image(img.numpy()))
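For the DeepDream examples that preprocess with inception_v3, a matching deprocess_image sketch (an assumption) simply inverts the [-1, 1] scaling:

import numpy as np

def deprocess_image(x):
    # x arrives as a (1, H, W, 3) array in inception_v3's [-1, 1] range
    x = x.reshape((x.shape[1], x.shape[2], 3))
    x = (x / 2.0 + 0.5) * 255.0
    return np.clip(x, 0, 255).astype('uint8')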
Example #9
def gen_feature_activation(filter_index):
    global grads
    global input_img
    grads = {}

    # Gradient ascent
    for i in range(40):

        net.zero_grad()

        feature = net(input_img)

        feature0 = feature[:, filter_index, :, :]
        loss = torch.mean(feature0)

        input_img.register_hook(save_grad('input_img'))
        loss.backward()

        grads_ = grads['input_img']
        # print(grads.size())
        print(loss.item())
        input_img = input_img + grads_ * step

    output_img = deprocess_image(input_img.squeeze().cpu().detach().numpy())

    return output_img
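save_grad is the standard PyTorch hook factory; a sketch consistent with how the grads dict is used above:

grads = {}

def save_grad(name):
    # Return a hook that stores the gradient flowing into `name`
    def hook(grad):
        grads[name] = grad
    return hook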
Example #10
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    #    g.load_weights('weights/719/generator_2_640.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        im = Image.fromarray(img.astype(np.uint8))
        im.save('deblur' + image_path)
Example #11
def test(batch_size):
    data = load_images(TEST_FOLDER, batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights(SAVE_MODEL_PATH)
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
Example #12
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator_49_478.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('deblur' + image_path)
Example #13
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('/notebooks/deblur-gan/weights/89/generator_3_659.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        output_save_path = image_path.split('.')[0] + '_deblur.jpg'
        im.save(output_save_path)
Example #14
def depth(image_path):
    data = {
        'A_paths': [path + image_path],
        'A': np.array([preprocess_image(load_image(path + image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=rgb2gray(x_test))
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        #img=rgb2gray(img)
        output = img
        im = Image.fromarray(output.astype(np.uint8))
        im.save('./images/out/' + image_path)
Example #15
def deblur_image_split():
    files = os.listdir('./uploads/A')
    ext = os.path.basename(files[0]).split('.')[-1]
    img = Image.open('./uploads/A/' + files[0])
    new_img = Image.new('RGB', img.size)
    box_map = crop_image(img, ext)

    img_keys = list(box_map.keys())

    if os.path.exists('./result'):
        shutil.rmtree('./result')
    os.makedirs('./result')

    gen = generator_model()
    gen.load_weights('generator.h5')

    img_dir = './split_uploads'
    batch_size = len(img_keys)

    data = load_images(img_dir, batch_size)
    x_test = data['A']

    generated_images = gen.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        print("Predicting", img_keys[i])
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]

        print(box_map[img_keys[i]])
        new_img.paste(Image.fromarray(img), box_map[img_keys[i]])

        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('./result/img{}.{}'.format(chr(i + 65), ext))

    new_img.save('result/{}'.format(files[0]))
    print("Saving", files[0])

    print(img_keys)
Example #16
def conv_filter(model, layer_name, img):
    """Get the filter of conv layer.

    Args:
           model: keras model.
           layer_name: name of layer in the model.
           img: processed input image.

    Returns:
           filters.
    """
    # this is the placeholder for the input images
    input_img = model.input

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])

    try:
        layer_output = layer_dict[layer_name].output
    except KeyError:
        raise Exception('No layer named {}!'.format(layer_name))

    kept_filters = []
    for i in range(layer_output.shape[-1]):
        loss = K.mean(layer_output[:, :, :, i])

        # compute the gradient of the input picture with this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = utils.normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # step size for gradient ascent
        step = 1.
        # run gradient ascent for 20 steps
        fimg = img.copy()

        for j in range(40):
            loss_value, grads_value = iterate([fimg])
            fimg += grads_value * step

        # decode the resulting input image
        fimg = utils.deprocess_image(fimg[0])
        kept_filters.append((fimg, loss_value))

    # Sort the filter results by loss, best first (once, after the loop)
    kept_filters.sort(key=lambda x: x[1], reverse=True)

    return np.array([f[0] for f in kept_filters])
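A hypothetical call, assuming img is a preprocessed batch of one image and the model is a VGG-style Keras model (the layer name is illustrative):

filters = conv_filter(model, 'block3_conv1', img)
print(filters.shape)  # one visualization per filter in the layer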
Example #17
def test(batch_size):
    #data = load_images('./images/test', batch_size)
    y_train = sorted(glob.glob('/home/turing/td/data/*.png'))
    x_train = sorted(glob.glob('/home/turing/td/blur/*.png'))
    y_test, x_test = load_image(y_train[:5]), load_image(x_train[:5])
    g = generator_model()
    g.load_weights('weights1/428/generator_13_261.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        #print img.shape
        #img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        #y = cv2.cvtColor(y,cv2.COLOR_BGR2GRAY)
        #x = cv2.cvtColor(x,cv2.COLOR_BGR2GRAY)
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
Example #18
    def style(self):
        x = np.random.uniform(0, 255, (1, self.img_nrows, self.img_ncols, 3)) - 128.

        for i in range(self.iterations):
            print('\n\tIteration: {}'.format(i+1))

            toc = time.time()
            x, min_val, info = fmin_l_bfgs_b(self.loss, x.flatten(), fprime=self.grads, maxfun=20)

            img = deprocess_image(x.copy(), self.img_nrows, self.img_ncols)
            fname = self.output_img_path + '_at_iteration_%d.png' % (i+1)
            imsave(fname, img)

            tic = time.time()

            print('\t\tImage saved as', fname)
            print('\t\tLoss: {:.2e}, Time: {} seconds'.format(float(min_val), float(tic-toc)))
Example #19
base_image = utils.preprocess_image(opt.base_image_path, img_ncols,
                                    opt.img_nrows)
style_reference_image = utils.preprocess_image(opt.style_reference_image_path,
                                               img_ncols, opt.img_nrows)
combination_image = tf.Variable(
    utils.preprocess_image(opt.base_image_path, img_ncols, opt.img_nrows))

# Training
# For each epoch
for i in range(opt.n_epochs):

    loss, grads, cl, sl, tl = losses.compute_loss_and_grads(
        combination_image, base_image, style_reference_image,
        feature_extractor, opt.content_weight, opt.style_weight,
        opt.total_variation_weight, img_ncols, opt.img_nrows)

    # Apply the gradients through the optimizer
    optimizer.apply_gradients([(grads, combination_image)])
    # Every epoch_save epochs
    if i % opt.epoch_save == 0:
        # Log the epoch number and the total loss to the console
        print(
            'Iteration %d: loss=%.2f, content_loss:%.2f, style_loss:%.2f, total_variation_loss:%.2f'
            % (i, loss, cl, sl, tl))
        # Es "deprocessa" la imatges generada
        img = utils.deprocess_image(combination_image.numpy(), img_ncols,
                                    opt.img_nrows)
        # Build the filename for the generated image
        fname = opt.output_path + 'output_generated_at_iteration_%d.png' % i
        # Save the generated image to the path given as a parameter
        keras.preprocessing.image.save_img(fname, img)
Example #20
        logging_channel.send(x=time.time(), y="iteration %s start" % i)

        random_jitter = (JITTER * 2) * (np.random.random(img_size) - 0.5)
        tensor += random_jitter

        tensor, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                              tensor.flatten(),
                                              fprime=evaluator.grads,
                                              maxfun=OPTIM_ITER)
        loss_channel.send(x=i, y=float(min_val))
        logging_channel.send(x=time.time(),
                             y="iteration %s error %s" % (i, min_val))

        tensor = tensor.reshape((1, ) + img_size)
        tensor -= random_jitter
        img = utils.deprocess_image(np.copy(tensor))

        description = "File:{}\nOptimization Iterations:{}\nLayer number:{}\nFilter Number:{}\n\Coefficient:{}\nJitter:{}\nIteration:{}\n".\
                                format(BASE_IMAGE_PATH.split("/")[-1],
                                       OPTIM_ITER,
                                       LAYER_NR,
                                       FILTER_NR,
                                       COEFF,
                                       JITTER,
                                       i
                                      )

        result_channel.send(x=time.time(), y=neptune_image(img, description))

        activations = get_activations(img_recognition_network, LAYER_NR,
                                      tensor)
Example #21
    num_batches = int(np.ceil(model_args.nb_classes / float(args.batch_size)))

    for img_name in os.listdir(args.input_path):
        print('Processing %s' % img_name)
        img = preprocess_image_scale(os.path.join(args.input_path, img_name),
                                     img_size=args.img_size)
        imgs = np.repeat(img, model_args.nb_classes, axis=0)
        out_name = os.path.splitext(os.path.split(img_name)[-1])[0]

        for batch_idx in range(num_batches):
            idx = batch_idx * args.batch_size

            batch = imgs[idx:idx + args.batch_size]
            indices = batch_idx * args.batch_size + np.arange(batch.shape[0])

            if args.use_style_name:
                names = style_names[idx:idx + args.batch_size]
            else:
                names = indices
            print('  Processing styles %s' % str(names))

            out = transfer_style([batch, indices, 0.])[0]

            for name, im in zip(names, out):
                print('Saving file %s_style_%s.png' % (out_name, str(name)))
                imsave(
                    os.path.join(args.output_path,
                                 '%s_style_%s.png' % (out_name, str(name))),
                    deprocess_image(im[None, :, :, :].copy()))
Example #22
    # Function that makes a step after backpropping to the image
    make_step = K.function([], outputs, updates)

    # Perform optimization steps and save the results
    start_time = time.time()

    for i in range(args.num_iterations):
        out = make_step([])
        if (i + 1) % args.print_and_save == 0:
            print('Iteration %d/%d' % (i + 1, args.num_iterations))
            N = len(content_losses)
            for j, l in enumerate(out[1:N + 1]):
                print('    Content loss %d: %g' % (j, l))
            for j, l in enumerate(out[N + 1:-1]):
                print('    Style loss %d: %g' % (j, l))

            print('    Total style loss: %g' % (sum(out[N + 1:-1])))
            print('    TV loss: %g' % (out[-1]))
            print('    Total loss: %g' % out[0])
            stop_time = time.time()
            print('Did %d iterations in %.2fs.' %
                  (args.print_and_save, stop_time - start_time))
            x = K.get_value(pastiche_image)
            for s in range(nb_styles):
                fname = output_path + '_style%d_%d%s' % (
                    s, (i + 1) / args.print_and_save, ext)
                print('Saving image to %s.\n' % fname)
                img = deprocess_image(x[s:s + 1])
                imsave(fname, img)
            start_time = time.time()
Example #23
    # set target layer for CAM
    if args.model == 'vgg16' or args.model == 'densenet121':
        target_layer = model.features[-1]
    elif args.model == 'resnet18':
        target_layer = model.layer4[-1]

    # get given label's index
    label = {'covid_19': 0, 'lung_opacity': 1, 'normal': 2, 'pneumonia': 3}
    idx_to_label = {v: k for k, v in label.items()}
    if args.label is not None:
        label = label[args.label]
    else:
        label = None

    # load and preprocess image
    image = utils.load_image(args.image_path)

    warnings.filterwarnings("ignore", category=UserWarning)
    # pass image through model and get CAM for the given label
    cam = GradCAM(model=model, target_layer=target_layer)
    label, mask = cam(image, label)
    print(f'GradCAM generated for label "{idx_to_label[label]}".')

    # deprocess image and overlay CAM
    image = utils.deprocess_image(image)
    image = apply_mask(image, mask)

    # save the image
    utils.save_image(image, args.output_path)
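utils.deprocess_image here presumably undoes ImageNet normalization on a (1, 3, H, W) torch tensor; a sketch under that assumption:

import numpy as np

def deprocess_image(image):
    # Un-normalize with ImageNet statistics and return an HxWx3 uint8 array
    # (assumed convention for these utils)
    mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
    img = image.detach().squeeze(0).cpu().numpy() * std + mean
    return (np.clip(img.transpose(1, 2, 0), 0, 1) * 255).astype('uint8')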
Example #24
                      base_image,
                      description="Image you want to style neuraly"))
style_channel.send(
    x=time.time(),
    y=neptune_image(
        style_image,
        description="Image that represents the style you want to transfer"))

x = np.random.uniform(0, 255, (1, ) + img_size) - 128.

for i in range(STYLE_ITER):
    logging_channel.send(x=time.time(), y='Iteration: %s' % i)

    evaluator = Evaluator(layer_dict, CONTENT_WEIGHT, STYLE_WEIGHT)

    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=OPTIM_ITER)

    logging_channel.send(x=time.time(), y='Current loss value: %s' % min_val)
    loss_channel.send(x=i, y=float(min_val))

    img = utils.deprocess_image(np.copy(x))

    combined_channel.send(x = time.time(),y = neptune_image(raw_image = img,
                                                            description = "your neuraly styled image\nIteration:{}\nContent weight:{}\nStyle weight{}".\
                                                            format(i,CONTENT_WEIGHT,STYLE_WEIGHT)
                                                           )
                         )
Example #25
    row = filter_index // image_per_row
    col = filter_index - row * image_per_row
    # Gradient ascent
    for i in range(40):

        net.zero_grad()

        feature = net(input_img)

        feature0 = feature[:, filter_index, :, :]
        loss = torch.mean(feature0)

        input_img.register_hook(save_grad('input_img'))
        loss.backward()

        grads_ = grads['input_img']
        # print(grads.size())
        grads = {}
        input_img = input_img + grads_ * step

    print(filter_index, input_img.size())
    output_img = deprocess_image(input_img.squeeze().cpu().detach().numpy())

    display_grad[row * size:(row + 1) * size,
                 col * size:(col + 1) * size, :] = output_img

scale = 1. / size
plt.figure(figsize=(scale * display_grad.shape[1],
                    scale * display_grad.shape[0]))
plt.imshow(display_grad / 255.0, aspect='auto', cmap='viridis')
plt.show()
Example #26
    def train(self,
              train_data,
              batch_size=1,
              epoch_num=10,
              critic_updates=5,
              save_freq=100,
              val_freq=200,
              show_freq=10,
              generate_image_freq=10,
              pre_trained_model=None):
        # implement training on two models
        cur_model_name = 'Deblur_{}'.format(int(time.time()))
        sharp, blur = train_data['B'], train_data['A']
        min_loss = np.inf
        save_to = 'deblur_train/' + cur_model_name
        i = 0
        train_critic = 0
        d_loss = None

        with tf.Session() as sess:
            merge_all = tf.summary.merge_all()
            merge_D = tf.summary.merge(self.d_merge)
            merge_G = tf.summary.merge(self.g_merge)
            writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                           sess.graph)

            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            if pre_trained_model is not None:
                try:
                    print("Load the model from: {}".format(pre_trained_model))
                    saver.restore(sess, 'model/{}'.format(pre_trained_model))
                    #writer = tf.summary.FileWriterCache.get('log/{}'.format(pre_trained_model))
                except Exception:
                    print("Load model Failed!")
                    pass

            for epoch in range(epoch_num):
                permutated_indexes = np.random.permutation(sharp.shape[0])
                if epoch >= self.param.g_train_num:
                    train_critic = 1

                for index in range(int(blur.shape[0] / batch_size)):
                    batch_indexes = permutated_indexes[index *
                                                       batch_size:(index + 1) *
                                                       batch_size]
                    if batch_indexes.shape == ():
                        batch_indexes = [batch_indexes]
                    sharp_batch = sharp[batch_indexes]
                    blur_batch = blur[batch_indexes]
                    if train_critic:

                        generated_images = sess.run(self.fake_B,
                                                    feed_dict={
                                                        self.real_A:
                                                        blur_batch,
                                                        self.training: True
                                                    })

                        for _ in range(critic_updates):
                            d_loss, _, d_merge_result = sess.run(
                                [self.d_loss, self.D_trainer, merge_D],
                                feed_dict={
                                    self.real_B: sharp_batch,
                                    self.d_fake_B: generated_images,
                                    self.training: True
                                })
                        writer.add_summary(d_merge_result, i)

                    g_loss, _, g_merge_result = sess.run(
                        [self.g_loss, self.G_trainer, merge_G],
                        feed_dict={
                            self.real_A: blur_batch,
                            self.real_B: sharp_batch,
                            self.training: True,
                            self.train_critic: train_critic
                        })

                    writer.add_summary(g_merge_result, i)
                    if (i + 1) % show_freq == 0:
                        print(
                            "{}/{} batch in {}/{} epochs, discriminator loss: {}, generator loss: {}"
                            .format(index + 1, int(blur.shape[0] / batch_size),
                                    epoch + 1, epoch_num, d_loss, g_loss))
                    if (i + 1) % save_freq == 0:
                        if not os.path.exists('model/'):
                            os.makedirs('model/')
                        saver.save(sess, 'model/{}'.format(cur_model_name))
                        print('{} Saved'.format(cur_model_name))

                    ##save image
                    if (i + 1) % generate_image_freq == 0:

                        if not train_critic:
                            generated_images = sess.run(self.fake_B,
                                                        feed_dict={
                                                            self.real_A:
                                                            blur_batch,
                                                            self.training: True
                                                        })

                        if not os.path.exists(save_to):
                            os.makedirs(save_to)

                        for j in range(generated_images.shape[0]):
                            y = sharp_batch[j, :, :, :]
                            x = blur_batch[j, :, :, :]
                            img = generated_images[j, :, :, :]
                            x = deprocess_image(x)
                            y = deprocess_image(y)
                            img = deprocess_image(img)
                            output = np.concatenate((y, img, x), axis=1)
                            im = Image.fromarray(output.astype(np.uint8))
                            im.save(save_to + '/' + str(i + 1) + '_' + str(j) +
                                    '.png')
                        print('image saved to {}'.format(save_to))
                    i += 1

            if not os.path.exists('model/'):
                os.makedirs('model/')
            saver.save(sess, 'model/{}'.format(cur_model_name))
            print('{} Saved'.format(cur_model_name))
Example #27
    def generate(self,
                 test_data,
                 batch_size,
                 trained_model,
                 save_to='deblur_test/',
                 customized=False,
                 save=True):
        # generate deblurred images
        if customized:
            x_test = test_data
        else:
            y_test, x_test = test_data['B'], test_data['A']
        size = x_test.shape[0]
        save_to = save_to + trained_model
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            print("Load the model from: {}".format(trained_model))
            saver.restore(sess, 'model/{}'.format(trained_model))
            print("Model restored.")

            ##Generate deblurred images
            generated = []
            index = -1  # keeps the leftover branch below valid when size < batch_size
            for index in range(int(size / batch_size)):
                _input = x_test[index * batch_size:(index + 1) * batch_size]
                generated_test = sess.run(self.fake_B,
                                          feed_dict={
                                              self.real_A: _input,
                                              self.training: False
                                          })
                generated = generated + [
                    deprocess_image(img) for img in generated_test
                ]
            if (index + 1) * batch_size != size:
                _input = x_test[((index + 1) * batch_size):]
                generated_test = sess.run(self.fake_B,
                                          feed_dict={
                                              self.real_A: _input,
                                              self.training: False
                                          })
                generated = generated + [
                    deprocess_image(img) for img in generated_test
                ]
            generated = np.array(generated)
            # generated_test = sess.run(self.fake_B, feed_dict={self.real_A: x_test, self.training:False})
            # generated = np.array([deprocess_image(img) for img in generated_test])
            x_test = deprocess_image(x_test)

            if not os.path.exists(save_to):
                os.makedirs(save_to)
            ##save image
            if save:
                if customized:
                    for i in range(generated.shape[0]):
                        x = x_test[i, :, :, :]
                        img = generated[i, :, :, :]
                        output = np.concatenate((img, x), axis=1)
                        im = Image.fromarray(output.astype(np.uint8))
                        im.save(save_to + '/' + str(i) + '.png')
                else:
                    y_test = deprocess_image(y_test)
                    for i in range(generated.shape[0]):
                        y = y_test[i, :, :, :]
                        x = x_test[i, :, :, :]
                        img = generated[i, :, :, :]
                        output = np.concatenate((y, img, x), axis=1)
                        im = Image.fromarray(output.astype(np.uint8))
                        im.save(save_to + '/' + str(i) + '.png')

            ##Calculate Peak Signal Noise Ratio(PSNR)
            if not customized:
                if not save:
                    y_test = deprocess_image(y_test)
                psnr = 0
                for i in range(size):
                    y = y_test[i, :, :, :]
                    img = generated[i, :, :, :]
                    psnr = psnr + PSNR(y, img)
                    # print(PSNR(y,img))
                psnr_mean = psnr / size
                print("PSNR of testing data: " + str(psnr_mean))
        return generated
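PSNR is assumed to be the usual peak signal-to-noise ratio on uint8 images; a sketch:

import numpy as np

def PSNR(y_true, y_pred):
    # 20 * log10(MAX / RMSE), with MAX = 255 for uint8 images
    mse = np.mean((y_true.astype(np.float64) - y_pred.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 20 * np.log10(255.0 / np.sqrt(mse))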
Example #28
def train(options):
    width = options["train_image_width"]
    height = options["train_image_height"]

    # Get style activations
    style_tensor = process_image(options["style_image_path"], width, height)
    style_acts = list()
    for layer_name in options["style_layer"]:
        func = get_vgg_activation(layer_name, width, height)
        style_act = expand_input(options["batch_size"], func([style_tensor])[0])
        style_acts.append(style_act)

    if "style_image_path_2" in options:
        style_tensor_2 = process_image(options["style_image_path_2"], width, height)
        style_acts_2 = list()
        for layer_name in options["style_layer"]:
            func = get_vgg_activation(layer_name, width, height)
            style_act_2 = expand_input(options["batch_size"], func([style_tensor_2])[0])
            style_acts_2.append(style_act_2)

    # Get content activations for test_image
    content_test = process_image(options["test_image_path"], width, height)
    content_func = get_vgg_activation(options["content_layer"], width, height)
    content_act_test = expand_input(options["batch_size"], content_func([content_test])[0])
    content_test = expand_input(options["batch_size"], content_test)

    # Get weights
    style_w = options["style_weight"] / len(style_acts)
    content_w = options["content_weight"]
    tv_w = options["total_variation_weight"]

    # Get training model
    bi_style = False
    if "style_image_path_2" in options:
        bi_style = True
    training_model = get_training_model(width, height, bs=options['batch_size'], bi_style=bi_style)
    if bi_style:
        training_model.compile(loss={'content': dummy_loss, 'style1_out': dummy_loss, 'style2_out': dummy_loss,
                                     'style3_out': dummy_loss, 'style4_out': dummy_loss, 'tv': dummy_loss,
                                     'output': zero_loss},
                               optimizer=optimizers.Adam(lr=options["learning_rate"]),
                               loss_weights=[content_w, style_w, style_w, style_w, style_w, tv_w, 0])
    else:
        training_model.compile(loss={'content': dummy_loss, 'style1': dummy_loss, 'style2': dummy_loss,
                                     'style3': dummy_loss, 'style4': dummy_loss, 'tv': dummy_loss, 'output': zero_loss},
                               optimizer=optimizers.Adam(lr=options["learning_rate"]),
                               loss_weights=[content_w, style_w, style_w, style_w, style_w, tv_w, 0])

    # If flag is set, print model summary and generate model description
    if options["plot_model"]:
        training_model.summary()
        plot_model(training_model, to_file='model.png')

    # function for printing test information
    def print_test_results(cur_res, cur_iter, prev_loss):
        losses = list()
        losses.append(cur_res[0][0] * content_w)
        losses.append(cur_res[1][0] * style_w)
        losses.append(cur_res[2][0] * style_w)
        losses.append(cur_res[3][0] * style_w)
        losses.append(cur_res[4][0] * style_w)
        losses.append(cur_res[5][0] * tv_w)
        cur_loss = sum(losses)
        if prev_loss is None:
            prev_loss = cur_loss

        print("----------------------------------------------------")
        print("Details: iteration %d, " % cur_iter, end='')
        print('improvement: %.2f percent, ' % ((prev_loss - cur_loss) / prev_loss * 100), end='')
        print("loss: %.0f" % cur_loss)
        print("content_loss: %.0f, style_loss_1: %.0f, style_loss_2: %.0f\n"
              "style_loss_3: %.0f, style_loss_4: %.0f, tv_loss: %.0f"
              % (losses[0], losses[1], losses[2], losses[3], losses[4], losses[5]))
        print("----------------------------------------------------")

        return cur_loss

    # Prepare for training
    dg = ImageDataGenerator()
    dummy_in = expand_input(options["batch_size"], np.array([0.0]))
    interrupted = False
    c_loss = None
    t_sum = 0.0

    # Begin Training
    t_total_1 = time.time()
    for i in range(options["epochs"]):
        print("Epoch: %d" % (i+1))
        iters = 0

        for x in dg.flow_from_directory(options["train_image_path"], class_mode=None,
                                        batch_size=options["batch_size"], target_size=(height, width)):
            try:
                t1 = time.time()
                x = vgg16.preprocess_input(x)
                content_act = content_func([x])[0]
                if bi_style:
                    res = training_model.fit([x, content_act, style_acts[0], style_acts[1], style_acts[2],
                                              style_acts[3], style_acts_2[0], style_acts_2[1], style_acts_2[2],
                                              style_acts_2[3]], [dummy_in, dummy_in, dummy_in, dummy_in, dummy_in,
                                                                 dummy_in, x],
                                             epochs=1, verbose=0, batch_size=options["batch_size"])
                else:
                    res = training_model.fit([x, content_act, style_acts[0], style_acts[1], style_acts[2],
                                              style_acts[3]], [dummy_in, dummy_in, dummy_in, dummy_in, dummy_in,
                                                               dummy_in, x],
                                             epochs=1, verbose=0, batch_size=options["batch_size"])
                t2 = time.time()
                t_sum += t2 - t1

                iters += 1

                if iters % options["view_iter"] == 0:
                    loss = res.history['loss'][0]
                    est_time = int((options["steps_per_epoch"]*(options["epochs"]-i) - iters)
                                   * (t_sum/options["view_iter"]))
                    print("Iter : %d / %d, Time elapsed: %0.2f seconds, Loss: %.0f, EST: " %
                          (iters, options["steps_per_epoch"], t_sum/options["view_iter"], loss) +
                          str(datetime.timedelta(seconds=est_time)))
                    t_sum = 0.0

                if iters % options["test_iter"] == 0:
                    if bi_style:
                        res = training_model.predict([content_test, content_act_test, style_acts[0], style_acts[1],
                                                      style_acts[2], style_acts[3], style_acts_2[0], style_acts_2[1],
                                                      style_acts_2[2], style_acts_2[3]])
                    else:
                        res = training_model.predict([content_test, content_act_test, style_acts[0], style_acts[1],
                                                      style_acts[2], style_acts[3]])
                    c_loss = print_test_results(res, iters, c_loss)

                    output = deprocess_image(res[6][0], width, height)
                    imsave(options["test_res_save_path"] + '%d_%d_output.jpg' % (i, iters), output)

                if iters >= options["steps_per_epoch"]:
                    break

            except KeyboardInterrupt:
                print("Interrupted, training suspended.")
                interrupted = True
                break

        if interrupted:
            break

    t_total_2 = time.time()
    print("Training ended. Time used: " + str(datetime.timedelta(seconds=int(t_total_2-t_total_1))))

    # Saving models
    print("Saving models...")
    model_eval = get_evaluate_model(width, height)
    training_model_layers = {layer.name: layer for layer in training_model.layers}
    for layer in model_eval.layers:
        if layer.name in training_model_layers:
            print(layer.name)
            layer.set_weights(training_model_layers[layer.name].get_weights())

    model_eval.save_weights(options["weights_save_path"] + '%s_weights.h5' % options["net_name"])
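dummy_loss and zero_loss are not shown; a sketch of the usual pattern for training models whose losses are computed inside the graph (an assumption about this repo):

from keras import backend as K

def dummy_loss(y_true, y_pred):
    # The graph already emits this head's loss value; just pass it through
    return y_pred

def zero_loss(y_true, y_pred):
    # The 'output' head only carries the image; contribute nothing to the loss
    return K.zeros_like(y_pred)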
Example #29
def temp_view(options, img_read_path, img_write_path, iters):
    width = options["train_image_width"]
    height = options["train_image_height"]

    # Get style activations
    style_tensor = K.variable(process_image(options["style_image_path"], width, height))
    style_acts = list()
    for layer_name in options["style_layer"]:
        func = get_vgg_activation(layer_name, width, height)
        style_act = func([style_tensor])[0]
        style_acts.append(style_act)

    if "style_image_path_2" in options:
        style_tensor_2 = process_image(options["style_image_path_2"], width, height)
        style_acts_2 = list()
        for layer_name in options["style_layer"]:
            func = get_vgg_activation(layer_name, width, height)
            style_act_2 = func([style_tensor_2])[0]
            style_acts_2.append(style_act_2)

    # Get content activations
    content_tensor = K.variable(process_image(img_read_path, width, height))
    func = get_vgg_activation(options["content_layer"], width, height)
    content_act = func([content_tensor])[0]

    dummy_in = np.array([0.0])
    style_w = options["style_weight"] / len(style_acts)
    content_w = options["content_weight"]
    tv_w = options["total_variation_weight"]

    # Get training model
    bi_style = False
    if "style_image_path_2" in options:
        bi_style = True
    training_model = get_temp_view_model(width, height, bi_style=bi_style)
    if bi_style:
        training_model.compile(loss={'content': dummy_loss, 'style1_out': dummy_loss, 'style2_out': dummy_loss,
                                     'style3_out': dummy_loss, 'style4_out': dummy_loss, 'tv': dummy_loss,
                                     'output': zero_loss},
                               optimizer=optimizers.Adam(lr=1),
                               loss_weights=[content_w, style_w, style_w, style_w, style_w, tv_w, 0])
    else:
        training_model.compile(loss={'content': dummy_loss, 'style1': dummy_loss, 'style2': dummy_loss,
                                     'style3': dummy_loss, 'style4': dummy_loss, 'tv': dummy_loss, 'output': zero_loss},
                               optimizer=optimizers.Adam(lr=1),
                               loss_weights=[content_w, style_w, style_w, style_w, style_w, tv_w, 0])

    # If flag is set, print model summary and generate model description
    if options["plot_model"]:
        training_model.summary()
        plot_model(training_model, to_file='model.png')

    # Input should always be ones
    x = np.ones([1, height, width, 3], dtype='float32')

    # Begin training
    prev_loss = None
    for i in range(iters):
        t1 = time.time()

        if bi_style:
            res = training_model.fit(
                [x, content_act, style_acts[0], style_acts[1], style_acts[2], style_acts[3], style_acts_2[0],
                 style_acts_2[1], style_acts_2[2], style_acts_2[3]],
                [dummy_in, dummy_in, dummy_in, dummy_in, dummy_in, dummy_in, x],
                epochs=1, verbose=0, batch_size=1)
        else:
            res = training_model.fit([x, content_act, style_acts[0], style_acts[1], style_acts[2], style_acts[3]],
                                 [dummy_in, dummy_in, dummy_in, dummy_in, dummy_in, dummy_in, x],
                                 epochs=1, verbose=0, batch_size=1)

        t2 = time.time()

        if i % 10 == 0:
            loss = res.history['loss'][0]
            if prev_loss is None:
                prev_loss = loss
            improvement = (prev_loss - loss) / prev_loss * 100
            prev_loss = loss

            print("Iter: %d / %d, Time elapsed: %0.2f seconds, Loss: %.0f, Improvement: %0.2f percent." %
                  (i, iters, t2-t1, loss, improvement))
            if bi_style:
                print("Detail: content_loss: %0.0f, style_loss_1: %0.0f, style_loss_2: %0.0f,"
                      " style_loss_3: %0.0f, style_loss_4: %0.0f, tv_loss: %0.0f"
                      % (float(res.history['content_loss'][0]) * content_w,
                         float(res.history['style1_out_loss'][0]) * style_w,
                         float(res.history['style2_out_loss'][0]) * style_w,
                         float(res.history['style3_out_loss'][0]) * style_w,
                         float(res.history['style4_out_loss'][0]) * style_w,
                         float(res.history['tv_loss'][0]) * tv_w))
            else:
                print("Detail: content_loss: %0.0f, style_loss_1: %0.0f, style_loss_2: %0.0f,"
                      " style_loss_3: %0.0f, style_loss_4: %0.0f, tv_loss: %0.0f"
                      % (float(res.history['content_loss'][0]) * content_w,
                         float(res.history['style1_loss'][0]) * style_w,
                         float(res.history['style2_loss'][0]) * style_w,
                         float(res.history['style3_loss'][0]) * style_w,
                         float(res.history['style4_loss'][0]) * style_w,
                         float(res.history['tv_loss'][0]) * tv_w))

    if bi_style:
        res = training_model.predict(
            [x, content_act, style_acts[0], style_acts[1], style_acts[2], style_acts[3], style_acts_2[0],
             style_acts_2[1], style_acts_2[2], style_acts_2[3]])
    else:
        res = training_model.predict([x, content_act, style_acts[0], style_acts[1], style_acts[2], style_acts[3]])
    output = deprocess_image(res[6][0], width, height)
    imsave(img_write_path, output)
Example #30
    def generate(self, input_tensor):
        saliency = self.tensor_function([input_tensor])

        return saliency[0]


if __name__ == "__main__":
    img_width = 224
    img_height = 224

    model = ResNet50(weights='imagenet')
    activation_layer = 'activation_49'

    img_path = '../images/cat.jpg'
    img = load_image(path=img_path, target_size=(img_width, img_height))

    preds = model.predict(img)
    predicted_class = preds.argmax(axis=1)[0]
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print("predicted top1 class:", predicted_class)
    print('Predicted:', decode_predictions(preds, top=1)[0])
    # Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]

    vis_conv = VisConvolution(model, ResNet50, activation_layer)
    gradient = vis_conv.generate(img)

    cv2.imshow('vis_conv', deprocess_image(gradient))
    cv2.waitKey()
    cv2.destroyAllWindows()