Code example #1
    def _draw_filters(filters, n=None):
        """Draw the best filters in a nxn grid.

        # Arguments
            filters: A List of generated images and their corresponding losses
                     for each processed filter.
            n: dimension of the grid.
               If none, the largest possible square will be used
        """
        if n is None:
            n = int(np.floor(np.sqrt(len(filters))))

        # the filters that have the highest loss are assumed to be better-looking.
        # we will only keep the top n*n filters.
        filters.sort(key=lambda x: x[1], reverse=True)
        filters = filters[:n * n]

        # build a black picture with enough space for
        # e.g. our 8 x 8 filters of size 412 x 412, with a 5px margin in between
        MARGIN = 5
        width = n * output_dim[0] + (n - 1) * MARGIN
        height = n * output_dim[1] + (n - 1) * MARGIN
        stitched_filters = np.zeros((width, height, 3), dtype='uint8')

        # fill the picture with our saved filters
        for i in range(n):
            for j in range(n):
                img, _ = filters[i * n + j]
                width_margin = (output_dim[0] + MARGIN) * i
                height_margin = (output_dim[1] + MARGIN) * j
                stitched_filters[
                    width_margin: width_margin + output_dim[0],
                    height_margin: height_margin + output_dim[1], :] = img

        # save the result to disk
        save_img('vgg_{0:}_{1:}x{1:}.png'.format(layer_name, n), stitched_filters)
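
Note that _draw_filters reads output_dim and layer_name from module scope, and neither is shown in this snippet. A minimal usage sketch (hypothetical values, with random arrays standing in for real gradient-ascent results):

    import numpy as np
    from keras.preprocessing.image import save_img

    output_dim = (412, 412)      # assumed global read by _draw_filters
    layer_name = 'block5_conv1'  # assumed global read by _draw_filters
    filters = [(np.random.randint(0, 256, size=output_dim + (3,), dtype='uint8'),
                float(loss))
               for loss in np.random.rand(10)]
    _draw_filters(filters)  # keeps the top 3 x 3 filters and saves vgg_block5_conv1_3x3.png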
Code example #2
hr_ae.fit_generator(hr_gen, steps_per_epoch=32, epochs=20)


def super_generator(batch_size=32):
    lr_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        "DIV2K_train_LR_x8/",
        class_mode="input",
        shuffle=False,
        batch_size=batch_size)
    hr_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        "DIV2K_train_HR/",
        class_mode="input",
        shuffle=False,
        batch_size=batch_size)
    while True:
        next_x = next(lr_gen)
        next_y = next(hr_gen)

        yield (next_x[0], next_y[0])


super_ae = keras.Model(lr_encoder.input, hr_decoder(lr_encoder.output))
super_ae.compile(optimizer="rmsprop", loss="binary_crossentropy")
super_ae.fit_generator(super_generator(), steps_per_epoch=32, epochs=20)

test_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    "DIV2K_valid_LR_x8/", class_mode=None, shuffle=False)
test_preds = super_ae.predict(test_gen)
save_img("test0.png", img_to_array(test_gen[0][0]))
save_img("pred0.png", img_to_array(test_preds[0]))
Code example #3
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values


evaluator = Evaluator()

x = preprocess_image(base_image_path)

import os

os.chdir('../rcnn/neural style transfer')
result_prefix = 'image'

for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy())
    fname = result_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
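
preprocess_image and deprocess_image are defined earlier in this file (not shown). For reference, the preprocessing helper in the canonical Keras neural style transfer script looks like this (a sketch, assuming img_nrows/img_ncols globals and a VGG19 backbone):

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg19

def preprocess_image(image_path):
    # resize, add a batch axis, and apply VGG's mean subtraction / BGR reordering
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img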
Code example #4
    print(seg.shape)
    # sys.exit(0)
    contours, _ = cv2.findContours(seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    RGBforLabel = {1: (0, 0, 255), 2: (0, 255, 255)}

    for i, c in enumerate(contours):
        # Find mean colour inside this contour by doing a masked mean
        mask = np.zeros(seg.shape, np.uint8)
        cv2.drawContours(mask, [c], -1, 255, -1)
        # DEBUG: cv2.imwrite(f"mask-{i}.png",mask)
        mean, _, _, _ = cv2.mean(seg, mask=mask)
        # DEBUG: print(f"i: {i}, mean: {mean}")

        # Get appropriate colour for this label
        label = 2 if mean > 1.0 else 1
        colour = RGBforLabel.get(label)
        # DEBUG: print(f"Colour: {colour}")

        # Outline contour in that colour on main image, line thickness=1
        cv2.drawContours(main, [c], -1, colour, 1)

    cv2.imwrite('./overlays/overlay' + str(i) + '.png', main)
    cv2.imwrite('./overlays/mask' + str(i) + '.png', seg)  # save the segmentation itself rather than a second copy of the overlay

for i in range(len(preds_val_t)):
    # save each predicted mask under the name of its source image
    save_img('./graders/prediction/' + data[i], preds_val[i], scale=True)

model.save_weights('./experiments/model2_100.h5')

Code example #5
max_loss = 10.

img = preprocess_image(base_image_path)
if K.image_data_format() == 'channels_first':
    original_shape = img.shape[2:]
else:
    original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
    shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
    successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])

for shape in successive_shapes:
    print('Processing image shape', shape)
    img = resize_img(img, shape)
    img = gradient_ascent(img,
                          iterations=iterations,
                          step=step,
                          max_loss=max_loss)
    upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
    same_size_original = resize_img(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img

    img += lost_detail
    shrunk_original_img = resize_img(original_img, shape)

save_img(result_prefix + '.png', deprocess_image(np.copy(img)))
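
resize_img is another helper this loop assumes; in the Keras Deep Dream example it is a thin wrapper around scipy's zoom (a sketch under that assumption, for channels_last data):

import numpy as np
from scipy.ndimage import zoom

def resize_img(img, size):
    # img has shape (1, height, width, channels)
    img = np.copy(img)
    factors = (1,
               float(size[0]) / img.shape[1],
               float(size[1]) / img.shape[2],
               1)
    return zoom(img, factors, order=1)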
Code example #6
        kept_filters.append((img, loss_value))
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

# We will use 64 filters arranged on an 8 x 8 grid.
n = 8

# The filters with the highest loss are assumed to look better.
# We will keep only the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]

# Build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between.
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))

# Fill the picture with our saved filters.
for i in range(n):
    for j in range(n):
        img, loss = kept_filters[i * n + j]
        width_margin = (img_width + margin) * i
        height_margin = (img_height + margin) * j
        stitched_filters[width_margin:width_margin + img_width,
                         height_margin:height_margin + img_height, :] = img

# Save the result to disk.
save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
Code example #7
the_height = 256
the_width = 1600

imgshape = (the_height, the_width)
orig_train_dir = "/kaggle/input/severstal-steel-defect-detection/train_images/"
#the_index = 0
for file_name, rle, class_id in zip(train_df["image_id"],
                                    train_df["EncodedPixels"],
                                    train_df["class_id"]):
    the_mask = rle2mask(rle, imgshape)
    the_mask = expand_dims(the_mask, 2)
    mask_file_name = file_name[:-4] + "_" + str(int(class_id)) + "_mask.png"
    image_file_name = file_name[:-4] + "_" + str(int(class_id)) + "_image.jpg"
    save_img(masks_subdir + "/" + mask_file_name,
             the_mask,
             data_format="channels_last",
             scale=False)
    src = join(orig_train_dir, file_name)
    dst = join(images_subdir, image_file_name)
    copyfile(src, dst)
    #the_index = the_index + 1
    #if the_index == 100:
    #    break

imgshape = (the_height, the_width)
orig_train_dir = "/kaggle/input/severstal-steel-defect-detection/train_images/"
#the_index = 0
for file_name, rle, class_id in zip(validate_df["image_id"],
                                    validate_df["EncodedPixels"],
                                    validate_df["class_id"]):
    the_mask = rle2mask(rle, imgshape)
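
rle2mask is not shown in this snippet. A common decoder for the Severstal competition data, assuming the usual 1-indexed, column-major run-length encoding:

import numpy as np

def rle2mask(rle, imgshape):
    height, width = imgshape
    mask = np.zeros(height * width, dtype=np.uint8)
    if isinstance(rle, str) and rle.strip():
        s = rle.split()
        starts = np.asarray(s[0::2], dtype=int) - 1   # RLE positions are 1-indexed
        lengths = np.asarray(s[1::2], dtype=int)
        for start, length in zip(starts, lengths):
            mask[start:start + length] = 1
    # pixels run down the columns first (Fortran order)
    return mask.reshape((height, width), order='F')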
Code example #8
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import save_img
# Load the image file
img = load_img("penguins.png", grayscale=True)
# Show image information
print(type(img))
# Convert to a NumPy array
img_array = img_to_array(img)
# Save the image file
save_img("penguins_grayscale.jpg", img_array)
# Load the image
img2 = load_img("penguins_grayscale.jpg")
# Display the image
import matplotlib.pyplot as plt

plt.axis("off")
plt.imshow(img2, cmap="gray")
plt.show()

Code example #9
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

# we will stitch the best 64 filters on an 8 x 8 grid.
n = 8

# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]

# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))

# fill the picture with our saved filters
for i in range(n):
    for j in range(n):
        index = i * n + j
        if index < len(kept_filters):
            img, loss = kept_filters[index]
            stitched_filters[(img_width + margin) *
                             i:(img_width + margin) * i + img_width,
                             (img_height + margin) *
                             j:(img_height + margin) * j + img_height, :] = img

# save the result to disk
save_img(layer_name + '_stitched_filters_%dx%d.png' % (n, n), stitched_filters)
Code example #10
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])

# we start from a gray image with some noise
input_img_data = np.random.random((1, img_width, img_height, 1)) * 20 + 128.

step = 1
# run gradient ascent for 20 steps
for i in range(20):
    loss_value, grads_value = iterate([input_img_data])
    input_img_data += grads_value * step
    
img = input_img_data[0]
img = deprocess_image(img)
save_img('%s_filter_%d.png' % (layer_name, filter_index), img)
    
    
#for filter_index in range(filters):
#    # we only scan through the first 200 filters,
#    # but there are actually 512 of them
#    print('Processing filter %d' % filter_index)
#    start_time = time.time()
#
#    # we build a loss function that maximizes the activation
#    # of the nth filter of the layer considered
#    layer_output = layer_dict[layer_name].output
#    if K.image_data_format() == 'channels_first':
#        loss = K.mean(layer_output[:, filter_index, :, :])
#    else:
#        loss = K.mean(layer_output[:, :, :, filter_index])
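
deprocess_image here is presumably the utility from the Keras filter-visualization blog post (it also appears in code example #32 below): re-center the tensor, clip it, and rescale it to a displayable RGB image. A sketch:

import numpy as np
from keras import backend as K

def deprocess_image(x):
    # center on 0 with std 0.1, then map into the displayable [0, 255] range
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    return np.clip(x, 0, 255).astype('uint8')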
Code example #11
def main():

    ## retrieve arguments and print out in shell
    args = args_parser()
    ## print out information on shell
    info_print(args)

    ## create output directory if not available ##

    #### Keras Model Loading ####
    if args.model.lower() == "vgg16":
        from keras.applications.vgg16 import VGG16 as keras_model, preprocess_input
    elif args.model.lower() == "vgg19":
        from keras.applications.vgg19 import VGG19 as keras_model, preprocess_input

    ## Define local variables in main environment
    if not "content/" in args.content_image_path:
        content_image_path = "content/" + args.content_image_path
        base_path = args.content_image_path
    else:
        content_image_path = args.content_image_path
        base_path = args.content_image_path[-1]

    ## remove file extension
    base_path = os.path.splitext(base_path)[0]

    output_subdir = args.output_subdir
    if output_subdir is None:
        ## Create output subdirectory
        output_subdir = "output/{}".format(base_path)
        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)
    else:
        if not "output/" in output_subdir:
            output_subdir = "output/" + output_subdir
        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)

    if not "style/" in args.style_image_path:
        style_image_path = "style/" + args.style_image_path
    else:
        style_image_path = args.style_image_path

    init_image = args.init_image
    image_width = args.image_width
    image_height = args.image_height
    img_size = (image_height, image_width)
    content_weight = args.content_weight
    style_weights = args.style_weights
    total_variation_weight = args.total_variation_weight
    num_iter = args.num_iter
    model = args.model
    rescale_image = str_to_bool(args.rescale_image)
    content_layer = args.content_layer
    if args.style_layers is None:
        style_layers = [
            'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
            'block5_conv1'
        ]
    else:
        style_layers = args.style_layers

    print(style_layers)

    original_size = Image.open(content_image_path).size

    ###### Content Image ######
    ## Get preprocessed content image array
    content_image = preprocess_image(content_image_path, img_size,
                                     preprocess_input)
    ## Parse content_image numpy array as Keras Backend Variable
    content_image = K.variable(content_image,
                               dtype="float32",
                               name="content_image")

    ###### Style Image ######
    ## Get preprocessed style image array
    style_image = preprocess_image(style_image_path, img_size,
                                   preprocess_input)
    ## Parse style image numpy array as Keras Backend Variable
    style_image = K.variable(style_image, dtype="float32", name="style_image")

    ###### Generated Image ######
    ## Init generated image as numpy array and parse into Keras Backend Variable
    if init_image == "content":
        generated_image = preprocess_image(content_image_path, img_size,
                                           preprocess_input)
    elif init_image == "random":
        generated_image = np.random.randint(256,
                                            size=(image_width, image_height,
                                                  3)).astype("float64")
        generated_image = preprocess_input(
            np.expand_dims(generated_image, axis=0))
    else:
        import sys
        print("wrong init_image")
        sys.exit(1)
    fname = output_subdir + "/generated_image_at_iteration_0.jpg"
    save_img(path=fname, x=generated_image[0])

    ## Define generate image variable placeholder for later optimization
    # Theano
    if K.image_data_format() == "channels_first":
        generated_image_placeholder = K.placeholder(shape=(1, 3, image_height,
                                                           image_width))
    # Tensorflow
    else:
        generated_image_placeholder = K.placeholder(shape=(1, image_height,
                                                           image_width, 3))

    ###### Initialize one Keras model whose input tensor is the concatenation of the 3 images ######
    input_tensor = K.concatenate(
        [content_image, style_image, generated_image_placeholder], axis=0)
    ## input_tensor is a 4D tensor of shape (3, image_height, image_width, 3): the leading 3 indexes the three stacked images, the trailing 3 the color channels (tf ordering)

    # build the keras network with our 3 images as input
    model = keras_model(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    # get the symbolic outputs of each layer (we gave them unique names). [Feature representations/maps in form of 4D tensors at each layer]
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

    # combine these loss functions into a single scalar
    loss = K.variable(0.0)
    layer_features = outputs_dict[content_layer]

    ############# Content extraction: #############
    # retrieve content_image output for content_layer
    content_image_features = layer_features[0, :, :, :]
    # retrieve generated_image output from content_layer
    generated_image_features = layer_features[2, :, :, :]
    # get loss containing only content loss
    loss = loss + content_weight * content_loss(content_image_features,
                                                generated_image_features)

    ############# Style Extraction:  #############
    if len(style_weights) == 1:
        style_weights = [style_weights[0]] * len(style_layers)
    else:
        assert len(style_weights) == len(style_layers)
        style_weights = [float(style_weight) for style_weight in style_weights]

    session = K.get_session()
    for style_weight, layer_name in zip(style_weights, style_layers):
        ## get feature activations from layers
        layer_features = outputs_dict[layer_name]
        ## retrieve style_image output activations for a style_layer
        style_image_features = layer_features[1, :, :, :]
        ## retrieve generated_image output activations for a style_layer
        generated_image_features = layer_features[2, :, :, :]
        ## get loss containing content loss and style loss
        loss = loss + (style_weight / len(style_layers)) * style_loss(
            style_image_features, generated_image_features, img_size, session)

    ## get loss containing content loss, style loss and total variation loss
    loss = loss + total_variation_weight * total_variation_loss(
        generated_image_placeholder, img_size)

    # get the gradients of the generated image wrt. the loss
    grads = K.gradients(loss, generated_image_placeholder)

    # Define outputs list to have loss included
    outputs = [loss]

    # add the gradients to the outputs instance
    if isinstance(grads, (list, tuple)):
        outputs += grads
    else:
        outputs.append(grads)

    ## Define keras function with input the placeholder of the generated image and output the {loss and gradients} for learning
    f_outputs = K.function(inputs=[generated_image_placeholder],
                           outputs=outputs)

    class Evaluator(object):
        def __init__(self):
            self.loss_value = None
            self.grads_values = None

        def loss(self, x):
            assert self.loss_value is None
            loss_value, grad_values = eval_loss_and_grads(
                x, img_size, f_outputs)
            self.loss_value = loss_value
            self.grad_values = grad_values
            return self.loss_value

        def grads(self, x):
            assert self.loss_value is not None
            grad_values = np.copy(self.grad_values)
            self.loss_value = None
            self.grad_values = None
            return grad_values

    # this Evaluator class makes it possible
    # to compute loss and gradients in one pass
    # while retrieving them via two separate functions,
    # "loss" and "grads". This is done because scipy.optimize
    # requires separate functions for loss and gradients,
    # but computing them separately would be inefficient.

    evaluator = Evaluator()

    # run scipy-based optimization (L-BFGS) over the pixels of the generated image
    # so as to minimize the neural style loss
    loss_history = [None] * num_iter
    for i in range(num_iter):
        print("Start of iteration:", i + 1)
        start_time = time.time()
        generated_image, loss_history[i], info = fmin_l_bfgs_b(
            evaluator.loss,
            generated_image.flatten(),
            fprime=evaluator.grads,
            maxfun=20)
        print("Current loss value:", loss_history[i])
        # save current generated image
        img = deprocess_image(generated_image.copy(), img_shape=img_size)
        if rescale_image:
            img = array_to_img(img[0])
            img = img.resize(original_size)
            img = img_to_array(img)

        fname = output_subdir + "/generated_image_at_iteration_%s.png" % str(
            i + 1)
        save_img(path=fname, x=img)
        end_time = time.time()
        print("Image saved at:", fname)
        print("Iteration %s completed in %ds" %
              (str(i + 1), end_time - start_time))

    # summarize history for loss
    plt.figure(3, figsize=(7, 5))
    plt.plot(loss_history)
    plt.title("loss process during neural style transfer")
    plt.ylabel("loss")
    plt.xlabel("iteration")
    plt.savefig(output_subdir + "/loss_history.jpg")
    plt.close()
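
main() relies on an eval_loss_and_grads helper defined elsewhere in the project. In the canonical Keras script it reshapes the optimizer's flat pixel vector, runs the compiled backend function, and returns the loss with flattened gradients; a sketch under that assumption:

import numpy as np
from keras import backend as K

def eval_loss_and_grads(x, img_size, f_outputs):
    # reshape the flat vector from L-BFGS back into an image batch
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_size[0], img_size[1]))
    else:
        x = x.reshape((1, img_size[0], img_size[1], 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values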
Code example #12
File: test_and_save.py Project: richardt94/deepSAR
from keras.utils import plot_model

# the generator applies synthetic speckle noise to each image in real time;
# this will obviously not happen when we test on actual SAR images
datagen = ImageDataGenerator(preprocessing_function=standardise)

val_iter = datagen.flow_from_directory("UCMerced_LandUse/Images/test",
                                       color_mode='grayscale',
                                       batch_size=4,
                                       target_size=(256, 256),
                                       class_mode='input')

#initialise and test the model
#we're still using the synthetic speckle so we need to set train=True
model = unet_autoenc(pretrained_weights='unet_autoenc-59-0.18.hdf5',
                     input_size=(256, 256, 1),
                     train=True,
                     output_noisy=True)

results = model.predict_generator(val_iter, steps=len(val_iter))

rfolder = 'test_results/'
ifolder = 'test_inputs/'

for indx, (result, noisy) in enumerate(zip(results[0], results[1])):

    fname = str(indx) + '.png'
    save_img(rfolder + fname, result)
    save_img(ifolder + fname, noisy)
Code example #13
x_adv = x
x_noise = np.zeros_like(x)

# Set variables , can change epsilon
epochs = 20
epsilon = 0.18

for i in range(epochs): 
	# One hot encode the target class
	target = K.one_hot(target_class, 2)

	# Get the loss and gradient of the loss wrt the inputs
	loss = -1*K.categorical_crossentropy(target, model.output)
	grads = K.gradients(loss, model.input)

	# Get the sign of the gradient
	delta = K.sign(grads[0])
	x_noise = x_noise + delta

	# Perturb the image
	x_adv = x_adv + epsilon*delta

	# Get the new image and predictions
	x_adv = sess.run(x_adv, feed_dict={model.input:x})
	preds = model.predict(x_adv)
	adv_pred = preds[0][target_class].item()
	print(i, preds[0][target_class])
	# treat a target-class confidence above 0.95 as a successful attack
	if adv_pred > 0.95 :
		image.save_img( "afterAdv"+img_path , x_adv[0])
		break
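
The loop above rebuilds the gradient graph on every pass; the core of the attack is the single sign-gradient step x_adv = x + epsilon * sign(dLoss/dx). A compact one-step sketch, assuming the same model, x, target_class and epsilon as above:

import numpy as np
from keras import backend as K

# one-step targeted FGSM: push the input toward target_class
target = K.one_hot(target_class, 2)
loss = -1 * K.categorical_crossentropy(target, model.output)
grad_fn = K.function([model.input], K.gradients(loss, model.input))
x_adv = x + epsilon * np.sign(grad_fn([x])[0])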
Code example #14
        det_conf = results[0][:, 1]

        cand_size = len(det_label)
        isPerson = False

        for i in range(cand_size):
            if det_conf[i] < 0.9:
                continue
            else:
                # Person is 15
                if int(det_label[i]) == 15:
                    isPerson = True
                    break

        if isPerson:
            # timestamp for the saved file name
            now_time_str = datetime.now(
                pytz.timezone('Asia/Tokyo')).strftime('%Y%m%d_%H%M%S%f')[:-3]
            # print("Person detect :",now_time_str)

            try:
                image.save_img("./output/" + now_time_str + '.jpg', output_img)
            except Exception as e:
                continue
        time.sleep(0.5)

except Exception as e:
    logger.info('Web driver error occurred!')
    logger.exception(e)
    sys.exit()
Code example #15
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
x = preprocess_image(base_image_path)

for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy())
    fname = result_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
Code example #16
x = preprocess_image(base_image_path)
iterations = 10
evaluator, x = train()
#img = deprocess_image(x.astype('float64'))
from PIL import Image
img = Image.fromarray(x.astype('uint8'), 'RGB')  # fromarray expects an unsigned 8-bit array
#img.resize((600,600))
img.save('output_images/picasso_RM.png')

# ### Drop

# In[18]:

fname = result_prefix + 'art_iter10.png'
save_img(fname, x)

# In[13]:

evaluator = Evaluator()
iterations = 100
for i in range(iterations):
    print('iteration', i)
    start_time = time.time()
    x, min_val, _ = fmin_l_bfgs_b(evaluator.loss,
                                  x.flatten(),
                                  fprime=evaluator.grads,
                                  maxfun=20)
    print('Current loss value:', min_val)
    end_time = time.time()
    print('duration:', end_time - start_time)
Code example #17
# Define the autoencoder model

AEmodel = Sequential()

AEmodel.add(Conv2D(filters = 16, input_shape = (128, 128, 3), kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 32, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 64, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 128, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 64, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 32, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 16, kernel_size = (3, 3), padding = "same", activation = "relu"))
AEmodel.add(Dropout(Beta))
AEmodel.add(Conv2D(filters = 3, kernel_size = (3, 3), padding = "same", activation = "sigmoid"))

AEmodel.compile(optimizer = "adam", loss = "mse", metrics = ["accuracy"])
AEmodel.summary()

for E in range(nEpochs):
	AEmodel.fit(datasetX, datasetY, shuffle = True, epochs = 1, batch_size = Batch_size)
	print(E)

	if E % Check_point == 0:
		AEmodel.save_weights(str(E) + ".h5")
		save_img(str(E) + ".png", 255 * AEmodel.predict(Sample(dataTrain, 2))[0])
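
Sample is an undocumented helper here; presumably it draws a small random batch from the training set. A hypothetical stand-in:

import numpy as np

def Sample(data, n):
    # hypothetical helper: draw n random images from data, shape (N, 128, 128, 3)
    idx = np.random.choice(len(data), size=n, replace=False)
    return data[idx]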
Code example #18
File: main.py Project: Wangshenshen123/demo1
def run(a, b):
    # Set parameters
    # base_image = '1.jpg'
    # style_image = '2.png'

    global img_nrows

    global img_ncols

    global f_outputs
    base_image = a
    style_image = b
    result_image = 'result/'
    iterations = 400
    total_variation_weight = 8.5e-5
    style_weight = 1.0
    content_weight = 0.025

    # Set the size of the generated image (scaling)
    width, height = load_img(base_image).size
    # rows
    img_nrows = 600
    # columns
    img_ncols = int(width * img_nrows / height)

    # Read in the content and style images and wrap them as Keras tensors
    base_image_K = K.variable(preprocess_image(base_image))
    style_reference_image = K.variable(preprocess_image(style_image))

    # Initialize a placeholder for the image to be optimized
    if K.image_data_format() == 'channels_first':
        combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
    else:
        combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

    # Concatenate the 3 tensors together
    input_tensor = K.concatenate(
        [base_image_K, style_reference_image, combination_image], axis=0)

    model = vgg19.VGG19(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)
    print('Model loaded.')

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

    # combine these loss functions into a single scalar
    loss = K.variable(0.0)
    layer_features = outputs_dict['block5_conv2']
    base_image_features = layer_features[0, :, :, :]
    combination_features = layer_features[2, :, :, :]
    loss = loss + content_weight * content_loss(base_image_features,
                                                combination_features)

    feature_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]
    for layer_name in feature_layers:
        layer_features = outputs_dict[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = style_loss(style_reference_features, combination_features)
        loss = loss + (style_weight / len(feature_layers)) * sl
    loss = loss + total_variation_weight * total_variation_loss(
        combination_image)

    # get the gradients of the generated image wrt the loss
    grads = K.gradients(loss, combination_image)

    outputs = [loss]
    if isinstance(grads, (list, tuple)):
        outputs += grads
    else:
        outputs.append(grads)

    f_outputs = K.function([combination_image], outputs)

    evaluator = Evaluator()

    x = preprocess_image(base_image)

    for i in range(iterations):
        start_time = time.time()
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                         x.flatten(),
                                         fprime=evaluator.grads,
                                         maxfun=20)
        print('Current loss value:', min_val)
        # save current generated image
        img = deprocess_image(x.copy())
        fname = result_image + '_at_iteration_%d.png' % i
        if i % 10 == 0:
            save_img(fname, img)
        end_time = time.time()
        print('Iteration %d completed in %ds' % (i, end_time - start_time))
Code example #19
def main():
    width, height = load_img(target_image_path).size
    global img_height, img_width
    img_height = 200
    img_width = int(width * img_height / height)

    target_image = K.constant(preprocess_image(target_image_path))
    style_reference_image = K.constant(preprocess_image(style_reference_path))
    combination_image = K.placeholder((1, img_height, img_width, 3))
    input_tensor = K.concatenate(
        [target_image, style_reference_image, combination_image], axis=0)
    model = vgg19.VGG19(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    output_dict = dict([(layer.name, layer.output) for layer in model.layers])
    content_layer = 'block5_conv2'
    style_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]

    total_variation_weight = 1e-4
    style_weight = 1.
    content_weight = 0.025

    loss = K.variable(0.)
    layer_features = output_dict[content_layer]
    target_image_features = layer_features[0, :, :, :]
    combination_features = layer_features[2, :, :, :]
    loss = loss + content_weight * content_loss(target_image_features,
                                                combination_features)

    for layer_name in style_layers:
        layer_features = output_dict[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        s1 = style_loss(style_reference_features, combination_features)
        loss = loss + (style_weight / len(style_layers)) * s1

    loss = loss + total_variation_weight * total_variation_loss(
        combination_image)

    grads = K.gradients(loss, combination_image)[0]
    fetch_loss_and_grads = K.function([combination_image], [loss, grads])

    class Evaluator(object):
        def __init__(self):
            self.loss_value = None
            self.grads_values = None

        def loss(self, x):
            assert self.loss_value is None
            x = x.reshape((1, img_height, img_width, 3))
            outs = fetch_loss_and_grads([x])
            loss_value = outs[0]
            grad_values = outs[1].flatten().astype('float64')
            self.loss_value = loss_value
            self.grad_values = grad_values
            return self.loss_value

        def grads(self, x):
            assert self.loss_value is not None
            grad_values = np.copy(self.grad_values)
            self.loss_value = None
            self.grad_values = None
            return grad_values

    evaluator = Evaluator()

    iterations = iter_size

    x = preprocess_image(target_image_path)
    x = x.flatten()

    for i in range(iterations):

        x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                         x,
                                         fprime=evaluator.grads,
                                         maxfun=20)
        img = x.copy().reshape((img_height, img_width, 3))
        img = deprocess_image(img)
        percent = int(100.0 * i / iterations)
        sys.stdout.write("\r{0}{1}{2}{3}{4}".format(
            "\r[%2d%%]" % percent, "[", "=" * int(percent / 5),
            " " * (20 - int(percent / 5)), "]"))
        sys.stdout.flush()
        if (i == iterations - 1):
            fname = 'NST.png'
            save_img(fname, img)
            print('\n\rImage saved as %s \n\r' % fname)
    print('\n\r')
Code example #20
File: stylize.py Project: Liberty3000/neural-styler
def run(ctx, **config):
    config = SimpleNamespace(**config)
    verbose = config.verbose
    identifier, pattern, ext = '{}_{}_v{}_{}', '{}_c{}_s{}_v', '.png'
    status = '{:12} | Loss: {:.2E}'

    if config.content is None:
        if verbose:
            print(
                '<!> no content file found, using noise for texture generation.'
            )
        config.content = config.temp_file
        config.shape = (config.height, config.width, 3)
        if not os.path.isfile(config.content):
            noise_scale, noise_shift = 255, 100
            noise = (np.random.normal(size=config.shape) *
                     noise_scale) + noise_shift
            cv.imwrite(config.content, noise)
    else:
        w, h = load_img(config.content).size
        config.height = h if not config.height else config.height
        config.width = w if not config.width else config.width
        w, h = load_img(config.content,
                        target_size=(config.height, config.width)).size
        config.shape = (h, w, 3)
    h, w, _ = config.shape

    if verbose: print('loading style...')
    style_image = preprocess(config.style, *config.shape[:-1])
    style_id = config.style.split('/')[-1].split('.')[0]
    style_weight = config.style_weight
    if verbose: print('done.')

    if verbose: print('loading content...')
    content_image = preprocess(config.content, *config.shape[:-1])
    content_id = config.content.split('/')[-1].split('.')[0]
    content_weight = config.content_weight
    if verbose: print('done.')

    if verbose: print('configuring run...')
    output = content_image.copy()
    variation_weight = config.variation_weight
    config.pair = '{}_{}'.format(style_id, content_id).title()
    config.variation = pattern.format(content_weight, style_weight,
                                      variation_weight)
    version = 1
    for f in os.listdir(config.output_dir):
        test = identifier.format(style_id, content_id, version,
                                 config.variation)
        if test in f:
            version += 1
            break
    save_as = identifier.format(style_id, content_id, version,
                                config.variation)
    saver = os.path.join(config.output_dir, save_as)
    if config.save_progress: os.makedirs(saver)
    if verbose: print('ready...')

    if verbose: print('building graph...')
    content_tensor = K.variable(content_image)
    style_tensor = K.variable(style_image)
    pastiche_tensor = K.placeholder((1, ) + config.shape)

    content_axis, style_axis, pastiche_axis = 0, 1, 2
    input_tensor = K.concatenate(
        [content_tensor, style_tensor, pastiche_tensor], axis=0)

    model = zoo[config.model]['model'](input_tensor)
    feature_layers = zoo[config.model]['layers']

    if config.content_layers == 'all':
        config.content_layers = feature_layers
    if config.style_layers == 'all':
        config.style_layers = feature_layers

    if config.content_layers is None:
        config.content_layers = random.sample(feature_layers,
                                              k=len(feature_layers))
    if config.style_layers is None:
        config.style_layers = random.sample(feature_layers,
                                            k=len(feature_layers))

    if not isinstance(config.content_layers, list):
        config.content_layers = [config.content_layers]
    if not isinstance(config.style_layers, list):
        config.style_layers = [config.style_layers]

    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

    if verbose:
        print('graph built successfully.')
        model.summary()
        print('      feature extractor|', config.model)
        print('  target style layer(s)|', config.content_layers)
        print('target content layer(s)|', config.style_layers)
        print('           style weight|', config.style_weight)
        print('         content weight|', config.content_weight)
        print('       variation weight|', config.variation_weight)
        print('                       |')
        print('             resolution|', config.shape[:-1])
        print('             identifier|', config.pair)

    loss = K.variable(0.)

    for layer in config.content_layers:
        content_feats = outputs_dict[layer][content_axis, ...]
        pastiche_feats = outputs_dict[layer][pastiche_axis, ...]
        closs = content_loss(content_feats, pastiche_feats)
        loss = loss + (content_weight * closs)

    for layer in config.style_layers:
        style_feats = outputs_dict[layer][style_axis, ...]
        pastiche_feats = outputs_dict[layer][pastiche_axis, ...]
        sloss = style_loss(style_feats, pastiche_feats, config.shape)
        loss += (style_weight / len(feature_layers)) * sloss

    total_variation_loss_ = total_variation_loss(config.shape,
                                                 variation_weight)
    loss += variation_weight * total_variation_loss_(pastiche_tensor)
    grads = K.gradients(loss, pastiche_tensor)

    outputs = [loss]
    if isinstance(grads, (list, tuple)): outputs += grads
    else: outputs.append(grads)
    f_outputs = K.function([pastiche_tensor], outputs)

    def loss_and_grads(x):
        x = x.reshape((1, h, w, 3))
        outputs = f_outputs([x])
        losses = outputs[0]
        if len(outputs[1:]) == 1:
            grads = outputs[1].flatten().astype('float64')
        else:
            grads = np.array(outputs[1:]).flatten().astype('float64')
        return losses, grads

    class Evaluator(object):
        def __init__(self):
            self.loss_value = None
            self.grads_values = None

        def loss(self, x):
            assert self.loss_value is None
            loss_value, grad_values = loss_and_grads(x)
            self.loss_value = loss_value
            self.grad_values = grad_values
            return self.loss_value

        def grads(self, x):
            assert self.loss_value is not None
            grad_values = np.copy(self.grad_values)
            self.loss_value = None
            self.grad_values = None
            return grad_values

    evaluator = Evaluator()
    bar = tqdm.tqdm(range(config.itrs))
    for itr in bar:
        output, loss, _ = fmin_l_bfgs_b(evaluator.loss,
                                        output.flatten(),
                                        fprime=evaluator.grads,
                                        maxfun=config.lbfgs_steps)

        if config.save_progress:
            current = '{}_{}'.format(config.pair, str(itr).zfill(2))
            output_file = os.path.join(saver, current) + ext
            save_img(output_file, deprocess(output.copy(), h, w))

        bar.set_description(status.format(config.pair, loss))
        bar.refresh()

    output_file = os.path.join(config.output_dir, save_as) + ext
    save_img(output_file, deprocess(output.copy(), h, w))
Code example #21
File: Augmentation.py Project: junyuong/Augmentation
from numpy import expand_dims
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, save_img
from matplotlib import pyplot
# load the image
img = load_img('B_0_0_0_0_0.png')
# convert to numpy array
data = img_to_array(img)
# expand dimension to one sample
samples = expand_dims(data, 0)
# create image data augmentation generator

#datagen = ImageDataGenerator(zoom_range=[0.5,1.0])
#datagen = ImageDataGenerator(rotation_range=10)
datagen = ImageDataGenerator(rotation_range=30)

# prepare iterator
it = datagen.flow(samples, batch_size=1)
# generate samples and plot

for i in range(12):
    # define subplot
    #pyplot.subplot(330 + 1 + i)
    # generate batch of images
    batch = it.next()
    # convert to unsigned integers for viewing
    image = batch[0].astype('uint8')
    img_array = img_to_array(image)
    # plot raw pixel data
    #pyplot.imshow(image)
    save_img(str(i) + '.jpg', img_array)
# show the figure
pyplot.show()
Code example #22
    '''
    Probability histogram initialize
    2019.10.24
    '''
    global prob_hist
    global prob_time_prev

    prob_name = 'output\\probability_histogram_DB_pickle2'
    with open(prob_name, 'rb') as fp:
        prob_hist = pickle.load(fp)
    print("Probability histogram ready")
    prob_time_prev = 0


global map_x_size, map_y_size, map_layer_num
map_y_size = 128
map_x_size = 128
map_layer_num = 3
prob_hist_initialize()
image_files = glob.glob('map_input_DB\\*.bmp')
data_length = np.shape(image_files)[0]

for idx in range(data_length):
    map_data = data_read_one_prob_concatnate(image_files[idx])
    map_in = (map_data / 25).astype(int)
    map_prob = prob_map_generation(map_in)
    print('Probability map generation:', idx)
    map_save = np.concatenate((25 * map_in, 2.5 * map_prob), axis=1)
    file_name = 'map_output_DB\\ProbMap_' + str(idx) + '.bmp'
    image.save_img(file_name, map_save)
Code example #23
        theta = np.linspace(0.0, 180.0, biggest_dim, endpoint=False)
        #theta_limited = np.concatenate([theta[:(theta.size-wedge)//2], theta[(theta.size+wedge)//2:]]
        sinogram = radon(ood_img, theta=theta, circle=False)
        sinogram[:, :(15 * 512) // 180] = .0
        sinogram[:, -(15 * 512) // 180:] = .0

        #lat reconstruction of ood
        ood_img_lat_rec = iradon(sinogram, theta=theta, circle=False)
        ood_img_lat_rec = skimage.exposure.rescale_intensity(
            ood_img_lat_rec, out_range=(0.0, 1.0))

        #do all inspection logging here
        if i % 30 == 0:
            fname = tmp_path + '/' + 'ood_target.png'
            save_img(fname, ood_img[:, :, np.newaxis])
            mlflow.log_artifact(fname, artifact_path=artifact_path)

            fname = tmp_path + '/' + 'ood_img_lat_rec.png'
            save_img(fname, ood_img_lat_rec[:, :, np.newaxis])
            mlflow.log_artifact(fname, artifact_path=artifact_path2)

        #do all continual saving here
        #saving
        np.savez_compressed(
            os.path.join(
                npz_path,
                "mayo_test_{:03d}.npz".format(i),
            ),
            input=input_img[np.newaxis, :, :, :],
            target=target_img[np.newaxis, :, :, :],
コード例 #24
0
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import array_to_img
# load the image
img = load_img('bondi_beach.jpg')
print(type(img))
# convert to numpy array
img_array = img_to_array(img)
print(img_array.dtype)
print(img_array.shape)
# convert back to image
img_pil = array_to_img(img_array)
print(type(img_pil))

# example of saving an image with the Keras API
from keras.preprocessing.image import load_img
from keras.preprocessing.image import save_img
from keras.preprocessing.image import img_to_array
# load the image as grayscale
img = load_img('bondi_beach.jpg', grayscale=True)
# convert image to a numpy array
img_array = img_to_array(img)
# save the image with a new filename
save_img('bondi_beach_grayscale.jpg', img_array)
# load the image to confirm it was saved correctly
img = load_img('bondi_beach_grayscale.jpg')
print(type(img))
print(img.format)
print(img.mode)
print(img.size)
img.show()
Code example #25
File: Lab2.py Project: YongDong11467/Style_Transfer
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor],
                                axis=0)
    model = vgg19.VGG19(include_top=False,
                        weights='imagenet',
                        input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = [
        "block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1",
        "block5_conv1"
    ]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    cl = contentLoss(contentOutput, genOutput)
    sl = 0.0
    print("   Calculating style loss.")
    for layerName in styleLayerNames:
        styleLayer = outputDict[layerName]
        styleOutput = styleLayer[1, :, :, :]
        genOutput = styleLayer[2, :, :, :]
        sl += styleLoss(styleOutput, genOutput)
    loss = CONTENT_WEIGHT * cl + STYLE_WEIGHT * sl

    gradient = K.gradients(loss, genTensor)
    outputs = [loss]
    outputs += gradient
    kFunction = K.function([genTensor], outputs)

    class Wrapper:
        def loss(self, x):
            x = x.reshape((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
            outs = kFunction([x])
            self._gradients = outs[1].flatten().astype("float64")
            return outs[0]

        def gradients(self, x):
            return self._gradients

    wrapper = Wrapper()

    print("   Beginning transfer.")
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        position, tLoss, dictionary = fmin_l_bfgs_b(wrapper.loss,
                                                    tData.flatten(),
                                                    fprime=wrapper.gradients,
                                                    maxfun=1000)
        print("      Loss: %f." % tLoss)
        img = deprocessImage(position)
        saveFile = "generatedImg" + str(i) + ".jpg"
        save_img(saveFile, img)
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
コード例 #26
0
import os
import numpy as np
from keras.preprocessing.image import load_img, save_img, img_to_array, array_to_img


def Convert(nplist):
    a, b, c = nplist.shape
    outlist = np.ones((a, b, c), dtype=np.int16)
    for x in range(a):
        for y in range(b):
            outlist[x, y, 0] = (nplist[x, y, 0] + nplist[x, y, 1] +
                                nplist[x, y, 2]) / 3
            outlist[x, y, 1] = (nplist[x, y, 0] + nplist[x, y, 1] +
                                nplist[x, y, 2]) / 3
            outlist[x, y, 2] = (nplist[x, y, 0] + nplist[x, y, 1] +
                                nplist[x, y, 2]) / 3
    return outlist


fileList = os.listdir(os.curdir)

counter = 1
for file in fileList:
    if file != "GrayConvert.py":
        img = Convert(img_to_array(load_img(file)))
        print(img.shape)
        print(counter)
        save_img(file, img)
        counter = counter + 1
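
The per-pixel loops in Convert are very slow in Python. A vectorized equivalent (the same channel averaging, up to integer rounding) using NumPy broadcasting:

import numpy as np

def convert_vectorized(nplist):
    # average the three channels, then broadcast the gray value back to RGB
    gray = nplist.mean(axis=2, keepdims=True).astype(np.int16)
    return np.repeat(gray, 3, axis=2)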
Code example #27
File: crop.py Project: TTLuze/rank-reid
def crop_img():
    for i, image_name in enumerate(sorted(os.listdir(source_path))):
        image_path = os.path.join(source_path, image_name)
        x = image.load_img(image_path)
        x = image.img_to_array(x)

        camera_id = i // 264 + 7
        location_id = (i % 24) // 2
        face_id = i % 2  # face orientation: 0 = facing forward, 1 = facing backward
        person_id = (i % 264) // 24 + 1502

        if i < 264:
            if i % 24 == 0 or i % 24 == 1:
                x = x[590:1030, 660:800, :]
            elif i % 24 == 2 or i % 24 == 3:
                x = x[600:990, 570:680, :]
            elif i % 24 == 4 or i % 24 == 5:
                x = x[610:950, 490:600, :]
            elif i % 24 == 6 or i % 24 == 7:
                x = x[610:930, 430:550, :]
            elif i % 24 == 8 or i % 24 == 9:
                x = x[580:980, 990:1120, :]
            elif i % 24 == 10 or i % 24 == 11:
                x = x[600:940, 880:990, :]
            elif i % 24 == 12 or i % 24 == 13:
                x = x[610:920, 780:890, :]
            elif i % 24 == 14 or i % 24 == 15:
                x = x[620:900, 700:815, :]
            elif i % 24 == 16 or i % 24 == 17:
                x = x[600:920, 1230:1340, :]
            elif i % 24 == 18 or i % 24 == 19:
                x = x[610:900, 1110:1210, :]
            elif i % 24 == 20 or i % 24 == 21:
                x = x[610:880, 1010:1110, :]
            elif i % 24 == 22 or i % 24 == 23:
                x = x[620:860, 910:1010, :]
        elif 264 <= i < 528:
            if i % 24 == 0 or i % 24 == 1:
                x = x[540:1060, 400:570, :]
            elif i % 24 == 2 or i % 24 == 3:
                x = x[560:990, 470:640, :]
            elif i % 24 == 4 or i % 24 == 5:
                x = x[560:940, 540:690, :]
            elif i % 24 == 6 or i % 24 == 7:
                x = x[570:910, 580:720, :]
            elif i % 24 == 8 or i % 24 == 9:
                x = x[540:1060, 840:1060, :]
            elif i % 24 == 10 or i % 24 == 11:
                x = x[560:980, 860:1060, :]
            elif i % 24 == 12 or i % 24 == 13:
                x = x[560:940, 880:1040, :]
            elif i % 24 == 14 or i % 24 == 15:
                x = x[570:900, 890:1030, :]
            elif i % 24 == 16 or i % 24 == 17:
                x = x[530:1060, 1360:1550, :]
            elif i % 24 == 18 or i % 24 == 19:
                x = x[560:990, 1290:1480, :]
            elif i % 24 == 20 or i % 24 == 21:
                x = x[560:940, 1250:1430, :]
            elif i % 24 == 22 or i % 24 == 23:
                x = x[570:900, 1210:1350, :]
        elif i >= 528:
            if i % 24 == 0 or i % 24 == 1:
                x = x[570:920, 650:780, :]
            elif i % 24 == 2 or i % 24 == 3:
                x = x[570:900, 770:890, :]
            elif i % 24 == 4 or i % 24 == 5:
                x = x[570:860, 890:990, :]
            elif i % 24 == 6 or i % 24 == 7:
                x = x[580:840, 970:1080, :]
            elif i % 24 == 8 or i % 24 == 9:
                x = x[540:980, 920:1060, :]
            elif i % 24 == 10 or i % 24 == 11:
                x = x[550:920, 1030:1160, :]
            elif i % 24 == 12 or i % 24 == 13:
                x = x[560:900, 1130:1250, :]
            elif i % 24 == 14 or i % 24 == 15:
                x = x[570:870, 1210:1320, :]
            elif i % 24 == 16 or i % 24 == 17:
                x = x[520:1020, 1290:1470, :]
            elif i % 24 == 18 or i % 24 == 19:
                x = x[530:970, 1390:1550, :]
            elif i % 24 == 20 or i % 24 == 21:
                x = x[540:930, 1450:1630, :]
            elif i % 24 == 22 or i % 24 == 23:
                x = x[560:890, 1500:1630, :]

        x = image.array_to_img(x)
        image_name = '{:0>4}_c{}l{:0>2}f{}.JPG'.format(person_id, camera_id, location_id, face_id)
        image_path = os.path.join(target_path, image_name)
        image.save_img(image_path, x)
Code example #28
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import os
from keras.preprocessing import image
import cv2
dirName = r'imagestest/'
imagelist = []
for (dirpath, dirnames, filenames) in os.walk(dirName):

    for file in filenames:
        imagelist.append(file)

        print(file)

dirName = 'imagestest/'
#images folder path

for img in imagelist:

    filepath = dirName + img
    imd1 = image.load_img(filepath)
    imarr1 = np.array(imd1)
    #indexing.append(index)
    resized_im = cv2.resize(imarr1, (640, 480), interpolation=cv2.INTER_CUBIC)

    writepath = 'keras-frcnn-master/test_images/' + img
    image.save_img(writepath, resized_im)
Code example #29
        self.loss_value = None
        self.grad_values = None
        return grad_values


evaluator = Evaluator()

x = preprocess_image(CONTENT_IMAGE_PATH)

# ____________________APPLY ALTERNATION____________________

for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    if i % 10 == 0:
        img = deprocess_image(x.copy(), img_height, img_width)
        fname = str(content_weight) + '_' + str(
            style_weight) + '_at_iteration_%d.png' % i
        kpi.save_img(fname, img)
        print('Image saved as', fname)
    end_time = time.time()
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
    if K.image_data_format() == 'channels_first':
        print("ERROR")
Code example #30
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            keep_one=False,
            quality=100,
            attack_strength=2.0):
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = 10

    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    #keep only correctly predicted inputs
    batch_size = 64
    preds_test = np.argmax(model.predict(x_test, verbose=0), axis=1)
    inds_correct = np.where(preds_test == y_test.argmax(axis=1))[0]
    x, y = x_test[inds_correct], y_test[inds_correct]
    x, y = x[:test_size], y[:test_size]

    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")
    #print(targets)

    if keep_one:
        x = np.repeat(np.array([x[0, :, :, :]]), y.shape[0], axis=0)
        y = model.predict(x)
    adv_x = craft_attack(model,
                         x,
                         attack_name,
                         y=targets,
                         epsilon=attack_strength)
    yadv = np.argmax(model.predict(adv_x), axis=1)

    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)
    os.makedirs("{}/ref".format(pictures_path), exist_ok=True)

    for i, adv in enumerate(adv_x):
        predicted = yadv[i]
        encoded = np.argmax(targets[i])
        truth = np.argmax(y[i])
        adv_path = "{}/{}_predicted{}_encoded{}_truth{}.{}".format(
            pictures_path, i, predicted, encoded, truth, extension)
        real_path = "{}/ref/{}.{}".format(pictures_path, i, extension)

        if extension == "png":
            q = int(10 - quality / 100)
            save_img(adv_path, adv, compress_level=q)
            save_img(real_path, x[i], compress_level=q)
        elif extension == "jpg":
            save_img(adv_path, adv, quality=quality)
            save_img(real_path, x[i], quality=quality)

    return experiment_time
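
_encodeString is not shown; the surrounding code only requires that it turn the message into a string of decimal digits, each digit serving as one of the 10 target classes. One hypothetical implementation (the scheme is a guess, not the project's actual encoding):

def _encodeString(msg):
    # hypothetical: map each character to its 3-digit ASCII code so that
    # every digit is a class label in [0, 9]
    return "".join("{:03d}".format(ord(c)) for c in msg)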
Code example #31
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))

# we will stitch the best 64 filters on an 8 x 8 grid.
n = 8

# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]

# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))

# fill the picture with our saved filters
for i in range(n):
    for j in range(n):
        img, loss = kept_filters[i * n + j]
        width_margin = (img_width + margin) * i
        height_margin = (img_height + margin) * j
        stitched_filters[
            width_margin: width_margin + img_width,
            height_margin: height_margin + img_height, :] = img

# save the result to disk
save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
コード例 #32
0
def visualize_filter(model, layer_name, filters=None, image=None, save_path='stitched_filters.png',
                     n=1, loss_min=0, grad_check=True, steps=20, step_size=1):
    """
    This creates a visualization of the filters in a given layer by using gradient ascent on either a random input
    image, or a selected image. It returns a list of kept filters and save an nxn image of filters stiched together
    and saved to path, save_path. We perform gradient asscent to maximize a loss function which maximizes the 
    output of that node, and forces others to zero.
    
    Input:
    ------------------
    model: a keras model
    
    layer_name: a string identifying a layer in the above model, (layer must have output dimension of 4)
    
    filters: None or some iterable list of indices of filters to visualizes. If none, all features will be 
             visualized (or at least attempted). 
    
    image: if not none, the image to use as a starting point in gradient ascent
            
    save_path: a string, where the image is saved
    
    n: an integer, the square root of how many filters to use in the output image. (will error if not enough 
       kept filters)
    
    loss_min: float, a lower bound on the loss of filter we will keep
    
    grad_check: bool, if true we check if the loss of a filter is 0 on an image, and skip it if true
            
    Output: 
    --------------------
    
    kept_filters: a list of images equal in size to those of the inputs of model.

    
    NOTE: requires channel last format as of current version. adapted from: 
    https://blog.keras.io/how-convolutional-neural-networks-see-the-world.html
    
    """
    

    import numpy as np
    import time
    from keras.preprocessing.image import save_img
    from keras import backend as K
    from PIL import Image  # used when a starting image is supplied
    
    # util function to convert a tensor into a valid image
    def deprocess_image(x):
        # normalize tensor: center on 0., ensure std is 0.1
        x -= x.mean()
        x /= (x.std() + K.epsilon())
        x *= 0.1

        # clip to [0, 1]
        x += 0.5
        x = np.clip(x, 0, 1)

        # convert to an RGB array
        x *= 255
        if K.image_data_format() == 'channels_first':
            x = x.transpose((1, 2, 0))
        x = np.clip(x, 0, 255).astype('uint8')
        return x
    
    def l2_norm(x):
        return K.eval(K.sqrt(K.mean(K.square(x))))

    def normalize(x):
        # utility function to normalize a tensor by its L2 norm
        return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
    
    
    #infer image size and input tensor from model
    img_width = int(model.input.shape[1])
    img_height = int(model.input.shape[2])
    input_img = model.input
    
    
    if image:  # ... load and preprocess the starting image
        size = (img_width, img_height)
        im = Image.open(image)
        im = im.resize(size, Image.ANTIALIAS)
        im.load()
        img_data = np.asarray(im, dtype="float64")
        img_data = np.expand_dims(img_data, axis=0)
    
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    
    # we build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    layer_output = layer_dict[layer_name].output
    
    if not filters:  # None or an empty list means: loop over every filter in the layer
        filters = range(int(layer_output.shape[3]))
    
    kept_filters = []
    for output_index in filters:
        
        print('Processing filter %d' % output_index)
        start_time = time.time()
        
        loss = K.mean(layer_output[:,:,:,output_index])

        #we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        if image:  # ... start from the supplied image
            input_img_data = img_data
        else:  # ... start from a random image
            input_img_data = np.random.random((1, img_width, img_height, 3))
            # center the noise around gray (128) with a small spread
            input_img_data = (input_img_data - 0.5) * 20 + 128



        for i in range(steps):
            loss_value, grads_value = iterate([input_img_data])
            input_img_data += grads_value * step_size

            print('Current loss value:', loss_value)
            if grad_check and loss_value <= 0:
                # some filters get stuck at 0; we can skip them
                break

        # decode the resulting input image
        if loss_value > loss_min:
            img = deprocess_image(input_img_data[0])
            kept_filters.append((img, loss_value))

            
        end_time = time.time()
        print('Filter %d processed in %ds' % (output_index, end_time - start_time))


    # we stitch the best n^2 filters on an n x n grid.
    #---------------------------------------------------------------


    # the filters that have the highest loss are assumed to be better-looking.
    # we will only keep the top n^2 filters in the image.
    kept_filters.sort(key=lambda x: x[1], reverse=True)
    kept_filters = kept_filters[:n * n]

    # build a black picture with enough space for
    # our n x n filters of size equal to the input dimension of our model, with a 5px margin in between
    margin = 5
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin
    stitched_filters = np.zeros((width, height, 3))

    # fill the picture with our saved filters
    for i in range(n):
        for j in range(n):
            img, loss = kept_filters[i * n + j]
            stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                             (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img

    # save the result to disk
    save_img(save_path, stitched_filters)
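A minimal usage sketch, assuming a VGG16 backbone (a fixed input_shape is required so the function can infer the image size; the layer name below is only an example):

if __name__ == '__main__':
    from keras.applications import vgg16
    model = vgg16.VGG16(weights='imagenet', include_top=False,
                        input_shape=(128, 128, 3))
    visualize_filter(model, 'block5_conv1',
                     filters=range(8), n=2,
                     save_path='block5_conv1_filters.png')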
コード例 #33
0
ファイル: deepDream.py プロジェクト: AmmarkoV/MyScripts
img = preprocess_image(base_image_path)
if K.image_data_format() == 'channels_first':
    original_shape = img.shape[2:]
else:
    original_shape = img.shape[1:3]
successive_shapes = [original_shape]
# compute the target shapes of the successive octaves,
# each one octave_scale times smaller than the last
for i in range(1, num_octave):
    shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)
# reverse so that we process the smallest octave first
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])

for shape in successive_shapes:
    print('Processing image shape', shape)
    img = resize_img(img, shape)
    img = gradient_ascent(img,
                          iterations=iterations,
                          step=step,
                          max_loss=max_loss)
    # re-inject the detail lost while shrinking: the difference between the
    # original at this scale and the upscaled smaller version is exactly the
    # high-frequency detail destroyed by downscaling
    upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
    same_size_original = resize_img(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img

    img += lost_detail
    shrunk_original_img = resize_img(original_img, shape)

save_img(result_prefix + '.png', deprocess_image(np.copy(img)))
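The resize_img helper is not shown in this excerpt; a minimal sketch following the standard Keras deep-dream example, using scipy.ndimage.zoom:

import scipy.ndimage

def resize_img(img, size):
    # zoom the batch so its spatial dimensions match `size`,
    # leaving the batch and channel axes untouched
    img = np.copy(img)
    if K.image_data_format() == 'channels_first':
        factors = (1, 1,
                   float(size[0]) / img.shape[2],
                   float(size[1]) / img.shape[3])
    else:
        factors = (1,
                   float(size[0]) / img.shape[1],
                   float(size[1]) / img.shape[2],
                   1)
    return scipy.ndimage.zoom(img, factors, order=1)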

コード例 #34
0
import argparse
import glob
from keras.preprocessing.image import load_img, img_to_array, save_img

parser = argparse.ArgumentParser(description='Extract channel')
parser.add_argument('--src_dir', type=str)
parser.add_argument('--dest_dir', type=str)
parser.add_argument('--size', type=int)

# Image file extension, for example jpg or png.
parser.add_argument('--format', type=str)

# Number of channel to extract. Select from 0 for red, 1 for green
# and 2 for blue.
parser.add_argument('--channel', type=int)
args = parser.parse_args()

SRC_DIR = args.src_dir
DEST_DIR = args.dest_dir
FORMAT = args.format
src_files = glob.glob(f'{SRC_DIR}/*.{FORMAT}')

for f in src_files:
    # target_size expects a (height, width) tuple, not a single int
    img = load_img(f, target_size=(args.size, args.size))
    img = img_to_array(img)
    channel = img[:, :, args.channel:args.channel + 1]

    fname = f.split('/')[-1]
    save_path = f'{DEST_DIR}/{fname}'
    save_img(save_path, channel)
    print(f'Channel saved to {save_path}')
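A hypothetical invocation (the script and directory names are placeholders):

    python extract_channel.py --src_dir ./frames --dest_dir ./red_channel --size 256 --format png --channel 0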