Example #1
X_train_style = np.array([
    load_image(args.style,
               size=(height, width),
               preprocess_type='vgg19',
               verbose=True)
])
print("X_train_style shape:", X_train_style.shape)

print('Loading VGG headless 5')
modelWeights = "%s/%s-%s-%s%s" % (vgg19Dir, 'vgg-19', dim_ordering, K._BACKEND,
                                  '_headless_5_weights.hdf5')
model = VGG_19_headless_5(input_shape,
                          modelWeights,
                          trainable=False,
                          pooling_type=args.pooling_type)
layer_dict, layers_names = get_layer_data(model, 'conv_')
print('Layers found: ' + ', '.join(layers_names))

input_layer = model.input

print('Building white noise images')
input_data = create_noise_tensor(height, width, channels)

mean_losses = {}
print('Using optimizer: ' + optimizer)
current_iter = 1
for layer_name in layers_names:
    prefix = str(current_iter).zfill(4)

    print('Creating labels for ' + layer_name)
    out = layer_dict[layer_name].output
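
The loop above builds one set of feature labels per conv_* layer, which is the per-layer reconstruction setup from Gatys et al. As context, here is a minimal sketch of the Gram matrix such style labels are usually built from, written against the Keras backend API and assuming a channels-last (height, width, channels) feature map; the repo's own helper may differ:

from keras import backend as K

def gram_matrix(x):
    # x: one (height, width, channels) feature map.
    # Put channels first, flatten the spatial dimensions, then take the
    # channel-by-channel inner products.
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))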
Example #2
X_train_style = np.array([
    load_image(args.style,
               size=(height, width),
               preprocess_type='vgg19',
               verbose=True)
])
print("X_train_style shape:", X_train_style.shape)

print('Loading VGG headless 5')
modelWeights = "%s/%s-%s-%s%s" % (vgg19Dir, 'vgg-19', dim_ordering, K._BACKEND,
                                  '_headless_5_weights.hdf5')
model = VGG_19_headless_5(input_shape,
                          modelWeights,
                          trainable=False,
                          pooling_type=args.pooling_type)
layer_dict, layers_names = get_layer_data(
    model, 'conv_(1|2|3|4)')  # remove conv_5_* layers
print('Layers found: ' + ', '.join(layers_names))

input_layer = model.input

print('Building white noise images')
input_data = create_noise_tensor(height, width, channels)

print('Using optimizer: ' + optimizer)
current_iter = 1
alpha = 5e1  # 1e2
for idx_content, lc_name in enumerate(layers_names):
    for idx_style, ls_name in enumerate(layers_names):
        print('Creating labels for content ' + lc_name + ' and style ' +
              ls_name)
        out_style = layer_dict[ls_name].output
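
Example #2 pairs each content layer with each style layer and weights the content term with alpha. Below is a hedged sketch of how such a weighted total loss is typically assembled; the mean-squared terms are an assumption for illustration, not necessarily the repo's exact loss functions:

from keras import backend as K

def total_loss(content_pred, content_label, style_pred, style_label, alpha):
    # Weighted sum of a content term and a style term, in the spirit of
    # Gatys et al.; alpha trades content fidelity against style strength.
    content_loss = K.mean(K.square(content_pred - content_label))
    style_loss = K.mean(K.square(style_pred - style_label))
    return alpha * content_loss + style_loss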
Example #3
    # We separate the image_freq and the histogram_freq just to avoid dumping everything at the same time
    callbacks.append(tb)
callbacks.append(
    ModelCheckpointBatch(st_model,
                         chkp_dir=prefixed_dir + '/chkp',
                         nb_step_chkp=200))
callbacks.append(HistoryBatch())

print('Loading VGG headless 4')
modelWeights = "%s/%s-%s-%s%s" % (vgg19Dir, 'vgg-19', dim_ordering, K._BACKEND,
                                  '_headless_4_weights.hdf5')
vgg_model = VGG_19_headless_4(input_shape,
                              modelWeights,
                              trainable=False,
                              pooling_type=args.pooling_type)
layer_dict, layers_names = get_layer_data(vgg_model, 'conv_')
style_layers = ['conv_1_2', 'conv_2_2', 'conv_3_4', 'conv_4_2']
style_layers_mask = [name in style_layers for name in layers_names]
# Note: each pooling layer strongly limits how the gradient flows back up.
# Past roughly two pooling layers, if you train with mini-batches and a lot
# of noise, the gradient signal becomes too weak to converge.
content_layers = ['conv_2_2']
content_layers_mask = [name in content_layers for name in layers_names]

print('Building full model')
mean = load_mean(name='vgg19')  # BGR
if K._BACKEND == "tensorflow":
    import tensorflow as tf
    preprocessed_output = Lambda(
        lambda x: tf.reverse(x, [False, False, False, True]) - mean,
        name="ltv")(st_model.output)  # RGB -> BGR -> BGR - mean