loss=utilities.vae_loss,
    learning_rate=0.00001)  # adagrad? #adadelta #nesterov did good,
# Wrap the previously-built network graph in a trainable/predictable model.
# NOTE(review): `network_instance`, `tflearn`, `utilities`, and
# `expanded_full_X_HSV` are defined earlier in the file, outside this fragment.
model = tflearn.DNN(network_instance)
print("LOADING MODEL.")

# Suffix selecting which saved checkpoint variant to restore.
model_name = "_anime_V2_poke5"
# Restore pre-trained weights; the long prefix encodes the training
# hyper-parameters (optimizer, loss, latent size, filter counts, decoder width).
model.load("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
           "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8" +
           model_name + ".tflearn")
print("MODEL SUCCESSFULLY LOADED.")
# Split the loaded autoencoder into its encoder and decoder/generator halves.
encoder_model = utilities.get_encoder_network(model)
generator_model = utilities.get_generative_network(model)
print("LOADED GENERATOR MODEL.")

# Encode every (HSV) input image into the latent space, 64 samples per batch.
encoded_pokemon = utilities.predict_batches(expanded_full_X_HSV,
                                            encoder_model,
                                            in_samples_per_batch=64,
                                            in_input_name='input_images')

# Linearly interpolate in latent space between two encoded pokemon.
target_pokemon = encoded_pokemon[chosen_pokemon]
second_target_pokemon = encoded_pokemon[chosen_pokemon_2]
# Per-step latent delta: (target - second) split into
# `interpolation_intervals` equal steps.
target_pokemon_difference = target_pokemon - second_target_pokemon
target_pokemon_difference = target_pokemon_difference / float(
    interpolation_intervals)  # Divided by the number of samples
# Step i walks from `second_target_pokemon` towards `target_pokemon`.
# NOTE(review): the endpoint itself is never emitted (i stops at
# interpolation_intervals - 1) — looks intentional, but worth confirming.
interpolated_pokemon = [
    second_target_pokemon + target_pokemon_difference * i
    for i in range(interpolation_intervals)
]
interpolated_transfer = utilities.predict_batches(interpolated_pokemon,
                                                  generator_model,
# Exemplo n.º 2
# 0
# NOTE(review): the two lines above are scraper artifacts separating two
# unrelated code fragments; the statements on either side are truncated.
        n_epoch=1,
        shuffle=True,
        show_metric=True,
        snapshot_epoch=True,
        batch_size=128,
        # validation_set=0.15,  # It also accepts a float < 1 to performs a data split over training data.
        validation_set=(expanded_test_X, expanded_test_X
                        ),  # We use it for validation for now. But also test.
        run_id='encoder_decoder')

    print("getting samples to show on screen.")
    encode_decode_sample = []
    if predict_full_dataset:
        predicted_X = X
        predicted_Y = Y_full_RGB
        encode_decode_sample = utilities.predict_batches(
            expanded_full_X_HSV, model, in_samples_per_batch=64)
    else:
        predicted_X = small_X
        predicted_Y = small_Y
        encode_decode_sample = utilities.predict_batches(
            expanded_small_X, model, in_samples_per_batch=64)

    # encode_decode_sample = model.predict(expanded_X)  # Just to test training with RGB. It seemed worse.
    print("The number of elements in the predicted samples is: " +
          str(len(encode_decode_sample)))
    reconstructed_pixels = []
    reconstructed_types = []
    # Made a function to avoid repeating that fragment of code in other python files.
    reconstructed_pixels, reconstructed_types = utilities.reconstruct_pixels_and_types(
        encode_decode_sample)
# Evaluate the non-transfer ("original") model: reconstruct the whole dataset
# and the training subset, then report MSE (and optionally SSIM) against the
# ground-truth RGB images.
model = tflearn.DNN(network_instance)

# Toggle for the (slower) SSIM comparison below.
print_ssim_scores = True

print("LOADING MODEL.")
first_model_name = "_regular_V3_no_noise2"  # This is the one for the encoder-only part. From Jul_28
# first_model_name = "_regular_V2_more_noise2"  # this was from Jul_23
# This hasn't been commited yet, due to network restrictions (AKA slow upload connection).
# Double check to have a folder with the correct path here.
model.load("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
           "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8" +
           first_model_name + ".tflearn")

print("getting samples to show on screen.")
# Reconstruct the WHOLE dataset (HSV input) in batches of 64.
encode_decode_sample_original = utilities.predict_batches(
    expanded_full_X_HSV, model, in_samples_per_batch=64)
reconstructed_pixels_original, reconstructed_types_original = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_original)
print("MSE value for the " + first_model_name +
      " model, over the WHOLE dataset is: ")
# NOTE(review): these utilities appear to print their scores themselves —
# the return values (if any) are not captured here.
utilities.mean_square_error(reconstructed_pixels_original, X_full_RGB)
if print_ssim_scores:
    print("SSIM is: ")
    utilities.ssim_comparison(reconstructed_pixels_original, X_full_RGB)

# Now, training only:
encode_decode_sample_original_train = utilities.predict_batches(
    expanded_X, model, in_samples_per_batch=64)
reconstructed_pixels_original_train, reconstructed_types_original_train = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_original_train)
print("MSE value for the " + first_model_name +
# Exemplo n.º 4
# 0
# NOTE(review): the two lines above are scraper artifacts separating two
# unrelated code fragments; the statements on either side are truncated.
    loss=utilities.vae_loss,
    learning_rate=0.00001)  # adagrad? #adadelta #nesterov did good,

# Load the transfer-learning model and produce two reconstruction sets:
# one from the original inputs and one from inputs with swapped/fake types.
model = tflearn.DNN(network_instance)
print("LOADING MODEL.")

#####################
# TRANSFER RECONSTRUCTED
# Now, we can load the transfer learning model.
# second_model_name = "_V3_noise2"  # 21 of july  # "_V3_noise4" THIS WAS A GOOD MODEL.
first_model_name = "_anime_V2_poke5"
model.load("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
           "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8" +
           first_model_name + ".tflearn")

# Reconstruct the unmodified inputs with the transfer model.
# NOTE(review): `expanded_originals` is defined outside this fragment.
encode_decode_sample_transfer = utilities.predict_batches(
    expanded_originals, model, in_samples_per_batch=64)
reconstructed_transfer, reconstructed_types_transfer = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_transfer)

###################
# SWAPPED TRANSFER
# Same model, but fed inputs whose type vectors were swapped/faked;
# `reconstructed_types_transfer` is intentionally overwritten here.
encode_decode_sample_transfer = utilities.predict_batches(
    originals_fake_type, model, in_samples_per_batch=64)
swapped_transfer, reconstructed_types_transfer = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_transfer)

###################
# RECONSTRUCTED NON-TRANSFER
second_model_name = "_regular_V3_no_noise2"  # This is the one for the encoder-only part. From Jul_28
# This hasn't been commited yet, due to network restrictions (AKA slow upload connection).
# Double check to have a folder with the correct path here.
# Exemplo n.º 5
# 0
# NOTE(review): the two lines above are scraper artifacts separating two
# unrelated code fragments; the code on either side is truncated.
        # NOTE(review): this fragment starts mid if/else — the opening
        # conditions are outside this view, so branch meanings are inferred
        # from the comments below and should be confirmed against the caller.
        exporting_RGB = X_full_RGB
    else:
        # Non-regional to regional
        expanded_fake_X = np.append(X_full_HSV_non_regional,
                                    new_types_array,
                                    axis=1)
        exporting_RGB = X_full_RGB_non_regional
        # Regional to Non-regional
        # expanded_fake_X = np.append(X_full_HSV_regional, new_types_array, axis=1)
        # exporting_RGB = X_full_RGB_regional
else:
    # Small-sample path: attach the fake type vectors to the small input set
    # and export its matching RGB ground truth.
    expanded_fake_X = np.append(small_X, new_types_array, axis=1)
    exporting_RGB = small_X_RGB

print("getting samples to show on screen.")
encode_decode_sample_original = utilities.predict_batches(
    predicted_X, model, in_samples_per_batch=64)
encode_decode_sample_fake = utilities.predict_batches(expanded_fake_X,
                                                      model,
                                                      in_samples_per_batch=64)

reconstructed_pixels_original, reconstructed_types_original = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_original)

reconstructed_pixels_fake, reconstructed_types_fake = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_fake)

# Keep only the images selected for display/export.
# NOTE(review): `indices_to_predict` is defined earlier, outside this fragment.
wanted_indices_RGB = [exporting_RGB[i] for i in indices_to_predict]
wanted_indices_pixels_original = [
    reconstructed_pixels_original[i] for i in indices_to_predict
]
wanted_indices_pixels_fake = [