# Linearly interpolate in latent space between two encoded pokemon and decode
# every intermediate latent code with the generator network.
second_target_pokemon = encoded_pokemon[chosen_pokemon_2]
# Step vector: walk from the second pokemon's latent code towards the first
# one in `interpolation_intervals` equal steps. float() kept so the division
# is true division even if the operands happen to be plain ints.
target_pokemon_difference = (
    target_pokemon - second_target_pokemon) / float(interpolation_intervals)
# i == 0 reproduces the second pokemon itself; the last sample stops one step
# short of `target_pokemon` (the endpoint is excluded).
interpolated_pokemon = [
    second_target_pokemon + target_pokemon_difference * i
    for i in range(interpolation_intervals)
]
interpolated_transfer = utilities.predict_batches(interpolated_pokemon,
                                                  generator_model,
                                                  in_samples_per_batch=64,
                                                  in_input_name='input_noise')

reconstructed_pixels_generated_transfer, reconstructed_types_generated_transfer = \
    utilities.reconstruct_pixels_and_types(interpolated_transfer)
##########################################################################
# Swap in the weights of a second checkpoint (weights_only=True keeps the
# current graph structure) and re-encode the whole dataset with its encoder.
second_model_name = "_regular_V3_no_noise2"
_model_path = ("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
               "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8")
model.load(_model_path + second_model_name + ".tflearn", weights_only=True)
encoder_model_transfer = utilities.get_encoder_network(model)
generator_model_transfer = utilities.get_generative_network(model)
print("LOADED GENERATOR MODEL.")
# Latent codes for every sprite in the full HSV dataset.
encoded_pokemon = utilities.predict_batches(
    expanded_full_X_HSV, encoder_model_transfer,
    in_samples_per_batch=64, in_input_name='input_images_1')

# Latent vector of the pokemon chosen for the experiments below.
target_pokemon = encoded_pokemon[chosen_pokemon]
# ---- Example #2 ("예제 #2") — snippet separator left by the scraper ----
        # NOTE(review): this fragment starts mid-branch — the matching `if`
        # (presumably `if predict_full_dataset:`, which is tested again below)
        # is not visible in this chunk.
        predicted_Y = Y_full_RGB
        encode_decode_sample = utilities.predict_batches(
            expanded_full_X_HSV, model, in_samples_per_batch=64)
    else:
        # Predict on the small subset instead of the full dataset.
        predicted_X = small_X
        predicted_Y = small_Y
        encode_decode_sample = utilities.predict_batches(
            expanded_small_X, model, in_samples_per_batch=64)

    # encode_decode_sample = model.predict(expanded_X)  # Just to test training with RGB. It seemed worse.
    print("The number of elements in the predicted samples is: " +
          str(len(encode_decode_sample)))
    reconstructed_pixels = []
    reconstructed_types = []
    # Made a function to avoid repeating that fragment of code in other python files.
    reconstructed_pixels, reconstructed_types = utilities.reconstruct_pixels_and_types(
        encode_decode_sample)

    print("Exporting reconstructed pokemon as an image.")
    # utilities.export_as_atlas(X_full_RGB, reconstructed_pixels)  # I have checked that it works perfectly.
    # Export a CSV of predicted vs. true types; returns the indices of the
    # sprites whose type was predicted correctly.
    if predict_full_dataset:
        correct_indices = utilities.export_types_csv(Y_full_RGB,
                                                     reconstructed_types)
    else:
        correct_indices = utilities.export_types_csv(small_Y,
                                                     reconstructed_types)

# Optionally export an atlas containing only the pokemon whose types the NN
# predicted correctly:
# correct_X_RGB = [X_full_RGB[i] for i in correct_indices]
# correct_reconstructed_pixels = [reconstructed_pixels[i] for i in correct_indices]
# utilities.export_as_atlas(correct_X_RGB, correct_reconstructed_pixels, name_annotations='correct')
print_ssim_scores = True  # Toggle the SSIM report below.

print("LOADING MODEL.")
# Encoder-side checkpoint (from Jul_28); the Jul_23 alternative was
# "_regular_V2_more_noise2". Not committed yet due to network restrictions
# (AKA slow upload connection) — double check the saved_models path exists.
first_model_name = "_regular_V3_no_noise2"
_model_path = ("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
               "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8")
model.load(_model_path + first_model_name + ".tflearn")

print("getting samples to show on screen.")
# Reconstruction-quality report over the WHOLE dataset.
encode_decode_sample_original = utilities.predict_batches(
    expanded_full_X_HSV, model, in_samples_per_batch=64)
reconstructed_pixels_original, reconstructed_types_original = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_original)
print("MSE value for the " + first_model_name +
      " model, over the WHOLE dataset is: ")
utilities.mean_square_error(reconstructed_pixels_original, X_full_RGB)
if print_ssim_scores:
    print("SSIM is: ")
    utilities.ssim_comparison(reconstructed_pixels_original, X_full_RGB)
# Now, training only:
# Same reconstruction-quality report as above, restricted to the training set.
encode_decode_sample_original_train = utilities.predict_batches(
    expanded_X, model, in_samples_per_batch=64)
reconstructed_pixels_original_train, reconstructed_types_original_train = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_original_train)
print("MSE value for the " + first_model_name +
      " model, over the TRAINING dataset is: ")
# NOTE(review): this call is truncated in this chunk — its arguments continue
# past the end of the visible source.
utilities.mean_square_error(
# ---- Example #4 ("예제 #4") — snippet separator left by the scraper ----
# Wrap the already-built network graph in a trainable/predictable DNN object.
model = tflearn.DNN(network_instance)
print("LOADING MODEL.")

#####################
# TRANSFER RECONSTRUCTED
# Load the transfer-learning checkpoint first.
# second_model_name = "_V3_noise2"  # 21 of july  # "_V3_noise4" THIS WAS A GOOD MODEL.
first_model_name = "_anime_V2_poke5"
_model_path = ("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
               "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8")
model.load(_model_path + first_model_name + ".tflearn")

# Reconstructions of the original sprites under the transfer model.
encode_decode_sample_transfer = utilities.predict_batches(
    expanded_originals, model, in_samples_per_batch=64)
reconstructed_transfer, reconstructed_types_transfer = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_transfer)

###################
# SWAPPED TRANSFER
# Same model, fed the inputs whose type labels were swapped.
encode_decode_sample_transfer = utilities.predict_batches(
    originals_fake_type, model, in_samples_per_batch=64)
swapped_transfer, reconstructed_types_transfer = \
    utilities.reconstruct_pixels_and_types(encode_decode_sample_transfer)

###################
# RECONSTRUCTED NON-TRANSFER
# Encoder-only checkpoint from Jul_28. Not committed yet due to network
# restrictions (AKA slow upload connection) — double check the path exists.
second_model_name = "_regular_V3_no_noise2"
# NOTE(review): this call is truncated in this chunk — the path expression
# continues past the end of the visible source.
model.load("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
           "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8" +
# ---- Example #5 ("예제 #5") — snippet separator left by the scraper ----
    # NOTE(review): orphaned loop body — the enclosing `for` over the
    # interpolation steps is not visible in this chunk.
    interpolated_pokemon.append(current_interpolation)
interpolated_transfer = utilities.predict_batches(
    interpolated_pokemon, generator_model,
    in_samples_per_batch=64, in_input_name='input_noise')

# Repeat the chosen pokemon's latent code once per perturbation, then add the
# per-dimension offsets to explore the latent space around it.
modified_encoded_pokemon = (
    np.tile(target_pokemon, (len(latent_dim_changes), 1))
    + np.asarray(latent_dim_changes))

# Check how to do it in batches later.  input_noise_list
encode_decode_generated_samples_transfer = utilities.predict_batches(
    modified_encoded_pokemon, generator_model,
    in_samples_per_batch=64, in_input_name='input_noise')
# encode_decode_generated_samples = generator_model.predict({'input_noise': input_noise_list})

reconstructed_pixels_generated_transfer, reconstructed_types_generated_transfer = \
    utilities.reconstruct_pixels_and_types(encode_decode_generated_samples_transfer)

# Reload the regular (non-transfer) weights into the same graph and rebuild
# the encoder / generator sub-networks from it.
second_model_name = "_regular_V3_no_noise2"
_model_path = ("saved_models/model_Jul_29_optim_adam_loss_vae_loss_"
               "last_activ_relu_latent_128_num_filters_512_1024_decoder_width_8")
model.load(_model_path + second_model_name + ".tflearn", weights_only=True)

encoder_model_transfer = utilities.get_encoder_network(model)
generator_model_transfer = utilities.get_generative_network(model)
print("LOADED GENERATOR MODEL.")

encoded_pokemon = utilities.predict_batches(
    expanded_full_X_HSV, encoder_model_transfer,
    in_samples_per_batch=64, in_input_name='input_images_1')

# Pick one pokemon and repeat it a number of times equal to the total noise samples