Code example #1
0
# --- Fine-tune a pretrained autoencoder, report classification accuracy,
# --- then prepare colors for visualizing the encoded representation.
# NOTE(review): Python 2 syntax (print statements). `autoencoder`, `datas`,
# `labels`, `np`, and `fine_tuning_iterator` are defined earlier in the file.

# train autoencoder with fine-tuning
print "\ntrain autoencoder with fine-tuning ==========\n"
autoencoder.fine_tune(fine_tuning_iterator,
                      supervised=True,
                      learning_rate=0.02,
                      max_epoch=10000,
                      tied=True)
#autoencoder.fine_tune(fine_tuning_iterator, supervised = False, learning_rate = 0.02, max_epoch = 6000)

# encode data (with fine-tuning)
tuned_encoded_datas = autoencoder.encode(datas)
print "encoder (with fine-tuning)================"
print tuned_encoded_datas

# predict data( based on fine tuning )
predicted_datas = autoencoder.predict(datas)
print "predicted ================"
print predicted_datas
# Predicted class = index of the largest output unit (argmax over axis 1).
predicted_labels = predicted_datas.argmax(1)
# Element-wise comparison against the ground-truth labels.
eval_array = (predicted_labels == labels)
correct_count = len(np.where(eval_array == True)[0])
error_count = len(np.where(eval_array == False)[0])
correct_rate = float(correct_count) / (correct_count + error_count)
error_rate = float(error_count) / (correct_count + error_count)
print "correct: {}({})\terror: {}({})".format(correct_count,
                                              "%.2f" % correct_rate,
                                              error_count, "%.2f" % error_rate)
# Release any resources held by the autoencoder (e.g. a session/handles).
autoencoder.close()

#visualize encoded datas
# One color per class — presumably a 3-class problem; verify against `labels`.
colors = ["red", "green", "blue"]
Code example #2
0
    # NOTE(review): this indented section is the interior of a training
    # function whose `def` line is outside this chunk; `model`, `args`,
    # `completeTrainGen`, `steps_per_epoch`, `X_train`, `output_shape_model`,
    # `loader`, `QueryDir`, `GalleryDir`, and `normalize_img` come from
    # elsewhere in the file.

    # Compiling
    model.compile(loss=args.loss, optimizer="adam")

    # Fitting
    # NOTE(review): `model` appears to be a project wrapper (it exposes
    # save_models()), so keywords such as n_epochs/wandb are its own API,
    # not Keras Model.fit's — confirm against the wrapper class.
    model.fit(completeTrainGen,
              steps_per_epoch,
              n_epochs=args.e,
              batch_size=args.bs,
              wandb=args.wandb)

    # Saving
    model.save_models()
    print("Done training")

    print("\nCreating embeddings...")
    E_train = model.predict(X_train)
    # Flatten each embedding to a 1-D vector of length prod(output_shape_model).
    E_train_flatten = E_train.reshape((-1, np.prod(output_shape_model)))

# Read images
# Load query and gallery image sets via the project's loader helper.
query_map = loader.get_files(QueryDir)
query_names, query_paths, imgs_query, query_classes = loader.get_data_paths(
    query_map)
gallery_map = loader.get_files(GalleryDir)
gallery_names, gallery_paths, imgs_gallery, gallery_classes = loader.get_data_paths(
    gallery_map)

# Normalize all images
print("Normalizing query images")
imgs_query = normalize_img(imgs_query)
print("Normalizing gallery images")
imgs_gallery = normalize_img(imgs_gallery)
Code example #3
0
# Build noisy copies of the data; the autoencoder is trained to map the
# noisy inputs back to the clean originals (a denoising autoencoder).
train_x_with_noise = add_noise(train_x, 2)
# BUG FIX: the test-set noisy inputs were generated from `train_x`
# (copy-paste error), so the "test" predictions below re-used training
# samples and never evaluated generalization. Use `test_x` instead.
test_x_with_noise = add_noise(test_x, 2)

# Encoder hidden layers [25, 15] -> 10-dim code -> decoder layers [10, 25].
ae = AutoEncoder([25, 15],
                 10, [10, 25],
                 activation='tanh',
                 solver='lbfgs',
                 eta=0.0001,
                 max_iterations=30000,
                 tol=0.0000001,
                 verbose=True)
# Fit on (noisy input -> clean target) pairs.
ae.fit(train_x_with_noise, train_x)

# Visualize the first 32 samples: noisy inputs next to their reconstructions.
# NOTE(review): this loop is truncated in this chunk — it is cut off right
# after subplot (3, 2, 3); the remaining subplots are outside this view.
for i in range(32):
    prediction_1 = ae.predict(train_x_with_noise[i])
    prediction_2 = ae.predict(train_x[i])
    prediction_3 = ae.predict(test_x_with_noise[i])

    plt.figure()
    plt.subplot(3, 2, 1)
    # Samples are displayed as 7x5 grayscale images (35 values flattened).
    plt.imshow(train_x_with_noise[i].reshape(7, 5), 'gray_r')
    plt.title("Train Input noise", fontsize=15)
    plt.xticks([])
    plt.yticks([])
    plt.subplot(3, 2, 2)
    plt.imshow(prediction_1.reshape(7, 5), 'gray_r')
    plt.title('Predicted noise', fontsize=15)
    plt.xticks([])
    plt.yticks([])
    plt.subplot(3, 2, 3)
Code example #4
0
        transform_binary_matrix(rating_df, user2idx, movie2idx)
    # NOTE(review): the line above is the continuation of an assignment whose
    # left-hand side (presumably `rating_matrix, stat = \`) lies outside this
    # chunk; `rating_df`, `user2idx`, `movie2idx` come from earlier code.
    print(
        f'Positive Feedback: {stat["pos"]}',
        f'\tNegative Feedback: {stat["neg"]}'
    )

    # Split the interaction matrix into train/validation portions.
    rating_matrix_train, rating_matrix_val =\
        split_matrix(rating_matrix, user2idx, movie2idx)

    print(
        f'Train: {rating_matrix_train.count_nonzero()}\t',
        f'Validation Size: {rating_matrix_val.count_nonzero()}'
    )

    # Train Auto Encoder Model
    # Densify the sparse training matrix: one row per user, one column per movie.
    X = rating_matrix_train.toarray()
    model = AutoEncoder(input_dim=len(movie2idx), encoding_dim=20)
    model.train(X)

    # Make Prediction
    # Top-100 recommendations per user.
    recommendations = model.predict(X, n=100)

    # Evaluate
    # NOTE(review): `precison_100` is a typo for `precision_100`; kept as-is
    # in this comments-only pass.
    precison_100 = n_precision(recommendations, rating_matrix_val, 100)
    recall_100 = n_recall(recommendations, rating_matrix_val, 100)
    print(f'P@100 : {precison_100:.2%}')
    print(f'R@100 : {recall_100:.2%}')

    # Save Recommendation
    np.savez('./output/rec_auto.npz', recommendations)
Code example #5
0
    # Build image (X) and tag-embedding (Y) feature tensors for every dataset
    # split, using a pretrained AutoEncoder as the feature extractor.
    # NOTE(review): interior of an unseen function; `anno`, `X`, `Y`, `vecs`,
    # `vec_dim`, `normalize_rows`, `load`, `args` are defined elsewhere.
    feat_extractor = AutoEncoder(exp=args.exp, weights_path=args.weights_path)
    print("loading features ... ", end='')
    sys.stdout.flush()
    # Iterate every split key in the annotation dict except the "tags" entry.
    for set_ in [k for k in anno.keys() if k != "tags"]:
        imid_list = sorted(list(anno[set_].keys()))  # only for reproducibility
        X[set_] = None
        Y[set_] = [None for _ in range(len(imid_list))]

        if args.exp == 1:
            for i, imid in tqdm(enumerate(imid_list), total=len(imid_list)):
                # set image features
                fname = splitext(anno[set_][imid]["file_name"])[0] + ".dat"
                x = load(join(args.features_path, set_, fname))
                # Row-normalize, then run through the autoencoder's image branch.
                x = normalize_rows(x.reshape(1, -1)).squeeze()
                x = feat_extractor.predict(kind="img", x=x)
                if i == 0:
                    # Allocate the output matrix lazily, once the feature
                    # dimensionality is known from the first sample.
                    n_samples = len(imid_list)
                    n_dim = x.shape[0]
                    X[set_] = np.empty((n_samples, n_dim), dtype=np.float32)
                X[set_][i] = x

                # set word embeddings (OOV tags are set to the zero vector)
                tags = anno[set_][imid]["tags"]
                y = [[0] * vec_dim if vecs[w] is None else vecs[w]
                     for w in tags]
                y = normalize_rows(np.array(y, dtype=np.float32))
                y = [t.reshape(1, -1) for t in y]
                y = feat_extractor.predict(kind="text", y=y)
                Y[set_][i] = y
        # NOTE(review): the exp == 2 branch is truncated in this chunk.
        elif args.exp == 2:
Code example #6
0
# Declare the model
autoencoder = AutoEncoder()

# Build the (noisy input, clean target) training pair.
# NOTE(review): `x_test_noisy` / `x_test` used below are expected to be
# defined earlier in the file — build_dataset() only returns the train pair.
x_train_noisy, x_train = build_dataset()

# train the autoencoder model: learn the mapping noisy -> clean
autoencoder.fit(x_train_noisy,
                x_train,
                epochs=100000,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test_noisy, x_test),
                verbose=1)

# visualize decoded (denoised) images
# BUG FIX: to demonstrate denoising the model must be fed the *noisy* test
# images (it was trained on noisy->clean pairs, mirroring validation_data
# above); feeding clean `x_test` only showed plain reconstruction.
decoded_imgs = autoencoder.predict(x_test_noisy)

# Number of examples to display.
n = 10

plt.figure(figsize=(20, 4))
# Top row: noisy originals; bottom row: reconstructions.
# NOTE(review): this loop is truncated in this chunk — the reconstruction
# subplot's gray/axis formatting continues past the visible lines.
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    # Images are 256x256, stored flattened.
    plt.imshow(x_test_noisy[i].reshape(256, 256))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(256, 256))
Code example #7
0
    # Shape of one embedding produced by the encoder (batch axis dropped).
    # NOTE(review): interior of an unseen function; `model`, `QueryImgs`,
    # `GalleryImgs`, `input_shape_model`, and `args` are defined elsewhere.
    output_shape_model = tuple(
        [int(x) for x in model.encoder.output.shape[1:]])

    # Loading model
    model.load_models(loss='mse', optimizer="adam")

    # Convert images to numpy array of right dimensions
    print("\nConverting to numpy array of right dimensions")
    X_query = np.array(QueryImgs).reshape((-1, ) + input_shape_model)
    X_gallery = np.array(GalleryImgs).reshape((-1, ) + input_shape_model)
    print(">>> X_query.shape = " + str(X_query.shape))
    print(">>> X_gallery.shape = " + str(X_gallery.shape))

    # Create embeddings using model
    print("\nCreating embeddings")
    E_query = model.predict(X_query)
    # Flatten each embedding into a 1-D vector for distance computation.
    E_query_flatten = E_query.reshape((-1, np.prod(output_shape_model)))
    E_gallery = model.predict(X_gallery)
    E_gallery_flatten = E_gallery.reshape((-1, np.prod(output_shape_model)))

    print("\nComputing pairwise distance between query and gallery images")

    # Define the distance between query - gallery features vectors
    # NOTE(review): `p=2.` is only meaningful for the "minkowski" metric;
    # recent SciPy versions reject unexpected kwargs for other metrics —
    # confirm which values `args.metric` can take.
    pairwise_dist = spatial.distance.cdist(E_query_flatten,
                                           E_gallery_flatten,
                                           args.metric,
                                           p=2.)
    print('\nComputed distances and got c-dist {}'.format(pairwise_dist.shape))

    print("\nCalculating indices and gallery matches...")
    # For each query row: gallery indices sorted by ascending distance.
    indices = np.argsort(pairwise_dist, axis=-1)