Example #1
def main():

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="data_path",
                        metavar="DATA_PATH",
                        help="Path to read data from.")
    parser.add_argument(dest="model_path",
                        metavar="MODEL_PATH",
                        help="Path to read model from.")
    parser.add_argument(
        "-b",
        "--read_batches",
        metavar="READ_BATCHES",
        default=False,
        help="If true, data is read incrementally in batches during training.")
    args = parser.parse_args()
    parse_args(args)

    # Load model
    with CustomObjectScope({
            '_euclidean_distance': cnn_siamese_online._euclidean_distance,
            'ALPHA': cnn_siamese_online.ALPHA,
            "relu_clipped": cnn_siamese_online.relu_clipped
    }):
        tower_model = load_model(args.model_path)
        tower_model.compile(
            optimizer='adam',
            loss='mean_squared_error')  # Model was previously not compiled

    if not args.read_batches:  # Read all data at once

        # Load training triplets and validation triplets
        X_train, y_train = utils.load_examples(args.data_path, "train")
        X_valid, y_valid = utils.load_examples(args.data_path, "valid")

        # Get abs(distance) of embeddings
        X_train_emb = tower_model.predict(X_train)
        X_valid_emb = tower_model.predict(X_valid)

    else:  # Read data in batches
        raise ValueError("Reading in batches is not implemented yet.")

    # Shuffle the data
    X_train_emb, y_train = shuffle(X_train_emb, y_train)
    X_valid_emb, y_valid = shuffle(X_valid_emb, y_valid)

    # Run k-means on training data
    print("Running K-means...")
    k_means_model = KMeans(n_clusters=K, verbose=0)
    k_means_model.fit(X_train_emb)

    # Plot result
    k_means_PCA(k_means_model, X_train_emb, y_train, display_k_means=True)
    k_means_PCA(k_means_model, X_train_emb, y_train, display_k_means=False)

    # Compute percentage of each class in each cluster
    compute_cluster_class_fractions(k_means_model, y_train)
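
compute_cluster_class_fractions is defined elsewhere in this project; purely as an illustration, a minimal sketch of what such a helper could look like (name and signature assumed from the call above), using NumPy to report the share of each class inside each K-means cluster:

import numpy as np

def compute_cluster_class_fractions(k_means_model, y):
    # Hypothetical sketch: fraction of each class label inside each cluster.
    labels = np.asarray(y)
    if labels.ndim > 1:               # one-hot labels -> class indices
        labels = labels.argmax(axis=1)
    clusters = k_means_model.labels_  # cluster assignment of each training point
    for c in np.unique(clusters):
        members = labels[clusters == c]
        classes, counts = np.unique(members, return_counts=True)
        fractions = counts / float(counts.sum())
        print("Cluster {}: ".format(c) + ", ".join(
            "class {}: {:.2f}".format(k, f) for k, f in zip(classes, fractions)))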
Example #2
def compute_statistics(dataset, relative_threshold):
    path = './' + dataset.dataset_name + '/examples/'
    
    na_ex = load_examples(path+'examples.pkl')
    na_train = PerClassDataset(na_ex)

    a_ex = load_examples(path+'augmented_examples_topn5_cos_sim0.6.pkl')
    a_train = PerClassDataset(a_ex)
    threshold = int(a_train.stats()['most common labels number of examples'] / relative_threshold)

    valid_ex = load_examples(path+'valid_examples.pkl')
    valid = PerClassDataset(valid_ex)

    test_ex = load_examples(path+'test_examples.pkl')
    test = PerClassDataset(test_ex)

    val_test_ex = load_examples(path+'valid_test_examples.pkl')
    val_test = PerClassDataset(val_test_ex)

    oov_ex = load_examples(path+'oov_examples.pkl')
    oov = PerClassDataset(oov_ex)

    stats = {'non-augmented':na_train.stats(),
             'augmented':a_train.stats(threshold),
             'valid':valid.stats(),
             'test':test.stats(),
             'valid_test':val_test.stats(),
             'oov':oov.stats(),
             'sentences':{'train':len(dataset.get_train_sentences),
                          'valid':len(dataset.get_valid_sentences),
                          'test':len(dataset.get_test_sentences)
                         }
            }
    
    filepath = './'+dataset.dataset_name+'/'
    os.makedirs(filepath, exist_ok=True)
    with open(filepath + 'statistics.json', 'w') as file:
        json.dump(stats, file, indent=4)
Example #3
def main():

    # Parse arguments
    args = parse_args()

    # Load tower model
    with CustomObjectScope({'_euclidean_distance': nn_model.Model._euclidean_distance}):
        model = load_model(args.model_path)
        model.compile(optimizer='adam', loss='mean_squared_error')  # Model was previously not compiled

    X_shape, y_shape = utils.get_shapes(args.triplets_path, "train_anchors")

    # Build model to compute [A, P, N] => [abs(emb(A) - emb(P)), abs(emb(A) - emb(N))]
    pair_distance_model = build_pair_distance_model(model, X_shape[1:])
    pair_distance_model.compile(optimizer="adam", loss="mean_squared_error")  # Need to compile in order to predict

    # Load test data
    _, y_test_shape = utils.get_shapes(args.triplets_path, "test")
    n_users = y_test_shape[1]
    X_test_separated = []
    for j in range(n_users):
        X_test_j = utils.load_X(args.triplets_path, "test_" + str(j))
        X_test_separated.append(X_test_j)

    # If no SVM model is supplied, train a new one
    if args.load_model_path is None:

        # Load training triplets and validation triplets
        X_train_anchors, _ = utils.load_examples(args.triplets_path, "train_anchors")
        X_train_positives, _ = utils.load_examples(args.triplets_path, "train_positives")
        X_train_negatives, _ = utils.load_examples(args.triplets_path, "train_negatives")

        # Get abs(distance) of embeddings
        X_train_ap, X_train_an, X_train_pn = pair_distance_model.predict([X_train_anchors, X_train_positives, X_train_negatives])

        # Stack positive and negative examples
        X_train = np.vstack((X_train_ap, X_train_an, X_train_pn))
        y_train = np.hstack((np.ones(X_train_ap.shape[0], ), np.zeros(X_train_an.shape[0] + X_train_pn.shape[0],)))

        # Sign of distances should not matter ->  Train on both
        X_train = np.vstack((X_train, -X_train))
        y_train = np.hstack((y_train, y_train))

        # Shuffle the data
        X_train, y_train = shuffle(X_train, y_train)

        # Train SVM
        svm_model = svm.SVC(C=C, gamma=GAMMA, kernel=KERNEL, class_weight=CLASS_WEIGHTS, verbose=True, probability=args.sweep)
        svm_model.fit(X_train[:20000, :], y_train[:20000])

    else:  # if svm model supplied

        with open(args.load_model_path, "rb") as svm_file:
            svm_model = pickle.load(svm_file)

        random.seed(1)
        accuracy, FAR, FRR = predict_and_evaluate(pair_distance_model, svm_model, X_test_separated,
                                                  args.ensemble_size, args.ensemble_type, threshold=0.5, probability=False)

        print("\n---- Test Results ----")
        print("Accuracy = {}".format(accuracy))
        print("FAR = {}".format(FAR))
        print("FRR = {}".format(FRR))

    if args.sweep:

        # Sweep the threshold
        FARs, FRRs = [], []
        min_diff = float("inf")
        FAR_EER, FRR_EER = 1, 1
        accuracy_EER = 0
        threshold_EER = None
        for threshold in np.arange(0, 1, 0.01):

            # Predict and evaluate
            accuracy, FAR, FRR = predict_and_evaluate(pair_distance_model, svm_model, X_test_separated,
                                                      args.ensemble_size, args.ensemble_type, threshold=threshold, probability=True)

            # Store results
            FARs.append(FAR)
            FRRs.append(FRR)
            if np.abs(FAR - FRR) < min_diff:
                FAR_EER = FAR
                FRR_EER = FRR
                accuracy_EER = accuracy
                threshold_EER = threshold
                min_diff = np.abs(FAR - FRR)

        # Report EER and corresponding accuracy
        print("\n ---- Test Results: EER ----")
        print("Accuracy = {}".format(accuracy_EER))
        print("FAR = {}".format(FAR_EER))
        print("FRR = {}".format(FRR_EER))
        print("Threshold EER = {}".format(threshold_EER))

        # Plot FRR vs FAR
        plt.figure()
        plt.scatter(FARs, FRRs)
        plt.xlabel("FAR")
        plt.ylabel("FRR")
        plt.savefig("FRR_FAR.pdf")

    else:  # no sweep

        random.seed(1)
        accuracy, FAR, FRR = predict_and_evaluate(pair_distance_model, svm_model, X_test_separated,
                                                  args.ensemble_size, args.ensemble_type, threshold=0.5, probability=False)

        print("\n---- Test Results ----")
        print("Accuracy = {}".format(accuracy))
        print("FAR = {}".format(FAR))
        print("FRR = {}".format(FRR))

    # Save svm model
    if args.save_model_path is not None:
        with open(args.save_model_path + "svm_model.pkl", "wb") as svm_file:
            pickle.dump(svm_model, svm_file)
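
The sweep above approximates the equal error rate (EER) by keeping the threshold where FAR and FRR are closest. A compact NumPy restatement of that selection, assuming the per-threshold FARs and FRRs have already been collected:

import numpy as np

def pick_eer_point(thresholds, FARs, FRRs):
    # Return (threshold, FAR, FRR) at the point where |FAR - FRR| is smallest.
    FARs, FRRs = np.asarray(FARs), np.asarray(FRRs)
    i = int(np.argmin(np.abs(FARs - FRRs)))
    return thresholds[i], FARs[i], FRRs[i]

# e.g. pick_eer_point(np.arange(0, 1, 0.01), FARs, FRRs)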
Example #4
def prepare_data(
    dataset,
    embeddings,
    vectorizer,
    n=15,
    ratio=.8,
    use_gpu=False,
    k=1,
    data_augmentation=False,
    over_population_threshold=100,
    relative_over_population=True,
    debug_mode=False,
    verbose=True,
):
    # Train-validation part
    path = './data/' + dataset.dataset_name + '/examples/'
    if data_augmentation:
        examples = load_examples(path +
                                 'augmented_examples_topn5_cos_sim0.6.pkl')
    else:
        examples = load_examples(path + 'examples.pkl')
    if debug_mode:
        examples = list(examples)[:128]

    examples = truncate_examples(examples, n)

    transform = vectorizer.vectorize_unknown_example

    def target_transform(y):
        return embeddings[y]

    train_valid_dataset = PerClassDataset(
        examples,
        transform=transform,
        target_transform=target_transform,
    )

    train_dataset, valid_dataset = train_valid_dataset.split(
        ratio=ratio, shuffle=True, reuse_label_mappings=False)

    filter_labels_cond = None
    if over_population_threshold is not None:
        if relative_over_population:
            over_population_threshold = int(
                train_valid_dataset.stats()
                ['most common labels number of examples'] /
                over_population_threshold)

        def filter_labels_cond(label, N):
            return N <= over_population_threshold

    train_loader = PerClassLoader(dataset=train_dataset,
                                  collate_fn=collate_fn,
                                  batch_size=64,
                                  k=k,
                                  use_gpu=use_gpu,
                                  filter_labels_cond=filter_labels_cond)
    valid_loader = PerClassLoader(dataset=valid_dataset,
                                  collate_fn=collate_fn,
                                  batch_size=64,
                                  k=k,
                                  use_gpu=use_gpu,
                                  filter_labels_cond=filter_labels_cond)

    # Test part
    test_examples = load_examples(path + 'valid_test_examples.pkl')
    test_examples = truncate_examples(test_examples, n)
    test_dataset = PerClassDataset(dataset=test_examples, transform=transform)
    test_loader = PerClassLoader(dataset=test_dataset,
                                 collate_fn=collate_x,
                                 k=-1,
                                 shuffle=False,
                                 batch_size=64,
                                 use_gpu=use_gpu)

    # OOV part
    oov_examples = load_examples(path + 'oov_examples.pkl')
    oov_examples = truncate_examples(oov_examples, n)
    oov_dataset = PerClassDataset(dataset=oov_examples, transform=transform)
    oov_loader = PerClassLoader(dataset=oov_dataset,
                                collate_fn=collate_x,
                                k=-1,
                                shuffle=False,
                                batch_size=64,
                                use_gpu=use_gpu)

    if verbose:
        logging.info('Number of unique examples: {}'.format(len(examples)))

        logging.info('\nGlobal statistics:')
        stats = train_valid_dataset.stats()
        for name, value in stats.items():
            logging.info(name + ': ' + str(value))

        logging.info('\nStatistics on the training dataset:')
        stats = train_dataset.stats(over_population_threshold)
        for name, value in stats.items():
            logging.info(name + ': ' + str(value))

        logging.info('\nStatistics on the validation dataset:')
        stats = valid_dataset.stats(over_population_threshold)
        for name, value in stats.items():
            logging.info(name + ': ' + str(value))

        logging.info('\nStatistics on the test dataset:')
        stats = test_dataset.stats()
        for name, value in stats.items():
            logging.info(name + ': ' + str(value))

        logging.info('\nFor training, loading ' + str(k) +
                     ' examples per label per epoch.')

    return train_loader, valid_loader, test_loader, oov_loader
Example #5
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(dest="data_path", metavar="DATA_PATH", help="Path to read examples from.")
    parser.add_argument("-s", "--save_path", metavar="SAVE_PATH", default=None, help="Path to save trained model to. If no path is specified checkpoints are not saved.")
    parser.add_argument("-m", "--metrics-path", metavar="METRICS_PATH", default=None, help="Path to save additional performance metrics to (for debugging purposes).")
    args = parser.parse_args()

    if args.save_path is not None:
        if not os.path.isdir(args.save_path):
            response = input("Save path does not exist. Create it? (Y/n) >> ")
            if response.lower() not in ["y", "yes", "1", ""]:
                exit()
            else:
                os.makedirs(args.save_path)

    if args.metrics_path is not None:
        if not os.path.isdir(args.metrics_path):
            response = input("Metrics path does not exist. Create it? (Y/n) >> ")
            if response.lower() not in ["y", "yes", "1", ""]:
                exit()
            else:
                os.makedirs(args.metrics_path)

    # Load training and validation data
    X_train, y_train = utils.load_examples(args.data_path, "train")
    X_valid, y_valid = utils.load_examples(args.data_path, "valid")

    # Shuffle the data
    X_train, y_train = utils.shuffle_data(X_train, y_train)
    X_valid, y_valid = utils.shuffle_data(X_valid, y_valid)

    # Build model
    input_shape = X_train.shape[1:]
    n_classes = y_train.shape[1]
    model = build_model(input_shape, n_classes)

    # Compile model
    adam_optimizer = optimizers.Adam(lr=LEARNING_RATE)
    model.compile(loss="categorical_crossentropy", optimizer=adam_optimizer, metrics=["accuracy"])

    # Setup callbacks for early stopping and model saving
    callback_list = setup_callbacks(args.save_path, n_classes)

    # Train model
    model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=BATCH_SIZE,
              epochs=EPOCHS, callbacks=callback_list)
    global training_complete
    training_complete = True

    # Load test data
    X_test_v, y_test_v = utils.load_examples(args.data_path, "test_valid")
    X_test_u, y_test_u = utils.load_examples(args.data_path, "test_unknown")
    X_test = np.vstack((X_test_v, X_test_u))
    y_test = np.vstack((y_test_v, y_test_u))

    # Test model
    print("Evaluating model...")
    loss, accuracy = model.evaluate(X_test_v, y_test_v, verbose=1)

    FAR, FRR = compute_FAR_FRR(model, X_test, y_test)

    print("\n---- Test Results ----")
    print("Loss = {}, Accuracy = {}".format(loss, accuracy))
    print("FAR = {}, FRR = {}".format(FAR, FRR))

    # Additional metrics
    if args.metrics_path is not None:

        # Confusion matrix
        y_pred = model.predict(X_test_v)
        y_pred = utils.one_hot_to_index(y_pred)
        y_true = utils.one_hot_to_index(y_test_v)
        conf_matrix = confusion_matrix(y_true, y_pred)
        np.savetxt(args.metrics_path + "confusion_matrix.txt", conf_matrix)
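
compute_FAR_FRR is defined elsewhere; a minimal sketch of the usual biometric definitions, assuming boolean arrays of genuine/impostor ground truth and accept decisions (names are placeholders):

import numpy as np

def far_frr(genuine, accepted):
    # FAR: fraction of impostor attempts that were accepted.
    # FRR: fraction of genuine attempts that were rejected.
    genuine = np.asarray(genuine, dtype=bool)
    accepted = np.asarray(accepted, dtype=bool)
    FAR = accepted[~genuine].mean() if (~genuine).any() else 0.0
    FRR = (~accepted[genuine]).mean() if genuine.any() else 0.0
    return FAR, FRR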
Example #6
def main():
    prework()

    # load data
    examples = load_examples(a)

    # load model
    model = create_model(examples.blur, examples.pan, examples.mul,
                         examples.blur_u)

    with tf.name_scope("images"):
        display_fetches = {
            "imnames": examples.imnames,
            "blur_u": examples.blur_u,
            "blur": examples.blur,
            "pan": examples.pan,
            "mul": examples.mul,
            "mul_hat": model.outputs,
        }

    with tf.name_scope("blur_u_summary"):
        tf.summary.image("blur_u", examples.blur_u)

    with tf.name_scope("blur_summary"):
        tf.summary.image("blur", examples.blur)

    with tf.name_scope("mul_summary"):
        tf.summary.image("mul", examples.mul)

    with tf.name_scope("mul_hat_summary"):
        tf.summary.image("mul_hat", model.outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image("predict_real", model.predict_real)

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image("predict_fake", model.predict_fake)

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count = ", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 100
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                save_images(results, a)
            print("test over")
        else:
            start = time.time()
            sv_gloss = -1

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq) or should(a.save_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches,
                                   options=options,
                                   run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    save_images(results["display"],
                                a,
                                step=results["global_step"])

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(a.save_freq):
                    print("saving model")
                    gloss = results["gen_loss_GAN"] * a.gan_weight + results[
                        "gen_loss_L1"] * a.l1_weight
                    # the trailing 'or True' makes this condition always true,
                    # so a checkpoint is saved every time and the "skipped" branch never runs
                    if sv_gloss == -1 or gloss < sv_gloss or True:
                        sv_gloss = gloss
                        saver.save(sess,
                                   os.path.join(a.output_dir, "model"),
                                   global_step=sv.global_step)
                    else:
                        print("skipped because of worse loss")

                if sv.should_stop():
                    break
Example #7
def main():

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="data_path", metavar="DATA_PATH", help="Path to read examples from.")
    parser.add_argument("-sW", "--save_weights_path", metavar="SAVE_WEIGHTS_PATH", default=None, help="Path to save trained weights to. If no path is specified checkpoints are not saved.")
    parser.add_argument("-sM", "--save_model_path", metavar="SAVE_MODEL_PATH", default=None, help="Path to save trained model to.")
    parser.add_argument("-l", "--load_path", metavar="LOAD_PATH", default=None, help="Path to load trained model from. If no path is specified model is trained from scratch.")
    parser.add_argument("-m", "--metrics-path", metavar="METRICS_PATH", default=None, help="Path to save additional performance metrics to (for debugging purposes).")
    parser.add_argument("-b", "--read_batches", metavar="READ_BATCHES", default=False, help="If true, data is read incrementally in batches during training.")
    parser.add_argument("--PCA", metavar="PCA", default=False, help="If true, a PCA plot is saved.")
    parser.add_argument("--TSNE", metavar="TSNE", default=False, help="If true, a TSNE plot is saved.")
    parser.add_argument("--output_loss_threshold", metavar="OUTPUT_LOSS_THRESHOLD", default=None, help="Value between 0.0-1.0. Main function will return loss value of triplet at set percentage.")

    args = parser.parse_args()
    parse_args(args)

    X_shape, y_shape = utils.get_shapes(args.data_path, "train_anchors")

    # Build model
    input_shape = X_shape[1:]
    tower_model = build_tower_cnn_model(input_shape)  # single input model
    triplet_model = build_triplet_model(input_shape, tower_model)  # siamese model
    if args.load_path is not None:
        triplet_model.load_weights(args.load_path)

    # Setup callbacks for early stopping and model saving
    callback_list = setup_callbacks(args.save_weights_path)

    # Compile model
    adam = Adam(lr=LEARNING_RATE)
    triplet_model.compile(optimizer=adam, loss='mean_squared_error')

    if not args.read_batches:  # Read all data at once

        # Load training triplets and validation triplets
        X_train_anchors, y_train_anchors = utils.load_examples(args.data_path, "train_anchors")
        X_train_positives, _ = utils.load_examples(args.data_path, "train_positives")
        X_train_negatives, _ = utils.load_examples(args.data_path, "train_negatives")
        X_valid_anchors, y_valid_anchors = utils.load_examples(args.data_path, "valid_anchors")
        X_valid_positives, _ = utils.load_examples(args.data_path, "valid_positives")
        X_valid_negatives, _ = utils.load_examples(args.data_path, "valid_negatives")

        # Create dummy y = 0 (since output of siamese model is triplet loss)
        y_train_dummy = np.zeros((X_shape[0],))
        y_valid_dummy = np.zeros((X_valid_anchors.shape[0],))

        # Train the model
        triplet_model.fit([X_train_anchors, X_train_positives, X_train_negatives],
                          y_train_dummy, validation_data=([X_valid_anchors, X_valid_positives, X_valid_negatives], y_valid_dummy),
                          epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callback_list)
        global training_complete
        training_complete = True

    else:  # Read data in batches

        training_batch_generator = utils.DataGenerator(args.data_path, "train", batch_size=1000)
        validation_batch_generator = utils.DataGenerator(args.data_path, "valid", batch_size=1000)

        triplet_model.fit_generator(generator=training_batch_generator, validation_data=validation_batch_generator,
                                    callbacks=callback_list, epochs=EPOCHS)

    # Save weights
    if args.save_weights_path is not None:
        triplet_model.save_weights(args.save_weights_path + "final_weights.hdf5")

    # Save model
    if args.save_model_path is not None:
        tower_model.save(args.save_model_path + "tower_model.hdf5")
        triplet_model.save(args.save_model_path + "triplet_model.hdf5")

    # Plot PCA/TSNE
    # For now, read all the valid anchors to do PCA
    # TODO: add function in util that reads a specified number of random samples from a dataset.
    if args.PCA is not False or args.TSNE is not False:
        X_valid_anchors, y_valid_anchors = utils.load_examples(args.data_path, "valid_anchors")
        X, Y = utils.shuffle_data(X_valid_anchors[:, :, :], y_valid_anchors[:, :], one_hot_labels=True)
        X = X[:5000, :, :]
        Y = Y[:5000, :]
        X = tower_model.predict(X)
        if args.PCA:
            utils.plot_with_PCA(X, Y)
        if args.TSNE:
            utils.plot_with_TSNE(X, Y)

    # Calculate loss value of triplet at a certain threshold
    if args.output_loss_threshold is not None:

        if not args.read_batches:  # Read all data at once

            # Load training triplets and validation triplets
            X_train_anchors, _ = utils.load_examples(args.data_path, "train_anchors")
            X_train_positives, _ = utils.load_examples(args.data_path, "train_positives")
            X_train_negatives, _ = utils.load_examples(args.data_path, "train_negatives")

            # Get abs(distance) of embeddings
            X_train = triplet_model.predict([X_train_anchors, X_train_positives, X_train_negatives])

        else:  # Read data in batches

            training_batch_generator = utils.DataGenerator(args.data_path, "train", batch_size=100, stop_after_batch=10)

            # Get abs(distance) of embeddings (one batch at a time)
            X_train = triplet_model.predict_generator(generator=training_batch_generator, verbose=1)

        X_train = np.sort(X_train, axis=None)
        print(X_train[int(float(args.output_loss_threshold) * X_train.shape[0])])
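
The zero-valued dummy targets work because the siamese model outputs the triplet loss itself, so fitting it against zeros with 'mean_squared_error' drives the loss toward zero. For reference, a NumPy sketch of the standard margin-based triplet loss such a model typically computes (the exact formulation inside build_triplet_model is not shown here; ALPHA plays the role of the margin):

import numpy as np

def triplet_loss(emb_a, emb_p, emb_n, alpha=0.2):
    # max(0, d(a, p) - d(a, n) + margin) per triplet, with squared Euclidean distances
    d_ap = np.sum((emb_a - emb_p) ** 2, axis=1)
    d_an = np.sum((emb_a - emb_n) ** 2, axis=1)
    return np.maximum(d_ap - d_an + alpha, 0.0)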
Example #8
Results are written as CSV to standard output.
"""

from contextlib import contextmanager
import csv
import time
import sys

from aima.search import Node, depth_first_graph_search, hill_climbing, greedy_best_first_graph_search

from sudoku import Sudoku, FilledSudoku, NormalizedSudoku, RandomizedSudoku, SortedSudoku
from heuristics import most_constrained_cell
from utils import load_examples, compact


examples = load_examples(sys.argv[1])
results = csv.DictWriter(sys.stdout, fieldnames=('initial', 'algorithm', 'heuristic', 'bound', 'final', 'score','explored', 'time'))

results.writeheader()

def bench(algorithm, problem, *argv, **kwargs):
    """Benchmark an algorithm and write the results in the standard output."""
    # benchmark algorithm runtime
    a = time.clock()
    solution, explored = algorithm(problem, *argv, **kwargs)
    delta = time.clock() - a

    if isinstance(solution, Node):
        solution = solution.state

    # write results
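
The excerpt is cut off before the row is written; purely as an illustration (not the original implementation), a row matching the DictWriter fieldnames declared above could be emitted like this:

    # hypothetical continuation, not the original code
    results.writerow({
        'initial': compact(problem.initial),
        'algorithm': algorithm.__name__,
        'heuristic': kwargs['h'].__name__ if 'h' in kwargs else None,
        'bound': kwargs.get('bound'),
        'final': compact(solution) if solution is not None else None,
        'score': None,        # problem-specific score in the original code
        'explored': explored,
        'time': delta,
    })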
Example #9
import sys

from aima.search import depth_first_graph_search
from sudoku import Sudoku, RandomizedSudoku, SortedSudoku

from utils import load_examples

for example in load_examples(sys.argv[1]):
    s = Sudoku(example)
    r = RandomizedSudoku(example)
    sorted_sudoku = SortedSudoku(example)
    print(depth_first_graph_search(s, bound=10000))
    print(depth_first_graph_search(r, bound=10000))
    print(depth_first_graph_search(sorted_sudoku, bound=10000))
Example #10
def main():
    if tf.__version__ != "1.0.0":
        raise Exception("Tensorflow version 1.0.0 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)
        # remove alpha channel if present
        input_image = input_image[:, :, :3]
        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator") as scope:
            batch_output = deprocess(
                create_generator_Unet(a, preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        output_data = tf.image.encode_png(output_image)
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {"key": key.name, "input": input.name}
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key": tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        # config = tf.ConfigProto()
        # config.allow_soft_placement = False
        # config.log_device_placement = True
        # config.gpu_options.allow_growth = True
        # config.gpu_options.per_process_gpu_memory_fraction = 0.4
        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(
                filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess,
                              os.path.join(a.output_dir, "export"),
                              write_meta_graph=False)

        return

    examples = load_examples(a)
    print("examples count = %d" % examples.count)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image(
            "predict_real",
            tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image(
            "predict_fake",
            tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss", model.gen_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)

    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            test_start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(a, results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])

                index_path = append_index(a, filesets)
            with open(a.output_dir + '/test.time', 'w') as f:
                f.write(str(time.time() - test_start) + '\n')

            print("wrote index at", index_path)
        else:
            # training
            start = time.time()
            with open(a.output_dir + '/train.precision', 'a') as f:
                f.write('step RFE ACC \n')

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss"] = model.gen_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches,
                                   options=options,
                                   run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(a,
                                           results["display"],
                                           step=results["global_step"])
                    append_index(a, filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss", results["gen_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])

                #with open(a.output_dir + '/train.log', 'a') as f:
                #    f.write('step gen_loss discrim_loss')
                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess,
                               os.path.join(a.output_dir, "model"),
                               global_step=sv.global_step)
                    line_str = '%s %s %s\n' % (results["global_step"],
                                               results["gen_loss"],
                                               results["discrim_loss"])
                    with open(a.output_dir + '/train.log', 'a') as f:
                        f.write(line_str)

                if should(a.test_freq):

                    if a.output_dir is None:
                        raise Exception("output_dir required for test mode")

                    # load some options from the checkpoint
                    options = {
                        "which_direction", "ngf", "ndf", "lab_colorization"
                    }
                    with open(os.path.join(a.output_dir, "options.json")) as f:
                        for key, val in json.loads(f.read()).items():
                            if key in options:
                                print("loaded", key, "=", val)
                                setattr(a, key, val)
                    # disable these features in test mode
                    a.scale_size = CROP_SIZE
                    a.flip = False

                    # testing
                    # at most, process the test data once
                    max_steps = min(examples.steps_per_epoch, max_steps)

                    for step in range(max_steps):
                        test_results = sess.run(display_fetches)
                        test_filesets = save_images(
                            a, test_results, step=results["global_step"])

                        index_path = append_index(a, test_filesets)

                        if 'ThreeObj_gamma_1.0' in test_filesets[0]['outputs']:
                            #pdb.set_trace()
                            print(
                                'file',
                                'checkpoints.lr.%s/simulation_test/ossgan_sgan_l1/%s/images/%s'
                                %
                                (a.lr, a.f_type, test_filesets[0]['outputs']))
                            outp = cv2.imread(
                                'checkpoints.lr.%s/simulation_test/ossgan_sgan_l1/%s/images/%s'
                                %
                                (a.lr, a.f_type, test_filesets[0]['outputs']))
                            targ = cv2.imread(
                                'checkpoints.lr.%s/simulation_test/ossgan_sgan_l1/%s/images/%s'
                                %
                                (a.lr, a.f_type, test_filesets[0]['targets']))
                            inp = cv2.imread(
                                'checkpoints.lr.%s/simulation_test/ossgan_sgan_l1/%s/images/%s'
                                % (a.lr, a.f_type, test_filesets[0]['inputs']))
                            rfe = region_fitting_error(outp, targ)
                            acc = calculate_accuracy(inp, outp, targ)
                            #print('region fitting error', rfe)
                            #print('accuracy', acc)
                            line_str = '%s %s %s\n' % (results["global_step"],
                                                       rfe, acc)
                            with open(a.output_dir + '/train.precision',
                                      'a') as f:
                                f.write(line_str)

                    print("wrote index at", index_path)

                if sv.should_stop():
                    break

            with open(a.output_dir + '/train.time', 'w') as f:
                f.write(str(time.time() - start) + '\n')
Example #11
def main():

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="data_path",
                        metavar="DATA_PATH",
                        help="Path to read examples from.")
    parser.add_argument(
        "-sW",
        "--save_weights_path",
        metavar="SAVE_WEIGHTS_PATH",
        default=None,
        help=
        "Path to save trained weights to. If no path is specified checkpoints are not saved."
    )
    parser.add_argument("-sM",
                        "--save_model_path",
                        metavar="SAVE_MODEL_PATH",
                        default=None,
                        help="Path to save trained model to.")
    parser.add_argument(
        "-l",
        "--load_path",
        metavar="LOAD_PATH",
        default=None,
        help=
        "Path to load trained model from. If no path is specified model is trained from scratch."
    )
    parser.add_argument("--PCA",
                        metavar="PCA",
                        default=False,
                        help="If true, a PCA plot is saved.")
    parser.add_argument("--TSNE",
                        metavar="TSNE",
                        default=False,
                        help="If true, a TSNE plot is saved.")

    args = parser.parse_args()
    parse_args(args)

    X_shape, y_shape = utils.get_shapes(args.data_path, "train")

    # Build model
    input_shape = X_shape[1:]
    tower_model = build_tower_cnn_model(input_shape)  # single input model
    triplet_model = build_triplet_model(input_shape,
                                        tower_model)  # siamese model
    if args.load_path is not None:
        triplet_model.load_weights(args.load_path)

    # Setup callbacks for early stopping and model saving
    callback_list = setup_callbacks(args.save_weights_path)

    # Compile model
    adam = Adam(lr=LEARNING_RATE)
    triplet_model.compile(optimizer=adam, loss=custom_loss)
    tower_model.predict(np.zeros(
        (1, ) +
        input_shape))  # warm-up predict on dummy zeros to build the predict function

    # Initializate online triplet generators
    training_batch_generator = OnlineTripletGenerator(args.data_path,
                                                      "train",
                                                      tower_model,
                                                      batch_size=BATCH_SIZE,
                                                      triplet_mode="batch_all")
    validation_batch_generator = utils.DataGenerator(args.data_path,
                                                     "valid",
                                                     batch_size=BATCH_SIZE)

    triplet_model.fit_generator(generator=training_batch_generator,
                                validation_data=validation_batch_generator,
                                callbacks=callback_list,
                                epochs=EPOCHS)

    # Save weights
    if args.save_weights_path is not None:
        triplet_model.save_weights(args.save_weights_path +
                                   "final_weights.hdf5")

    # Save model
    if args.save_model_path is not None:
        tower_model.save(args.save_model_path + "tower_model.hdf5")

    # Plot PCA/TSNE
    # TODO: add function in util that reads a specified number of random samples from a dataset.
    if args.PCA is not False or args.TSNE is not False:
        X_valid, y_valid = utils.load_examples(args.data_path, "train")
        X, Y = utils.shuffle_data(X_valid[:, :, :],
                                  y_valid[:, :],
                                  one_hot_labels=True)
        X = X[:5000, :, :]
        Y = Y[:5000, :]
        X = tower_model.predict(X)
        if args.PCA:
            utils.plot_with_PCA(X, Y)
        if args.TSNE:
            utils.plot_with_TSNE(X, Y)
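
utils.DataGenerator and OnlineTripletGenerator are project-specific and not shown here; a minimal sketch of the kind of keras.utils.Sequence batch generator that fit_generator expects (class name and layout are assumptions):

import numpy as np
from keras.utils import Sequence

class SimpleBatchGenerator(Sequence):
    # Hypothetical sketch of a batched (X, y) generator for fit_generator.
    def __init__(self, X, y, batch_size=32):
        self.X, self.y, self.batch_size = X, y, batch_size

    def __len__(self):
        return int(np.ceil(len(self.X) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.X[batch], self.y[batch]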
Example #12
def main():

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="triplets_path",
                        metavar="TRIPLETS_PATH",
                        help="Path to read triplets from.")
    parser.add_argument(dest="model_path",
                        metavar="MODEL_PATH",
                        help="Path to read model from.")
    parser.add_argument(
        "-e",
        "--ensemble",
        metavar="ENSEMBLE",
        default=1,
        help="How many examples to ensemble when predicting. Default: 1")
    parser.add_argument(
        "-b",
        "--read_batches",
        metavar="READ_BATCHES",
        default=False,
        help="If true, data is read incrementally in batches during training.")
    args = parser.parse_args()
    parse_args(args)

    # Load model
    with CustomObjectScope({
            '_euclidean_distance': cnn_siamese_online._euclidean_distance,
            'ALPHA': cnn_siamese_online.ALPHA,
            "relu_clipped": cnn_siamese_online.relu_clipped
    }):
        tower_model = load_model(args.model_path)
        tower_model.compile(
            optimizer='adam',
            loss='mean_squared_error')  # Model was previously not compiled

    X_shape, y_shape = utils.get_shapes(args.triplets_path, "train_anchors")

    # Build model to compute [A, P, N] => [abs(emb(A) - emb(P)), abs(emb(A) - emb(N))]
    pair_distance_model = build_pair_distance_model(tower_model, X_shape[1:])
    pair_distance_model.compile(
        optimizer="adam",
        loss="mean_squared_error")  # Need to compile in order to predict

    if not args.read_batches:  # Read all data at once

        # Load training triplets
        X_train_anchors, _ = utils.load_examples(args.triplets_path,
                                                 "train_anchors")
        X_train_positives, _ = utils.load_examples(args.triplets_path,
                                                   "train_positives")
        X_train_negatives, _ = utils.load_examples(args.triplets_path,
                                                   "train_negatives")

        # Get abs(distance) of embeddings
        X_train_1, X_train_0 = pair_distance_model.predict(
            [X_train_anchors, X_train_positives, X_train_negatives])

    else:  # Read data in batches
        raise ValueError("Reading in batches is not implemented yet.")

    # Stack positive and negative examples
    X_train = np.vstack((X_train_1, X_train_0))
    y_train = np.hstack(
        (np.ones(X_train_1.shape[0], ), np.zeros(X_train_0.shape[0], )))

    # Shuffle the data
    X_train, y_train = shuffle(X_train, y_train)

    # Train SVM
    clf = svm.SVC(gamma='scale', verbose=True)
    clf.fit(X_train[:10000, :], y_train[:10000])

    # Evaluate SVM
    y_pred = clf.predict(X_train)

    if args.ensemble > 1:
        accuracy, FAR, FRR = ensemble_accuracy_FAR_FRR(y_train, y_pred,
                                                       args.ensemble)
        print("\n\n---- Validation Results. With ensembling = {}. ----".format(
            args.ensemble))
    else:
        accuracy, FAR, FRR = accuracy_FAR_FRR(y_train, y_pred)
        print("\n\n---- Validation Results. No ensembling. ----")

    print("Accuracy = {}".format(accuracy))
    print("FAR = {}".format(FAR))
    print("FRR = {}".format(FRR))
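
ensemble_accuracy_FAR_FRR is not shown; a common approach is to group consecutive predictions for the same class and majority-vote each group before scoring. A hypothetical sketch of that grouping (the project's helper may differ):

import numpy as np

def majority_vote(y_true, y_pred, ensemble_size):
    # Majority-vote consecutive groups of predictions (assumes each group shares a label).
    n = (len(y_pred) // ensemble_size) * ensemble_size  # drop the remainder
    voted = y_pred[:n].reshape(-1, ensemble_size).mean(axis=1) >= 0.5
    truth = y_true[:n].reshape(-1, ensemble_size).mean(axis=1) >= 0.5
    return truth.astype(int), voted.astype(int)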