from os import getcwd, path

import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

# FLAGS and the helpers (tic, toc, preprocess_data, run_experiment,
# bag_of_words_MLP_model, create_metadata) are defined elsewhere in the project.


def mlp():
    """Trains a multilayer perceptron with 1 hidden layer."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    print("Preprocessing data...")
    tic()
    train_raw, x_train, y_train, x_test, y_test, _, _, classes = preprocess_data(FLAGS)
    toc()

    # Set the output dimension according to the number of classes
    FLAGS.output_dim = len(classes)

    # Train and evaluate the MLP model.
    tic()
    run_experiment(x_train, y_train, x_test, y_test,
                   bag_of_words_MLP_model, 'train_and_evaluate', FLAGS)
    toc()

    # Associate metadata with the word embedding for TensorBoard Projector
    config = projector.ProjectorConfig()
    word_embedding = config.embeddings.add()
    # The name of the embedding tensor was discovered by using TensorBoard.
    word_embedding.tensor_name = 'MLP/input_layer/words_embedding/embedding_weights'
    word_embedding.metadata_path = path.join(getcwd(), FLAGS.word_meta_file)
    writer = tf.summary.FileWriter(FLAGS.model_dir)
    # Writes projector_config.pbtxt
    projector.visualize_embeddings(writer, config)

    # Create metadata for TensorBoard Projector.
    create_metadata(train_raw, classes, FLAGS)
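The metadata file that create_metadata presumably writes follows the TensorBoard Projector format: a TSV with one line per row of the embedding tensor (with a single column, no header line). A minimal, hypothetical sketch of such a writer:

def write_word_metadata_sketch(words, meta_path):
    # Hypothetical helper, not the project's create_metadata: one label per
    # embedding row; a single-column metadata file takes no header line.
    with open(meta_path, 'w') as f:
        for word in words:
            f.write(word + '\n')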
Example 2
from numpy import zeros, mean, std


def run_experiments(expts, func, case, roleouts, episodes, in_cloud, minor=1):
    """Repeats an experiment `expts` times and returns the mean and standard
    deviation of the agents' actions and rewards. `epsilon` is taken from the
    final repetition."""
    experiment = func(case, minor)

    profile = experiment.profile
    maxSteps = len(profile) # num profile values
    na = len(experiment.agents)
    ni = roleouts * episodes * maxSteps # no. interactions

    expt_action = zeros((expts, na, ni))
    expt_reward = zeros((expts, na, ni))

    for expt in range(expts):
        action, reward, epsilon = \
            run_experiment(experiment, roleouts, episodes, in_cloud)

        expt_action[expt, :, :] = action
        expt_reward[expt, :, :] = reward

        # Build a fresh experiment for the next repetition.
        experiment = func(case, minor)

    expt_action_mean = mean(expt_action, axis=0)
    expt_action_std = std(expt_action, axis=0, ddof=1)

    expt_reward_mean = mean(expt_reward, axis=0)
    expt_reward_std = std(expt_reward, axis=0, ddof=1)

    return expt_action_mean, expt_action_std, \
           expt_reward_mean, expt_reward_std, epsilon
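A hypothetical usage sketch (build_experiment and case are assumed names; the factory must return an object with profile and agents attributes, as run_experiments expects):

action_mean, action_std, reward_mean, reward_std, epsilon = run_experiments(
    expts=10, func=build_experiment, case=case,
    roleouts=2, episodes=52, in_cloud=False)
print(reward_mean.shape)  # (number of agents, number of interactions)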
Example 3
from numpy import zeros, mean, std


def run_years(func, case, roleouts, episodes, in_cloud):
    """Runs a year-long experiment and aggregates the reward per profile step."""
    experiment = func(case)

    # Assumed reconstruction: the profile holds 364 days of maxSteps values each.
    profile = experiment.profile
    maxSteps = len(profile) // 364
    dynProfile = profile.reshape((364, maxSteps))

    na = len(experiment.agents)

    _, reward, epsilon = \
        run_experiment(experiment, roleouts, episodes, in_cloud, dynProfile)

    reward_mean = zeros((na, maxSteps))
    reward_std = zeros((na, maxSteps))

    # Aggregate each profile step s across all repetitions of the profile.
    for s in range(maxSteps):
        reward_mean[:, s] = mean(reward[:, s::maxSteps], axis=1)
        reward_std[:, s] = std(reward[:, s::maxSteps], axis=1, ddof=1)

    return reward, (None, None, reward_mean, reward_std, epsilon)
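The s::maxSteps slice above gathers the same profile step from every repetition of the profile; a tiny standalone illustration:

from numpy import arange, mean

# Two agents, 3 steps per repetition, 2 repetitions: the columns follow the
# step pattern [s0, s1, s2, s0, s1, s2].
reward = arange(12).reshape((2, 6))
print(mean(reward[:, 0::3], axis=1))  # step-0 mean per agent: [1.5, 7.5]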
Example 4
import tensorflow as tf


def perceptron():
    """Train and evaluate the perceptron model."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    print("Preprocessing data...")
    tic()
    train_raw, x_train, y_train, x_test, y_test, _, _, classes = preprocess_data(
        FLAGS)
    toc()

    # Set the output dimension according to the number of classes
    FLAGS.output_dim = len(classes)

    # Train and evaluate the model.
    tic()
    run_experiment(x_train, y_train, x_test, y_test,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()
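bag_of_words_perceptron_model is defined elsewhere in the project. As a rough sketch of the idea under assumed hyperparameters (vocabulary size, embedding width, class count, learning rate), such a model_fn averages word embeddings and applies a single linear layer:

import tensorflow as tf

def bag_of_words_perceptron_sketch(features, labels, mode):
    # Hypothetical sketch, not the project's model: embed word ids, average
    # them into one fixed-size vector per document, then one linear layer.
    embedded = tf.contrib.layers.embed_sequence(
        features, vocab_size=10000, embed_dim=50)   # assumed sizes
    averaged = tf.reduce_mean(embedded, axis=1)
    logits = tf.layers.dense(averaged, 2, activation=None)  # 2 assumed classes

    predictions = tf.argmax(logits, axis=1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    train_op = tf.train.AdamOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels, predictions)}
    return tf.estimator.EstimatorSpec(
        mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)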
def rnn():
    """Trains a recurrent neural network model. It assumes that the data has
    already been preprocessed, e.g. by perceptron.py."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    print("Preprocessing data...")
    tic()
    train_raw, x_train, y_train, x_test, y_test, train_lengths, test_lengths, classes \
        = preprocess_data(FLAGS, sequence_lengths=True)
    toc()

    # Set the output dimension according to the number of classes
    FLAGS.output_dim = len(classes)

    # Train the RNN model.
    tic()
    run_experiment(x_train, y_train, x_test, y_test, rnn_model,
                   'train_and_evaluate', FLAGS, train_lengths, test_lengths)
    toc()

    # Create metadata for TensorBoard Projector.
    create_metadata(train_raw, classes, FLAGS)
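rnn_model is likewise defined elsewhere; the sequence lengths passed above exist so the RNN can stop at each document's true length. A minimal sketch of that idea (all sizes assumed):

import tensorflow as tf

def rnn_logits_sketch(word_ids, lengths, vocab_size, num_classes):
    # Hypothetical sketch, not the project's rnn_model: sequence_length masks
    # the padded tail of each document, which is why preprocess_data returns
    # train_lengths and test_lengths above.
    embedded = tf.contrib.layers.embed_sequence(
        word_ids, vocab_size=vocab_size, embed_dim=50)  # assumed width
    cell = tf.nn.rnn_cell.GRUCell(num_units=64)         # assumed size
    _, final_state = tf.nn.dynamic_rnn(
        cell, embedded, sequence_length=lengths, dtype=tf.float32)
    return tf.layers.dense(final_state, num_classes)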
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle


def perceptron_example():
    """Perceptron example demonstrating online learning, and evaluation
    separate from training."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    train_raw, test_raw, classes = get_data(FLAGS.data_dir)

    # Set the output dimension according to the number of classes
    FLAGS.output_dim = len(classes)

    print("\nSplitting the training and test data into two pieces...")
    # Seeding necessary for reproducibility.
    np.random.seed(FLAGS.np_seed)

    # Shuffle data to make the distribution of classes roughly stratified after splitting.
    train_raw = shuffle(train_raw)
    test_raw = shuffle(test_raw)

    train1_raw, train2_raw = np.split(train_raw, 2)
    test1_raw, test2_raw = np.split(test_raw, 2)

    print("First split:")
    x_train1_sentences, y_train1, x_test1_sentences, y_test1 = extract_data(
        train1_raw, test1_raw)

    print("\nProcessing the vocabulary...")
    tic()
    x_train1, x_test1, _, _, vocab_processor, n_words = process_vocabulary(
        x_train1_sentences, x_test1_sentences, FLAGS, reuse=False)
    toc()

    # Train the model on the first split.
    tic()
    run_experiment(x_train1, y_train1, x_test1, y_test1,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()

    # Next we perform incremental training with the 2nd half of the split data.
    print("\nSecond split extends the vocabulary.")
    x_train2_sentences, y_train2, x_test2_sentences, y_test2 = extract_data(
        train2_raw, test2_raw)

    # Extend vocab_processor with the newly added training vocabulary, and save the vocabulary processor for later use.
    tic()
    x_train2, x_test2, _, _, vocab_processor, n_words = process_vocabulary(
        x_train2_sentences,
        x_test2_sentences,
        FLAGS,
        reuse=False,
        vocabulary_processor=vocab_processor,
        extend=True)
    toc()

    # Train the model on the second split.
    tic()
    run_experiment(x_train2, y_train2, x_test2, y_test2,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()

    # We may be interested in the model performance on the training data
    # (e.g. to evaluate removable bias).
    print("\nEvaluation of the model performance on the training data.:")
    run_experiment(None, None, x_train1, y_train1,
                   bag_of_words_perceptron_model, 'evaluate', FLAGS)
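How run_experiment handles the 'evaluate' mode is defined elsewhere in the project; a rough sketch of what an evaluation-only branch could look like with the Estimator API (all names, including the 'words' feature key, are assumed):

def evaluate_only_sketch(estimator, x_eval, y_eval, batch_size=32):
    # Hypothetical sketch: evaluation without any training step.
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'words': x_eval}, y=y_eval, batch_size=batch_size,
        num_epochs=1, shuffle=False)
    return estimator.evaluate(input_fn=eval_input_fn)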