def run_experiment(experiment_type, data_folder, save_model_folder,
                   save_results_folder):
    """
    Runs experiments and saves results

    Parameters
    ----------
    experiment_type
    data_folder
    save_model_folder
    save_results_folder
    """
    def set_experiment_variables(hidden_state_size=512,
                                 down_project_size=None,
                                 load_embeddings=False):
        tf.flags.DEFINE_integer("hidden_state_size", hidden_state_size,
                                "hidden state size (default 512)")
        tf.flags.DEFINE_integer(
            "down_project_size", down_project_size,
            "Down projection size. Should be used with a hidden_state_size of 1024 (default None)"
        )
        tf.flags.DEFINE_boolean(
            "load_embeddings", load_embeddings,
            "Whether to use pretrained embeddings or not (default False)")

    if experiment_type == 'A':
        set_experiment_variables(512, None, False)
    elif experiment_type == 'B':
        set_experiment_variables(512, None, True)
    elif experiment_type == 'C':
        set_experiment_variables(1024, 512, True)
    else:
        raise ValueError(
            "Unknown experiment_type {!r}; expected 'A', 'B' or 'C'".format(
                experiment_type))

    print("\nExperiment Arguments:")
    for key in FLAGS.flag_values_dict():
        if key == 'f':
            continue
        print("{:<22}: {}".format(key.upper(), FLAGS[key].value))
    print(" ")

    data_processing = DataProcessing(FLAGS.sentence_length,
                                     FLAGS.max_vocabulary_size)
    train_corpus = data_processing.preprocess_dataset(data_folder,
                                                      'sentences.train')
    validation_corpus = data_processing.preprocess_dataset(
        data_folder, 'sentences.eval')
    test_corpus = data_processing.preprocess_dataset(data_folder,
                                                     'sentences_test.txt')
    continuation_corpus = data_processing.preprocess_dataset(
        data_folder, 'sentences.continuation', pad_to_sentence_length=False)

    print(f'Number of train sentences is \t\t{len(train_corpus)}')
    print(f'Number of validation sentences is \t{len(validation_corpus)}')
    print(f'Number of test sentences is \t\t{len(test_corpus)}')
    print(f'Number of continuation sentences is \t{len(continuation_corpus)}')
    print(" ")

    best_perplexity = None
    best_model = None

    with tf.Graph().as_default():
        with tf.Session() as session:
            # Create a variable to contain a counter for the global training step.
            global_step = tf.Variable(1, name='global_step', trainable=False)

            lstm = LSTMCell(FLAGS.embedding_size,
                            FLAGS.hidden_state_size,
                            FLAGS.sentence_length,
                            FLAGS.max_vocabulary_size,
                            down_project_size=FLAGS.down_project_size,
                            pad_symbol=data_processing.vocab['<pad>'])

            if FLAGS.load_embeddings:
                load_embedding(session, data_processing.vocab,
                               lstm.input_embeddings,
                               data_folder + '/wordembeddings-dim100.word2vec',
                               FLAGS.embedding_size,
                               len(data_processing.vocab))

            ####
            # Set optimizer and crop all gradients to values [-5, 5]
            ####
            with tf.name_scope('train'):
                optimizer = tf.train.AdamOptimizer()
                gvs = optimizer.compute_gradients(lstm.loss)
                capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var)
                              for grad, var in gvs]
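                # Note: if a variable ever receives no gradient (grad is None),
                # tf.clip_by_value would fail; such pairs would need to be
                # filtered out of the comprehension in that case.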
                train_step = optimizer.apply_gradients(capped_gvs,
                                                       global_step=global_step)

            saver = tf.train.Saver(tf.global_variables(),
                                   max_to_keep=FLAGS.num_checkpoints)

            session.run(tf.global_variables_initializer())
            summaries_merged = tf.summary.merge(lstm.summaries)

            ####
            # Create checkpoint directory
            ####
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(
                os.path.join(save_model_folder, "runs", timestamp))
            checkpoint_dir = os.path.abspath(
                os.path.join(out_dir, "checkpoints"))
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

            ####
            # Start training for the specified epochs
            ####
            print('Start training...')
            for epoch in range(FLAGS.num_epochs):
                for sentences_batch in get_batches(
                        train_corpus, batch_size=FLAGS.batch_size):
                    # run a single step
                    train_batch(sentences_batch, lstm, train_step, global_step,
                                session, summaries_merged)

                current_step = tf.train.global_step(session, global_step)
                if current_step % FLAGS.checkpoint_every == 0:
                    perplexities = dev_step(
                        get_batches(validation_corpus,
                                    batch_size=FLAGS.batch_size,
                                    do_shuffle=False), lstm, global_step,
                        session)
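                    # dev_step is assumed to return one perplexity value per
                    # validation batch; their mean below picks the best checkpoint.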

                    average_perplexity = np.mean(perplexities)
                    model_name = "model_experiment-{}_epoch-{}_val-perplexity-{}".format(
                        experiment_type, epoch + 1, average_perplexity)
                    path = saver.save(session,
                                      os.path.join(checkpoint_dir, model_name))

                    print("Saved model checkpoint to {}".format(path))

                    if best_perplexity is None or best_perplexity > average_perplexity:
                        best_perplexity = average_perplexity
                        best_model = model_name

                print('Done with epoch', epoch + 1)

            if best_model is None:
                raise Exception(
                    "Model has not been saved. Run for at least one epoch")

            print('Restoring best model', best_model)
            saver.restore(session, os.path.join(checkpoint_dir, best_model))

            # evaluate on test set
            perplexities = dev_step(get_batches(test_corpus,
                                                batch_size=FLAGS.batch_size,
                                                do_shuffle=False),
                                    lstm,
                                    global_step,
                                    session,
                                    verbose=0)

            print('Perplexity on test_set is', np.mean(perplexities))

            filename = "group25.perplexity{}".format(experiment_type)
            savefile = os.path.join(save_results_folder, filename)
            print('Saving results to', savefile)

            with open(savefile, 'w') as f:
                f.writelines(str(i) + '\n' for i in perplexities)

            if experiment_type == 'C':
                continuation_sentences = continue_sentences(
                    continuation_corpus, session, lstm, data_processing)

                filename = "group25.continuation"
                savefile = os.path.join(save_results_folder, filename)
                print('Saving results to', savefile)

                with open(savefile, 'w') as f:
                    f.writelines(str(i) + '\n' for i in continuation_sentences)

    print('Done')
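
# A minimal invocation sketch (hypothetical paths; assumes FLAGS and the remaining
# flags referenced above -- sentence_length, max_vocabulary_size, embedding_size,
# batch_size, num_epochs, num_checkpoints, checkpoint_every -- are defined
# elsewhere in the module):
#
# if __name__ == '__main__':
#     run_experiment('B', './data', './models', './results')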
Example #2
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()
    config = tf.ConfigProto()
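    # Allocate GPU memory on demand instead of reserving it all up front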
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        train_summary_writer = tf.summary.FileWriter(TRAIN_LOG_DIR, sess.graph)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        step = 0
        for epoch in range(EPOCH_NUM):
            accuracy_epoch = 0
            loss_epoch = 0
            batch_index = 0
            for i in range(len(train_video_indices) // BATCH_SIZE):
                step += 1
                batch_data, batch_index = data_processing.get_batches(
                    TRAIN_LIST_PATH, NUM_CLASSES, batch_index,
                    train_video_indices, BATCH_SIZE)
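                # One optimization step; keep_prob 0.5 applies dropout during training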
                _, loss_out, accuracy_out, summary = sess.run(
                    [optimizer, loss, accuracy, summary_op],
                    feed_dict={
                        batch_clips: batch_data['clips'],
                        batch_labels: batch_data['labels'],
                        keep_prob: 0.5
                    })
                loss_epoch += loss_out
                accuracy_epoch += accuracy_out

                if i % 10 == 0:
                    print('Epoch %d, Batch %d: Loss is %.5f; Accuracy is %.5f'%(epoch+1, i, loss_out, accuracy_out))
                    train_summary_writer.add_summary(summary, step)

            print('Epoch %d: Average loss is: %.5f; Average accuracy is: %.5f'%(epoch+1, loss_epoch / (len(train_video_indices) // BATCH_SIZE),
                                                                                accuracy_epoch / (len(train_video_indices) // BATCH_SIZE)))
            accuracy_epoch = 0
            loss_epoch = 0
Example #3
    logits = C3D_model.C3D(batch_clips, NUM_CLASSES, 0.5)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(batch_labels, 1)),
                tf.float32))

    restorer = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        restorer.restore(sess, TRAIN_CHECK_POINT)
        accuracy_epoch = 0
        batch_index = 0
        for i in range(test_num // BATCH_SIZE):
            if i % 10 == 0:
                print('Testing %d of %d' % (i + 1, test_num // BATCH_SIZE))
            batch_data, batch_index = data_processing.get_batches(
                TEST_LIST_PATH, NUM_CLASSES, batch_index, test_video_indices,
                BATCH_SIZE)
            accuracy_out = sess.run(accuracy,
                                    feed_dict={
                                        batch_clips: batch_data['clips'],
                                        batch_labels: batch_data['labels']
                                    })
            accuracy_epoch += accuracy_out

    print('Test accuracy is %.5f' % (accuracy_epoch /
                                     (test_num // BATCH_SIZE)))
Example #4
# ----------------------------------------
#                 Training
# ----------------------------------------

torch.backends.cudnn.benchmark = True

for epoch in range(opt.epochs):
    # scheduler.step()
    accuracy_epoch = 0
    loss_epoch = 0
    batch_index = 0
    step = 0
    for i in range(len(train_video_indices) // opt.batch_size):
        step += 1
        batch_data, batch_index = data_processing.get_batches(
            TRAIN_LIST_PATH, opt.num_classes, batch_index,
            train_video_indices, opt.batch_size)
        # Clips come as (N, D, H, W, C); reorder to (N, C, D, H, W) for the 3D conv net
        data = torch.from_numpy(batch_data['clips'].transpose(0, 4, 1, 2, 3)).float()
        target = torch.from_numpy(batch_data['labels']).long()
        if cuda:
            data = data.cuda()
            target = target.cuda()
        
        optimizer.zero_grad()

        out = model(data)
        loss = criterion(out, target)
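
        # The excerpt stops here; the usual continuation (not part of the original
        # snippet) would be the backward pass and parameter update:
        # loss.backward()
        # optimizer.step()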
Example #5
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
 with tf.Session(config=config) as sess:
     train_writer = tf.summary.FileWriter(CHECK_POINT_PATH, sess.graph)
     # Restore variables from disk.
     sess.run(tf.global_variables_initializer())
     sess.run(tf.local_variables_initializer())
     restorer.restore(sess, MODEL_PATH)
     #Obtain style image batch
     style_image_batch = data_processing.read_image(STYLE_IMAGE_PATH, True,
                                                    BATCH_SIZE)
     step = 0
     for epoch in range(EPOCH_NUM):
         batch_index = 0
         for i in range(DATA_SIZE // BATCH_SIZE):
             image_batch, batch_index = data_processing.get_batches(
                 TRAIN_IMAGE_PATH, batch_index, BATCH_SIZE)
             _, batch_ls, style_ls, content_ls, summary_str = sess.run(
                 [opt_op, loss, style_loss, content_loss, summary],
                 feed_dict={
                     image: image_batch,
                     style_image: style_image_batch
                 })
             step += 1
             if i % 10 == 0:
                 print(
                     'Epoch %d, Batch %d of %d, loss is %.3f, style loss is %.3f, content loss is %.3f'
                     % (epoch + 1, i, DATA_SIZE // BATCH_SIZE, batch_ls,
                        220 * style_ls, content_ls))
                 train_writer.add_summary(summary_str, step)
                 test_image = data_processing.read_image(TEST_IMAGE_PATH)
                 styled_image = sess.run(squeezed_generated_image,
Example #6
def main(nets_archi, train_data, test_data, mode_, name="test"):
    # Preprocessing data
    data_size = train_data.shape[0]
    # Create weights DST dir
    DST = create_DST(name)

    ###### Reset tf graph ######
    tf.reset_default_graph()
    start_time = time.time()
    print("\nPreparing variables and building model ...")

    ###### Create tf placeholder for obs variables ######
    y = tf.placeholder(dtype=data_type(),
                       shape=(None, IMAGE_SIZE, IMAGE_SIZE, 1))
    normal_mean = tf.placeholder(dtype=data_type(), shape=(K, N, N + 1))

    ###### Create variable for batch ######
    batch = tf.Variable(0, dtype=data_type())
    ###### Learning rate decay ######
    learning_rate = tf.train.exponential_decay(
        learning_rate_init,  # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        20 * data_size,  # Decay step.
        0.97,  # Decay rate.
        staircase=True)
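    # With staircase=True this evaluates to
    #   learning_rate_init * 0.97 ** ((batch * BATCH_SIZE) // (20 * data_size)),
    # i.e. the rate drops by 3% after every 20 passes over the training data.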

    ###### Create instance SVAE ######
    recognition_net = nets_archi["recog"]
    generator_net = nets_archi["gener"]
    svae_ = svae.SVAE(
        recog_archi=recognition_net,  # architecture of the recognition network
        gener_archi=generator_net,  # architecture of the generative network
        K=K,  # dim of the discrete latents z
        N=N,  # dim of the gaussian latents x
        P=IMAGE_SIZE * IMAGE_SIZE,  # dim of the obs variables y
        max_iter=niter)  # number of iterations in the coordinate block ascent

    ###### Initialize parameters ######
    labels_stats_init_tiled, label_global_mean, gauss_global_mean = \
        svae_._init_params()
    # We need to tile the natural parameters for each inputs in batch (inputs are iid)
    tile_shape = [BATCH_SIZE, 1, 1, 1]
    gauss_global_mean_tiled = tf.tile(
        tf.expand_dims(gauss_global_mean,
                       0), tile_shape)  # shape: [batch,n_mixtures,dim,1+dim]
    label_global_mean_tiled = tf.tile(tf.expand_dims(
        label_global_mean, 0), tile_shape[:-1])  # shape: [batch,n_mixtures,1]
    # We convert the global mean parameters to global natural parameters
    gaussian_global = svae_.gaussian.standard_to_natural(
        gauss_global_mean_tiled)
    label_global = svae_.labels.standard_to_natural(label_global_mean_tiled)

    ###### Build loss and optimizer ######
    svae_._create_loss_optimizer(gaussian_global, label_global,
                                 labels_stats_init_tiled, y, learning_rate,
                                 batch)

    ###### Build generator ######
    svae_._generate(
        tf.tile(tf.expand_dims(normal_mean, 0), [nexamples, 1, 1, 1]))

    ###### Initializer ######
    init = tf.global_variables_initializer()
    ###### Saver ######
    saver = tf.train.Saver()
    ###### Create a local session to run the training ######
    with tf.Session() as sess:
        # Training
        if mode_ == "training":
            # Opening csv file
            csv_path = "./Perf"
            if not tf.gfile.Exists(csv_path):
                os.makedirs(csv_path)
            csvfileTrain = open(os.path.join(csv_path, name) + ".csv", 'w')
            Trainwriter = csv.writer(
                csvfileTrain,
                delimiter=';',
            )
            Trainwriter.writerow(['Num Epoch', 'train loss', 'test_loss'])

            # Initialize variables
            sess.run(tf.global_variables_initializer())

            # initialize performance indicators
            best_l = -10000000000.0

            #training loop
            print("\nStart training ...")
            for epoch in range(num_epochs):
                start_time = time.time()
                print("")
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                # Training loop
                train_l = 0.0
                batches = data_processing.get_batches(train_data, BATCH_SIZE)
                for i, batch in enumerate(batches):
                    _, l, lr = sess.run(
                        [svae_.optimizer, svae_.SVAE_obj, learning_rate],
                        feed_dict={y: batch})
                    # Update average loss
                    train_l += l / len(batches)

                # Testing loop
                test_l = 0.0
                batches = data_processing.get_batches(test_data, BATCH_SIZE)
                for i, batch in enumerate(batches):
                    l = sess.run(svae_.SVAE_obj, feed_dict={y: batch})
                    # Update average loss
                    test_l += l / len(batches)

                # Update best perf and save model
                if test_l > best_l:
                    best_l = test_l
                    if epoch > 20:
                        saver.save(sess, DST)
                        print("model saved.")
                # Print info for previous epoch
                print("Epoch {} done, took {:.2f}s, learning rate: {:10.2e}".
                      format(epoch,
                             time.time() - start_time, lr))
                print(
                    "Train loss: {:.3f}, Test loss: {:.3f}, Best test loss: {:.3f}"
                    .format(train_l, test_l, best_l))

                # Writing csv file with results and saving models
                Trainwriter.writerow([epoch + 1, train_l, test_l])

        if mode_ == "reconstruct":
            #Plot reconstruction mean
            if not tf.gfile.Exists(DST + ".meta"):
                raise Exception("no weights given")
            saver.restore(sess, DST)
            img = test_data[np.random.randint(0,
                                              high=test_data.shape[0],
                                              size=BATCH_SIZE)]
            bernouilli_mean = sess.run(svae_.y_reconstr_mean,
                                       feed_dict={y: img})
            save_reconstruct(img[:nexamples], bernouilli_mean[:nexamples],
                             "./reconstruct")

        if mode_ == "generate":
            # Test for plotting images
            if not tf.gfile.Exists(DST + ".meta"):
                raise Exception("no weights given")
            saver.restore(sess, DST)
            gaussian_mean = sess.run(gauss_global_mean, feed_dict={})
            bernouilli_mean = sess.run(svae_.y_generate_mean,
                                       feed_dict={normal_mean: gaussian_mean})
            bernouilli_mean = np.transpose(
                np.reshape(bernouilli_mean,
                           (nexamples, K, IMAGE_SIZE, IMAGE_SIZE)),
                (1, 0, 2, 3))
            save_gene(bernouilli_mean, "./generate")
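
# A minimal usage sketch (hypothetical names; assumes recognition/generator network
# builders and image arrays of shape (num_samples, IMAGE_SIZE, IMAGE_SIZE, 1)):
#
# nets = {"recog": build_recognition_net, "gener": build_generator_net}
# main(nets, train_images, test_images, "training", name="svae_run")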
Example #7
def main(_):
    with tf.device('/gpu:0'):

        for regularization_type in ['Blackout', 'None', 'L1', 'L2']:

            dataset_sizes = np.linspace(2500, 55000, num=22)
            for size in dataset_sizes:
                # Getting the appropriate dataset

                print(int(size))
                train_x, train_y, valid_x, valid_y, test_x, test_y = split_data(
                    dataset, int(size))

                # Resetting the graph in case of multiple runs on the same console
                tf.reset_default_graph()
                for i in range(numOfTests):
                    num_layers = random.choice([5, 6, 7, 8, 9, 10])
                    num_nodes = random.choice([200, 400, 600])
                    num_inputs = int(train_x.shape[1])
                    num_steps = random.choice([50, 100, 150, 200])
                    regularization_scale = random.choice(
                        [0.01, 0.005, 0.001, 0.0005])
                    percent_connections_kept = random.choice([0.9, 0.95, 0.85])
                    num_classes = len(np.unique(train_y))

                    print('Test No. ' + str(i) + '/' + str(numOfTests))
                    print('Parameters: ' + str(size) + ',' +
                          regularization_type + ',' + str(num_layers) + ',' +
                          str(num_nodes) + ',' + str(num_steps) + ',' +
                          str(regularization_scale) + ',' +
                          str(percent_connections_kept))

                    # Create the model
                    x = tf.placeholder(tf.float32, [None, num_inputs])
                    y = create_model(x, num_layers, num_nodes, num_classes)

                    # Define loss and optimizer
                    y_ = tf.placeholder(tf.int64, [None])

                    # Retrieving weights and defining regularization penalty
                    weights = tf.trainable_variables()
                    regularization_penalty, blackout_weights = get_regularization_penalty(
                        weights, regularization_scale,
                        percent_connections_kept, regularization_type)

                    # Defining loss and optimizer
                    cross = tf.losses.sparse_softmax_cross_entropy(labels=y_,
                                                                   logits=y)
                    loss = cross + regularization_penalty
                    train_step = tf.train.RMSPropOptimizer(0.001).minimize(
                        loss)

                    # Evaluate Model
                    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
                    accuracy = tf.reduce_mean(
                        tf.cast(correct_prediction, tf.float32))
                    config = tf.ConfigProto()
                    config.gpu_options.allow_growth = True

                    # Initializing session
                    sess = tf.InteractiveSession(config=config)
                    tf.global_variables_initializer().run()

                    # Train
                    #                PercentageOfConnOff=[]
                    #                LossFunctionRegu=[]
                    #                LossFunctionCrossTrain=[]
                    #                LossFunctionCrossValid=[]
                    #
                    numOfBatches = 50
                    all_batches_x, all_batches_y = get_batches(
                        train_x, train_y, numOfBatches)
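                    # The training set is split once into numOfBatches fixed batches;
                    # each training step samples one of them at random (with replacement).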

                    # Train
                    for step in range(num_steps):
                        randomPick = random.randrange(numOfBatches)
                        currentBatchX = all_batches_x[randomPick]
                        currentBatchY = all_batches_y[randomPick]
                        sess.run(train_step,
                                 feed_dict={
                                     x: currentBatchX,
                                     y_: currentBatchY
                                 })
                        # Test trained model
                        if step % 20 == 1:
                            print('Accuracy: ' + str(
                                sess.run(accuracy,
                                         feed_dict={
                                             x: valid_x,
                                             y_: valid_y
                                         })))
    #                            if regularization_type=='Blackout':
    #                                currentWeights=sess.run(blackout_weights)
    #                                part1=currentWeights>-0.01
    #                                part2=currentWeights<0.01
    #                                turnedOff=np.sum(np.logical_and(part1,part2))
    #                                TotalNumOfWeights=float(currentWeights.shape[0])
    #                                LossFunctionCrossTrain.append(sess.run(cross, feed_dict={x: train_x, y_: train_y}))
    #                                LossFunctionCrossValid.append(sess.run(cross, feed_dict={x: valid_x, y_: valid_y}))
    #                                LossFunctionRegu.append(sess.run(regularization_penalty))
    #                                PercentageOfConnOff.append((TotalNumOfWeights-turnedOff)/TotalNumOfWeights)
    #if regularization_type=='Blackout':
    #    fig = plt.figure()
    #    ax1 = fig.add_subplot(1, 2, 1)
    #   ax2 = fig.add_subplot(1, 2, 2)
    #    ax1.plot(PercentageOfConnOff)
    #    ax2.plot(LossFunctionCrossTrain,label='Cross-Entropy Train')
    #    ax2.plot(LossFunctionCrossValid,label='Cross-Entropy Validation')
    #    ax2.plot(LossFunctionRegu,label='Regularization')
    #    ax2.legend()
    #    fig.show()
                    accuracyVal = sess.run(accuracy,
                                           feed_dict={
                                               x: valid_x,
                                               y_: valid_y
                                           })
                    accuracyTest = sess.run(accuracy,
                                            feed_dict={
                                                x: test_x,
                                                y_: test_y
                                            })
                    tf.reset_default_graph()
                    store_results(dataset, regularization_type, num_layers,
                                  num_nodes, num_steps, regularization_scale,
                                  percent_connections_kept, accuracyVal,
                                  accuracyTest, size)
                    print('Accuracy Val: ' + str(accuracyVal) +
                          ' , Accuracy Test: ' + str(accuracyTest))
Example #8
def read_labels_from_file(filepath):
    with open(filepath, 'r') as f:
        labels = [line.strip() for line in f.readlines()]
    return labels


test_path = 'test_cross.list'
torch.backends.cudnn.benchmark = True
if __name__ == "__main__":
    with open(test_path, 'r') as t:
        test_num = len(list(t))
    test_video_indices = range(test_num)
    batch_index = 0
    for i in range(test_num // opt.batch_size):
        batch_data, batch_index = data_processing.get_batches(
            test_path, opt.num_classes, batch_index, test_video_indices,
            opt.batch_size)

        clip = torch.from_numpy(batch_data['clips'].transpose(0, 4, 1, 2, 3))
        clip = clip.cuda()
        net = C3D_model_pytorch.C3D(dropout_rate=1)
        net.load_state_dict(torch.load('C3D_model_pytorch.pkl'))
        net.cuda()
        net.eval()

        output = net(clip)

        labels = torch.from_numpy(np.array(batch_data['labels']))
        output_index = torch.max(output, 1)[1].cpu().numpy()
        equ_num = 0
        for i in range(opt.batch_size):