Example #1
def train_step(net,
               X,
               Y,
               epoch_num,
               dev,
               optimizer,
               num_classes=2,
               batchSize=50,
               use_gpu=False,
               device=torch.device('cpu')):
    """
    Performs one supervised training epoch on batches of data
    """
    num_correct = 0
    total_loss = 0.0
    net.train()  # Put the network into training mode
    for batch in tqdm.tqdm(range(0, len(X), batchSize), leave=False):
        batch_tweets = X[batch:batch + batchSize]
        batch_labels = Y[batch:batch + batchSize]
        batch_tweets = pad_batch_input(batch_tweets, device=device)
        batch_onehot_labels = convert_to_onehot(batch_labels,
                                                NUM_CLASSES=num_classes,
                                                device=device)
        optimizer.zero_grad()
        batch_y_hat = net(batch_tweets)
        # cross-entropy: negative log-probability of the true class (assumes log-prob outputs)
        batch_losses = torch.neg(batch_y_hat) * batch_onehot_labels
        loss = batch_losses.mean()
        loss.backward()
        optimizer.step()
        total_loss += float(loss.detach().item())

    net.eval()  #Switch to eval mode
    print(f"loss on epoch {epoch_num} = {total_loss}")
    accuracy = eval_network(dev,
                            net,
                            use_gpu=use_gpu,
                            batch_size=batchSize,
                            device=device)
    return total_loss, accuracy
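
For reference, a hypothetical driver loop for train_step could look like the sketch below. CNN, load_twitter_data, the label rescaling, and the hyperparameters are borrowed from Example #4; the optimizer choice, the CSV path, and the epoch count are assumptions, not part of the original snippet.

import torch
import torch.optim as optim

train_data, dev_data, _ = load_twitter_data("tweets.csv",  # placeholder path
                                             test_split_percent=0.1,
                                             val_split_percent=0.2)
X = train_data.Xwordlist
Y = (train_data.labels + 1.0) / 2.0  # same label rescaling as in Example #4
net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES=2)
optimizer = optim.Adam(net.parameters(), lr=0.0035)  # Adam is an assumption
for epoch in range(8):
    loss, accuracy = train_step(net, X, Y, epoch, dev_data, optimizer,
                                batchSize=150, device=torch.device('cpu'))
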
Example #2
    train_args = utils.load_args(folder=args.folder)
    args = fix_args_for_test(args, train_args)

    checkpoint_path = utils.join_path(
        args.folder, ParallelActorCritic.CHECKPOINT_SUBDIR, ParallelActorCritic.CHECKPOINT_LAST
    )
    env_creator = get_environment_creator(args)
    network = create_network(args, env_creator.num_actions, env_creator.obs_shape)
    steps_trained = load_trained_weights(network, checkpoint_path, args.device == 'cpu')

    if args.old_preprocessing:
        network._preprocess = old_preprocess_images

    print(args_to_str(args), '=='*30, sep='\n')
    print('Model was trained for {} steps'.format(steps_trained))
    if not args.visualize:
        #eval_network prints stats by itself
        eval_network(network, env_creator, args.test_count, greedy=args.greedy)
    else:
        num_steps, rewards = evaluate.visual_eval(
            network, env_creator, args.greedy,
            args.test_count, verbose=1, delay=args.step_delay
        )
        print('Performed {0} tests'.format(args.test_count))
        print('Mean number of steps: {0:.3f}'.format(np.mean(num_steps)))
        print('Mean R: {0:.2f}'.format(np.mean(rewards)), end=' | ')
        print('Max R: {0:.2f}'.format(np.max(rewards)), end=' | ')
        print('Min R: {0:.2f}'.format(np.min(rewards)), end=' | ')
        print('Std of R: {0:.2f}'.format(np.std(rewards)))
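
Example #2 restores the network from its last checkpoint via load_trained_weights, whose implementation is not shown. A minimal sketch of such a helper, using hypothetical checkpoint keys ('network_state_dict', 'last_step') that are assumptions rather than the project's actual format:

import torch

def load_trained_weights(network, checkpoint_path, use_cpu):
    # Load the checkpoint, remapping tensors to the CPU when requested.
    checkpoint = torch.load(checkpoint_path, map_location='cpu' if use_cpu else None)
    network.load_state_dict(checkpoint['network_state_dict'])
    # Return how many training steps the saved model had completed.
    return checkpoint.get('last_step', 0)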

Example #3
                                      PAACLearner.CHECKPOINT_LAST)
    net_creator, env_creator = get_network_and_environment_creator(args)
    network, steps_trained = load_trained_network(net_creator, checkpoint_path)
    if args.old_preprocessing:
        network._preprocess = old_preprocess_images
    use_rnn = hasattr(network, 'get_initial_state')

    print_dict(vars(args), 'ARGS')
    print('Model was trained for {} steps'.format(steps_trained))
    if args.visualize:
        num_steps, rewards = evaluate.visual_eval(network,
                                                  env_creator,
                                                  args.greedy,
                                                  use_rnn,
                                                  args.test_count,
                                                  verbose=1,
                                                  delay=args.step_delay)
    else:
        num_steps, rewards = eval_network(network,
                                          env_creator,
                                          args.test_count,
                                          use_rnn,
                                          greedy=args.greedy)

    print('Performed {0} tests for {1}.'.format(args.test_count, args.game))
    print('Mean number of steps: {0:.3f}'.format(np.mean(num_steps)))
    print('Mean R: {0:.2f}'.format(np.mean(rewards)), end=' | ')
    print('Max R: {0:.2f}'.format(np.max(rewards)), end=' | ')
    print('Min R: {0:.2f}'.format(np.min(rewards)), end=' | ')
    print('Std of R: {0:.2f}'.format(np.std(rewards)))
Example #4
def main():
    args = parse_args()
    # twitter_csv_path = args.tweet_csv_file
    labeled_twitter_csv_path = args.labeled_tweet_csv_file
    unlabeled_twitter_csv_path = args.unlabeled_tweet_csv_file

    device_type = args.device
    acquistion_function_type = args.acquisition_func
    human_label = args.human_label

    use_model_acq = True  # flag for using the model to generate inputs for the acquisition function
    if acquistion_function_type == "least_confidence":
        acquisition_func = least_confidence
    elif acquistion_function_type == "random":
        acquisition_func = random_score
    elif acquistion_function_type == "entropy":
        acquisition_func = entropy_score
    elif acquistion_function_type == "tweet_count":
        acquisition_func = tweet_count_norm
        use_model_acq = False
    else:
        acquisition_func = least_confidence

    seed_data_size = args.seed_data_size
    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(
        labeled_twitter_csv_path,
        test_split_percent=0.1,
        val_split_percent=0.2,
        shuffle=shuffle,
        overfit=True,
        use_bert=use_bert,
        overfit_val=40000)
    unlabeled_tweets, ground_truth_labels = load_unlabeled_tweet_csv(
        unlabeled_twitter_csv_path, num_tweets=45000)

    #convert "unlabeled" tweets to token ids
    X_unlabeled = train_data.convert_text_to_ids(unlabeled_tweets)
    # ground_truth_labels = ground_truth_labels[0:70000]
    ground_truth_labels = (ground_truth_labels + 1.0) / 2.0

    X_seed = train_data.Xwordlist[0:seed_data_size]
    Y_seed = train_data.labels[0:seed_data_size]
    Y_seed = (Y_seed + 1.0) / 2.0

    print(train_data.vocab_size)
    print(len(X_seed))
    print(dev_data.length)
    print(test_data.length)
    num_samples = args.sample_size

    cnn_net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES=2)
    if device_type == "gpu" and torch.cuda.is_available():
        device = torch.device('cuda:0')
        cnn_net = cnn_net.cuda()
        epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(
            cnn_net,
            train_data,
            X_seed,
            Y_seed,
            X_unlabeled,
            ground_truth_labels,
            dev_data,
            use_model=use_model_acq,
            num_epochs=8,
            human_label=human_label,
            acquisition_func=acquisition_func,
            lr=0.0035,
            batchSize=150,
            num_samples=num_samples,
            use_gpu=True,
            device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data,
                                     cnn_net,
                                     use_gpu=True,
                                     device=device)

    else:
        device = torch.device('cpu')
        # cnn_net = cnn_net.cuda()
        epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(
            cnn_net,
            train_data,
            X_seed,
            Y_seed,
            X_unlabeled,
            ground_truth_labels,
            dev_data,
            use_model=use_model_acq,
            num_epochs=8,
            human_label=human_label,
            acquisition_func=acquisition_func,
            lr=0.0035,
            batchSize=150,
            num_samples=num_samples,
            use_gpu=False,
            device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data,
                                     cnn_net,
                                     use_gpu=False,
                                     device=device)

    # plot_accuracy((min_accs, eval_accuracy, max_accs), "Sentiment CNN lr=0.001", train_data.length)
    plot_accuracy(
        eval_accuracy, "Sentiment CNN (Active Learning) lr=0.0035 " +
        acquistion_function_type, seed_data_size)
    # plot_losses(epoch_losses, "Sentiment CNN (Active Learning) lr=0.0030" + acquistion_function_type, train_data.length)
    torch.save(cnn_net.state_dict(), "saved_models\\cnn_active_learn.pth")
    # np.save("cnn_active_learning_train_loss" + acquistion_function_type + "_" + str(seed_data_size) + ".npy", np.array(epoch_losses))
    np.save(
        "human_labelling_results/cnn_active_learning_validation_accuracy_" +
        acquistion_function_type + "_" + str(seed_data_size) + "_" +
        str(num_samples) + ".npy", np.array(eval_accuracy))

    human_labels = []
    ground_truth_labels = []
    tweets = []
    save_labels = True

    if save_labels:
        for tweet, label, ground_truth_label in hand_labeled_data:
            # tweet, score = sample
            tweet = train_data.convert_to_words(tweet)
            tweets.append(tweet)
            human_labels.append(label)
            ground_truth_labels.append(ground_truth_label)

        new_labeled_tweets = pd.DataFrame({
            'label': human_labels,
            'ground truth': ground_truth_labels,
            'text': tweets
        })
        new_labeled_tweets.to_csv("human_labeled_tweets_lc_rk.csv",
                                  header=True,
                                  index=False)
def main():
    #parameters
    # sampling_functions = ['random_score', 'entropy_score', 'least_confidence']
    sampling_functions = ['tweet_count']
    sampling_sizes = [5000, 10000, 15000, 20000]
    num_active_samples = [10, 25, 50]

    # sampling_functions = ['least_confidence']
    # num_active_samples = [25, 50]
    # sampling_sizes = [20000]

    args = parse_args()
    # twitter_csv_path = args.tweet_csv_file
    labeled_twitter_csv_path = args.labeled_tweet_csv_file
    unlabeled_twitter_csv_path = args.unlabeled_tweet_csv_file
    save_models = args.save_models

    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(labeled_twitter_csv_path,
                                                        test_split_percent=0.1,
                                                        val_split_percent=0.2,
                                                        shuffle=shuffle,
                                                        overfit=True, use_bert=use_bert,
                                                        overfit_val=40000)
    unlabeled_tweets, ground_truth_labels = load_unlabeled_tweet_csv(unlabeled_twitter_csv_path, num_tweets=45000)
    X_unlabeled = train_data.convert_text_to_ids(unlabeled_tweets)
    ground_truth_labels = (ground_truth_labels + 1.0)/2.0

    test_accuracies = {}

    print("Running ablation experiment on sampling functions and seed sizes")
    use_model=True
    for af in sampling_functions:
        if af == 'random_score':
            acquisition_func = random_score
        elif af == 'entropy_score':
            acquisition_func = entropy_score
        elif af == 'least_confidence':
            acquisition_func = least_confidence
        elif af == 'tweet_count':
            acquisition_func = tweet_count_norm
            use_model=False
        for seed_data_size in sampling_sizes:
            for sample_size in num_active_samples:
                param_combo = "Acquisition_Func: " + af + " Seed Size: " + str(seed_data_size) + " Sample Size: " + str(sample_size)
                print(param_combo + "\n")
                X_seed = train_data.Xwordlist[0:seed_data_size]
                Y_seed = train_data.labels[0:seed_data_size]
                Y_seed = (Y_seed + 1.0)/2.0
                cnn_net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES = 2)

                device = torch.device('cuda:0')
                cnn_net = cnn_net.cuda()
                print("Train active learning")
                epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(cnn_net, train_data,
                                                                    X_seed, Y_seed,
                                                                    copy.deepcopy(X_unlabeled), np.copy(ground_truth_labels), dev_data,
                                                                    num_epochs=8, use_model=use_model, acquisition_func=acquisition_func,
                                                                    lr=0.0035, batchSize=150, num_samples=sample_size,
                                                                    use_gpu=True, device=device)
                print("Finished Training")
                cnn_net.eval()

                print("Test Set")
                test_accuracy = eval_network(test_data, cnn_net, use_gpu=True, device=device)
                model_save_path = "model_weights/cnn_active_learn_weights_"+ af + "_" + str(seed_data_size) + "_" + str(sample_size) + ".pth"
                if save_models:
                    torch.save(cnn_net.state_dict(), model_save_path)

                param_combo = "CNN Active Learning: " + " Acquisition_Func: " + af + " Seed Size: " + str(seed_data_size) + " Sample Size: " + str(sample_size)
                test_accuracies[param_combo] = test_accuracy
                filename = "results_ablation/cnn_active_learning_val_accuracy_" + af + "_" + str(seed_data_size) + "_" + str(sample_size) + ".npy"
                np.save(filename, np.array(eval_accuracy))

    print("Finished experiments")
    with open("ablation_test_accuracies1.txt", "w") as f:
        for key in test_accuracies.keys():
            accuracy = test_accuracies[key]
            line = key + " Acc: " + str(accuracy) + "\n"
            f.write(line)