Example #1
def test(model_name='model.pkl'):
    cnn = CNN()
    cnn.load_state_dict(torch.load(model_name))
    cnn.eval()
    print('load cnn net.')

    test_dataloader = dataset.get_test_data_loader()

    correct = 0
    total = 0
    for i, (images, labels) in enumerate(test_dataloader):
        image = images
        vimage = Variable(image)
        predict_label = cnn(vimage)

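        # The model emits one flat vector of MAX_CAPTCHA * ALL_CHAR_SET_LEN
        # scores; each ALL_CHAR_SET_LEN-wide slot scores one character position.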
        chars = ''
        for k in range(setting.MAX_CAPTCHA):
            chars += setting.ALL_CHAR_SET[np.argmax(
                predict_label[0, k * setting.ALL_CHAR_SET_LEN:(k + 1) *
                              setting.ALL_CHAR_SET_LEN].data.numpy())]

        predict_label = chars
        true_label = one_hot.decode(labels.numpy()[0])
        total += labels.size(0)

        if predict_label == true_label:
            correct += 1
        else:
            print('Predict:' + predict_label)
            print('Real   :' + true_label)
        if total % 200 == 0:
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
    print('Test Accuracy of the model on the %d test images: %f %%' %
          (total, 100 * correct / total))
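
# The per-slot argmax decoding above recurs in the examples below; a minimal
# helper sketch (assuming the same setting module and a
# (1, MAX_CAPTCHA * ALL_CHAR_SET_LEN) output tensor) could be:
def decode_prediction(output):
    chars = ''
    for k in range(setting.MAX_CAPTCHA):
        start = k * setting.ALL_CHAR_SET_LEN
        scores = output[0, start:start + setting.ALL_CHAR_SET_LEN].data.numpy()
        chars += setting.ALL_CHAR_SET[np.argmax(scores)]
    return chars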
Example #2
def main():
    # Load net
    cnn = CNN()
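    # MultiLabelSoftMarginLoss treats every output unit as an independent
    # binary label, which matches the multi-hot captcha target encoding.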
    loss_func = nn.MultiLabelSoftMarginLoss()
    optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)
    if torch.cuda.is_available():
        cnn.cuda()
        loss_func.cuda()

    # Load data
    train_dataloader = dataset.get_train_data_loader()
    test_dataloader = dataset.get_test_data_loader()

    # Train model
    for epoch in range(num_epochs):
        cnn.train()
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())  # targets must be float for MultiLabelSoftMarginLoss
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            predict_labels = cnn(images)
            loss = loss_func(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print("epoch:", epoch, "step:", i + 1, "loss:", loss.item())

        # Save and test model
        if (epoch + 1) % 10 == 0:
            filename = "model" + str(epoch + 1) + ".pkl"
            torch.save(cnn.state_dict(), filename)
            cnn.eval()
            correct = 0
            total = 0
            for (image, label) in test_dataloader:
                vimage = Variable(image)
                if torch.cuda.is_available():
                    vimage = vimage.cuda()
                output = cnn(vimage)
                predict_label = ""
                for k in range(4):
                    predict_label += config.CHAR_SET[np.argmax(
                        output[0, k * config.CHAR_SET_LEN:(k + 1) *
                               config.CHAR_SET_LEN].data.cpu().numpy())]
                true_label = one_hot.vec2text(label.numpy()[0])
                total += label.size(0)
                if predict_label == true_label:
                    correct += 1
                if total % 200 == 0:
                    print(
                        'Test Accuracy of the model on the %d test images: %f %%'
                        % (total, 100 * correct / total))
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
            print("save and test model...")
    torch.save(cnn.state_dict(), "./model.pkl")  # save the final model as model.pkl
    print("save last model")
Example #3
def main():
    cnn = CNN()
    cnn.load_state_dict(torch.load('model/1500_model.pkl'))
    cnn.eval()
    print("load cnn net.")

    predict_dataloader = my_dataset.get_predict_data_loader()

    c = ''  # avoid a NameError on the final return if the loader yields nothing
    for i, (images, labels) in enumerate(predict_dataloader):
        image = images
        vimage = Variable(image)
        predict_label = cnn(vimage)

        # Decode each of the four character slots via argmax over its scores.
        chars = []
        for k in range(4):
            start = k * captcha_setting.ALL_CHAR_SET_LEN
            scores = predict_label[0, start:start +
                                   captcha_setting.ALL_CHAR_SET_LEN].data.numpy()
            chars.append(captcha_setting.ALL_CHAR_SET[np.argmax(scores)])
        c = ''.join(chars)
    return c
Example #4
def main():
    cnn = CNN()
    cnn.load_state_dict(torch.load('model/1500_model.pkl'))
    cnn.eval()
    print("load cnn net.")

    test_dataloader = my_dataset.get_test_data_loader()

    correct = 0
    total = 0
    error = []
    true = []
    for i, (images, labels) in enumerate(test_dataloader):
        image = images
        vimage = Variable(image)
        predict_label = cnn(vimage)

        # Decode each of the four character slots via argmax over its scores.
        chars = []
        for k in range(4):
            start = k * captcha_setting.ALL_CHAR_SET_LEN
            scores = predict_label[0, start:start +
                                   captcha_setting.ALL_CHAR_SET_LEN].data.numpy()
            chars.append(captcha_setting.ALL_CHAR_SET[np.argmax(scores)])
        predict_label = ''.join(chars)
        true_label = one_hot_encoding.decode(labels.numpy()[0])
        print("true_label: ", true_label)
        print("predict_lable: ", predict_label, "\n")
        total += labels.size(0)
        if predict_label == true_label:
            correct += 1
        else:
            error.append(predict_label)
            true.append(true_label)
        if total % 200 == 0:
            print('Test set size: %d, accuracy: %f %%' %
                  (total, 100 * correct / total))
    print('Test set size: %d, accuracy: %f %%' %
          (total, 100 * correct / total))
    print('Misclassified examples:\n')
    print('Correct characters:', true)
    print('Wrong characters:', error)
Example #5
def main():
    args = parse_args()
    twitter_csv_path = args.tweet_csv_file
    device_type = args.device
    use_bert = False
    shuffle = False
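    # Assumption from the parameter names: 10% of the data goes to test, 20%
    # to validation, and overfit=True caps training at overfit_val examples.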
    train_data, dev_data, test_data = load_twitter_data(
        twitter_csv_path, test_split_percent=0.1, val_split_percent=0.2,
        overfit=True, shuffle=shuffle, use_bert=use_bert, overfit_val=12639)
    vocab_size = train_data.vocab_size
    print("vocab size:", vocab_size)
    print("train size:", train_data.length)
    print("dev size:", dev_data.length)
    print("test size:", test_data.length)
    cnn_net = CNN(vocab_size, DIM_EMB=300, NUM_CLASSES=2)
    if device_type == "gpu" and torch.cuda.is_available():
        device = torch.device('cuda:0')
        cnn_net = cnn_net.cuda()
        epoch_losses, eval_accuracy = train_network(cnn_net,
                                        train_data.Xwordlist,
                                        (train_data.labels + 1.0)/2.0,
                                        10, dev_data, lr=0.003,
                                        batchSize=150, use_gpu=True, device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data, cnn_net, use_gpu=True, device=device)

    else:
        device = torch.device('cpu')
        epoch_losses, eval_accuracy = train_network(cnn_net,
                                        train_data.Xwordlist,
                                        (train_data.labels + 1.0)/2.0,
                                        10, dev_data, lr=0.003,
                                        batchSize=150, use_gpu=False, device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data, cnn_net, use_gpu=False, device=device)

    # plot_accuracy((min_accs, eval_accuracy, max_accs), "Sentiment CNN lr=0.001", train_data.length)
    plot_accuracy(eval_accuracy, "Sentiment CNN lr=0.003", train_data.length)
    plot_losses(epoch_losses, "Sentiment CNN lr=0.003", train_data.length)
    torch.save(cnn_net.state_dict(), "saved_models\\cnn.pth")
    np.save("cnn_train_loss_" + str(train_data.length) +  ".npy", np.array(epoch_losses))
    np.save("cnn_validation_accuracy_" + str(train_data.length) +  ".npy", np.array(eval_accuracy))
Example #6
def recognize(model_name='model.pkl'):
    cnn = CNN()
    cnn.load_state_dict(torch.load(model_name))
    cnn.eval()
    # print('load cnn net.')
    # NUM_LEN = len(setting.NUMBER)

    captcha_dataloader = dataset.get_captcha_data_loader()
    code = ''
    images = {}
    for image, label in captcha_dataloader:
        images[label] = image
    images = [images[key] for key in sorted(images)]
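    # Iterating in sorted-label order appends the decoded characters in their
    # original sequence (assumes each label encodes the image's position).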
    for image in images:
        vimage = Variable(image)
        predict_label = cnn(vimage)

        for i in range(setting.MAX_CAPTCHA):
            code += setting.ALL_CHAR_SET[np.argmax(
                predict_label[0, i * setting.ALL_CHAR_SET_LEN:(i + 1) *
                              setting.ALL_CHAR_SET_LEN].data.numpy())]

    return code
Example #7
def main():
    args = parse_args()
    # twitter_csv_path = args.tweet_csv_file
    labeled_twitter_csv_path = args.labeled_tweet_csv_file
    unlabeled_twitter_csv_path = args.unlabeled_tweet_csv_file

    device_type = args.device
    acquisition_function_type = args.acquisition_func
    human_label = args.human_label

    use_model_acq = True  # flag: feed model outputs to the acquisition function
    if acquisition_function_type == "least_confidence":
        acquisition_func = least_confidence
    elif acquisition_function_type == "random":
        acquisition_func = random_score
    elif acquisition_function_type == "entropy":
        acquisition_func = entropy_score
    elif acquisition_function_type == "tweet_count":
        acquisition_func = tweet_count_norm
        use_model_acq = False
    else:
        acquisition_func = least_confidence
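
    # An equivalent table-driven dispatch (a sketch; least_confidence stays
    # the fallback, and note the "tweet_count" branch above also clears
    # use_model_acq, which a bare lookup table would not capture):
    # acquisition_func = {
    #     "least_confidence": least_confidence,
    #     "random": random_score,
    #     "entropy": entropy_score,
    #     "tweet_count": tweet_count_norm,
    # }.get(acquisition_function_type, least_confidence)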

    seed_data_size = args.seed_data_size
    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(
        labeled_twitter_csv_path,
        test_split_percent=0.1,
        val_split_percent=0.2,
        shuffle=shuffle,
        overfit=True,
        use_bert=use_bert,
        overfit_val=40000)
    unlabeled_tweets, ground_truth_labels = load_unlabeled_tweet_csv(
        unlabeled_twitter_csv_path, num_tweets=45000)

    # Convert the "unlabeled" tweets to token ids
    X_unlabeled = train_data.convert_text_to_ids(unlabeled_tweets)
    # ground_truth_labels = ground_truth_labels[0:70000]
    ground_truth_labels = (ground_truth_labels + 1.0) / 2.0  # map {-1, +1} labels to {0, 1}

    X_seed = train_data.Xwordlist[0:seed_data_size]
    Y_seed = train_data.labels[0:seed_data_size]
    Y_seed = (Y_seed + 1.0) / 2.0  # same {-1, +1} -> {0, 1} mapping

    print("vocab size:", train_data.vocab_size)
    print("seed size:", len(X_seed))
    print("dev size:", dev_data.length)
    print("test size:", test_data.length)
    num_samples = args.sample_size

    cnn_net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES=2)
    if device_type == "gpu" and torch.cuda.is_available():
        device = torch.device('cuda:0')
        cnn_net = cnn_net.cuda()
        epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(
            cnn_net,
            train_data,
            X_seed,
            Y_seed,
            X_unlabeled,
            ground_truth_labels,
            dev_data,
            use_model=use_model_acq,
            num_epochs=8,
            human_label=human_label,
            acquisition_func=acquisition_func,
            lr=0.0035,
            batchSize=150,
            num_samples=num_samples,
            use_gpu=True,
            device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data,
                                     cnn_net,
                                     use_gpu=True,
                                     device=device)

    else:
        device = torch.device('cpu')
        # cnn_net = cnn_net.cuda()
        epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(
            cnn_net,
            train_data,
            X_seed,
            Y_seed,
            X_unlabeled,
            ground_truth_labels,
            dev_data,
            use_model=use_model_acq,
            num_epochs=8,
            human_label=human_label,
            acquisition_func=acquisition_func,
            lr=0.0035,
            batchSize=150,
            num_samples=num_samples,
            use_gpu=False,
            device=device)
        cnn_net.eval()
        print("Test Set")
        test_accuracy = eval_network(test_data,
                                     cnn_net,
                                     use_gpu=False,
                                     device=device)

    # plot_accuracy((min_accs, eval_accuracy, max_accs), "Sentiment CNN lr=0.001", train_data.length)
    plot_accuracy(
        eval_accuracy, "Sentiment CNN (Active Learning) lr=0.0035 " +
        acquisition_function_type, seed_data_size)
    # plot_losses(epoch_losses, "Sentiment CNN (Active Learning) lr=0.0030" + acquisition_function_type, train_data.length)
    torch.save(cnn_net.state_dict(), "saved_models\\cnn_active_learn.pth")
    # np.save("cnn_active_learning_train_loss" + acquisition_function_type + "_" + str(seed_data_size) + ".npy", np.array(epoch_losses))
    np.save(
        "human_labelling_results/cnn_active_learning_validation_accuracy_" +
        acquisition_function_type + "_" + str(seed_data_size) + "_" +
        str(num_samples) + ".npy", np.array(eval_accuracy))

    human_labels = []
    ground_truth_labels = []
    tweets = []
    save_labels = True

    if save_labels:
        for tweet, label, ground_truth_label in hand_labeled_data:
            # tweet, score = sample
            tweet = train_data.convert_to_words(tweet)
            tweets.append(tweet)
            human_labels.append(label)
            ground_truth_labels.append(ground_truth_label)

        new_labeled_tweets = pd.DataFrame({
            'label': human_labels,
            'ground truth': ground_truth_labels,
            'text': tweets
        })
        new_labeled_tweets.to_csv("human_labeled_tweets_lc_rk.csv",
                                  header=True,
                                  index=False)
Example #8
def main():
    #parameters
    # sampling_functions = ['random_score', 'entropy_score', 'least_confidence']
    sampling_functions = ['tweet_count']
    sampling_sizes = [5000, 10000, 15000, 20000]
    num_active_samples = [10, 25, 50]

    # sampling_functions = ['least_confidence']
    # num_active_samples = [25, 50]
    # sampling_sizes = [20000]

    args = parse_args()
    # twitter_csv_path = args.tweet_csv_file
    labeled_twitter_csv_path = args.labeled_tweet_csv_file
    unlabeled_twitter_csv_path = args.unlabeled_tweet_csv_file
    save_models = args.save_models

    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(labeled_twitter_csv_path,
                                                        test_split_percent=0.1,
                                                        val_split_percent=0.2,
                                                        shuffle=shuffle,
                                                        overfit=True, use_bert=use_bert,
                                                        overfit_val=40000)
    unlabeled_tweets, ground_truth_labels = load_unlabeled_tweet_csv(unlabeled_twitter_csv_path, num_tweets=45000)
    X_unlabeled = train_data.convert_text_to_ids(unlabeled_tweets)
    ground_truth_labels = (ground_truth_labels + 1.0) / 2.0  # map {-1, +1} labels to {0, 1}

    test_accuracies = {}

    print("Running ablation experiment on sampling functions and seed sizes")
    for af in sampling_functions:
        use_model = True  # reset each iteration; only 'tweet_count' disables it below
        if af == 'random_score':
            acquisition_func = random_score
        elif af == 'entropy_score':
            acquisition_func = entropy_score
        elif af == 'least_confidence':
            acquisition_func = least_confidence
        elif af == 'tweet_count':
            acquisition_func = tweet_count_norm
            use_model = False
        for seed_data_size in sampling_sizes:
            for sample_size in num_active_samples:
                param_combo = "Acquisition_Func: " + af + " Seed Size: " + str(seed_data_size) + " Sample Size: " + str(sample_size)
                print(param_combo + "\n")
                X_seed = train_data.Xwordlist[0:seed_data_size]
                Y_seed = train_data.labels[0:seed_data_size]
                Y_seed = (Y_seed + 1.0)/2.0
                cnn_net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES=2)

                device = torch.device('cuda:0')
                cnn_net = cnn_net.cuda()
                print("Train active learning")
                epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(cnn_net, train_data,
                                                                    X_seed, Y_seed,
                                                                    copy.deepcopy(X_unlabeled), np.copy(ground_truth_labels), dev_data,
                                                                    num_epochs=8, use_model=use_model, acquisition_func=acquisition_func,
                                                                    lr=0.0035, batchSize=150, num_samples=sample_size,
                                                                    use_gpu=True, device=device)
                print("Finished Training")
                cnn_net.eval()

                print("Test Set")
                test_accuracy = eval_network(test_data, cnn_net, use_gpu=True, device=device)
                model_save_path = "model_weights/cnn_active_learn_weights_"+ af + "_" + str(seed_data_size) + "_" + str(sample_size) + ".pth"
                if save_models:
                    torch.save(cnn_net.state_dict(), model_save_path)

                param_combo = "CNN Active Learning: " + " Acquisition_Func: " + af + " Seed Size: " + str(seed_data_size) + " Sample Size: " + str(sample_size)
                test_accuracies[param_combo] = test_accuracy
                filename = "results_ablation/cnn_active_learning_val_accuracy_" + af + "_" + str(seed_data_size) + "_" + str(sample_size) + ".npy"
                np.save(filename, np.array(eval_accuracy))

    print("Finished experiments")
    with open("ablation_test_accuracies1.txt", "w") as f:
        for key in test_accuracies.keys():
            accuracy = test_accuracies[key]
            line = key + " Acc: " + str(accuracy) + "\n"
            f.write(line)
Example #9
def train():
    """
    Performs training and evaluation of the CNN.
    NOTE: the CNN is evaluated on the whole test set every eval_freq epochs.
    """
    # YOUR TRAINING CODE GOES HERE
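    # History lists for the plots below; assumed to be module-level globals in
    # the original snippet, defined here so the function is self-contained.
    plot_epoch, plot_train_loss, plot_test_loss = [], [], []
    plot_train_accuracy, plot_test_accuracy = [], []
    # Normalize each RGB channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.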
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_data = datasets.CIFAR10('data',
                                  train=True,
                                  download=True,
                                  transform=transform)
    test_data = datasets.CIFAR10('data',
                                 train=False,
                                 download=True,
                                 transform=transform)
    train_on_gpu = torch.cuda.is_available()
    num_train = len(train_data)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=FLAGS.batch_size,
                                               shuffle=True,
                                               num_workers=0)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=FLAGS.batch_size,
                                              shuffle=False,
                                              num_workers=0)
    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]
    cnn = CNN(3, 10)
    if train_on_gpu:
        cnn.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(cnn.parameters(), lr=FLAGS.learning_rate)

    for epoch in range(1, FLAGS.max_steps):
        class_correct = list(0. for i in range(10))
        class_total = list(0. for i in range(10))
        train_loss = 0.0
        test_loss = 0.0
        cnn.train()
        for data, target in train_loader:
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = cnn(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            _, pred = torch.max(output, 1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            train_loss += loss.item() * data.size(0)
            correct = np.squeeze(
                correct_tensor.numpy()) if not train_on_gpu else np.squeeze(
                    correct_tensor.cpu().numpy())
            for i in range(len(target.data)):
                label = target.data[i]
                class_correct[label] += correct[i].item()
                class_total[label] += 1

        if epoch % FLAGS.eval_freq == 0:
            test_correct = list(0. for i in range(10))
            test_total = list(0. for i in range(10))
            cnn.eval()
            for data, target in test_loader:
                if train_on_gpu:
                    data, target = data.cuda(), target.cuda()
                output = cnn(data)
                _, pred = torch.max(output, 1)
                correct_tensor = pred.eq(target.data.view_as(pred))
                correct = np.squeeze(correct_tensor.numpy()
                                     ) if not train_on_gpu else np.squeeze(
                                         correct_tensor.cpu().numpy())
                loss = criterion(output, target)
                test_loss += loss.item() * data.size(0)
                for i in range(len(target.data)):
                    label = target.data[i]
                    test_correct[label] += correct[i].item()
                    test_total[label] += 1

            train_loss = train_loss / len(train_loader.dataset)
            test_loss = test_loss / len(test_loader.dataset)
            plot_epoch.append(epoch)
            plot_train_loss.append(train_loss)
            plot_test_loss.append(test_loss)
            print(
                'Epoch: {} \tTraining Loss: {:.6f} \tTest Loss: {:.6f}'.format(
                    epoch, train_loss, test_loss))

            percent_train = accuracy(class_correct, class_total) * 100
            percent_test = accuracy(test_correct, test_total) * 100
            plot_train_accuracy.append(percent_train)
            plot_test_accuracy.append(percent_test)
            print('train accuracy: ', percent_train, 'test accuracy: ',
                  percent_test)

    fig1 = plt.subplot(2, 1, 1)
    fig2 = plt.subplot(2, 1, 2)
    fig1.plot(plot_epoch,
              plot_train_accuracy,
              c='red',
              label='training data accuracy')
    fig1.plot(plot_epoch,
              plot_test_accuracy,
              c='blue',
              label='test data accuracy')
    fig1.legend()
    fig2.plot(plot_epoch, plot_train_loss, c='green', label='train CE loss')
    fig2.plot(plot_epoch, plot_test_loss, c='yellow', label='test CE loss')
    fig2.legend()
    plt.show()
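
# A minimal sketch of the accuracy() helper this snippet assumes (not shown in
# the original): the overall fraction of correct predictions across classes.
def accuracy(class_correct, class_total):
    return sum(class_correct) / sum(class_total)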