Example #1
0
def main():
    """Classify the image at IMAGEPATH with a trained CNN and print the
    predicted campus-location label.

    Relies on module-level globals: IMAGEPATH, MODELPATH, DROPSIZE,
    trans (preprocessing transform) and the CNN class.
    """
    img = Image.open(IMAGEPATH)
    transed_img = trans(img)

    n_channels = 3
    n_classes = 5

    test_model = CNN(n_channels, n_classes)
    test_model.load_state_dict(torch.load(MODELPATH))
    # BUG FIX: switch to eval mode so dropout / batch-norm behave
    # deterministically at inference time.
    test_model.eval()

    # Add the batch dimension expected by the network: (1, C, H, W).
    transed_img = transed_img.view(1, 3, DROPSIZE, DROPSIZE)

    # No gradients are needed for inference.
    with torch.no_grad():
        outputs = test_model(transed_img)

    _, predicted = torch.max(outputs.data, 1)

    # Loss against a fixed reference label (2 == "Library"); kept from the
    # original code, presumably for the commented-out "Unknown" threshold.
    criterion = nn.CrossEntropyLoss()
    loss = criterion(outputs, torch.from_numpy(np.array([2])))

    # if loss >= 1:
    #     print("Unknown")
    #     return
    # Table lookup replaces the copy-pasted if-chain.
    class_names = {0: "Dorm", 1: "Gym", 2: "Library", 3: "Market",
                   4: "Teaching Building 1"}
    name = class_names.get(predicted.item())
    if name is not None:
        print(name)
Example #2
0
def train(trainloader):
    """
    Performs training and evaluation of the CNN model.

    NOTE: the model should be evaluated on the whole test set every
    eval_freq iterations.

    Args:
        trainloader: iterable yielding (batch_x, batch_y) training batches.

    Returns:
        (cnn, losses, accuracies): the trained model plus the recorded
        running losses and train-batch accuracies.
    """
    n_channels = 3
    n_classes = 5

    cnn = CNN(n_channels, n_classes)

    # Train on GPU when one is available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cnn.to(device)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=LEARNING_RATE_DEFAULT)

    losses = []
    accuracies = []

    for epoch in range(MAX_EPOCHS_DEFAULT):
        timestart = time.time()
        running_loss = 0.0
        for step, (batch_x, batch_y) in enumerate(trainloader):
            # Zero the parameter gradients.
            optimizer.zero_grad()

            # Forward + backward + optimize.
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            outputs = cnn(batch_x)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            if step % EVAL_FREQ_DEFAULT == EVAL_FREQ_DEFAULT - 1:
                print('[epoch: %d, step: %5d] loss: %.4f' %
                      (epoch, step, running_loss / EVAL_FREQ_DEFAULT))
                losses.append(running_loss / EVAL_FREQ_DEFAULT)
                running_loss = 0.0
                accu = accuracy(outputs, batch_y)
                accuracies.append(accu)
                print('Accuracy on the %d train images: %.3f %%' %
                      (batch_y.size(0), accu))

        # BUG FIX: the original had an unconditional `break` here, which
        # stopped training after the first epoch and made the timing print
        # below unreachable. All MAX_EPOCHS_DEFAULT epochs now run.
        print('epoch %d cost %3f sec' % (epoch, time.time() - timestart))

    print('---Finished Training---')

    return cnn, losses, accuracies
def train(config):
    """
    Performs training and evaluation of the CNN model on CIFAR-10.

    NOTE: the model should be evaluated on the whole test set every
    eval_freq iterations.

    Args:
        config: namespace with batch_size, eval_freq and max_steps.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    train_data = CIFAR10(DATA_DIR_DEFAULT,
                         train=True,
                         download=True,
                         transform=transform)
    data_loader = DataLoader(train_data, batch_size=config.batch_size)

    model = CNN(3, 10)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    for step, (batch_inputs, batch_targets) in enumerate(data_loader):
        n = batch_inputs.size(0)

        y_pre = model.forward(batch_inputs)

        # Count correct predictions in this batch (argmax over class scores).
        hit = 0
        for i in range(n):
            y_ev, _ = max(enumerate(y_pre[i]), key=itemgetter(1))
            if y_ev == batch_targets[i].item():
                hit += 1

        loss = criterion(y_pre, batch_targets)
        accuracy = hit / n * 100

        optimizer.zero_grad()
        loss.backward()
        # BUG FIX: gradient clipping must happen AFTER backward() — the
        # original clipped before any gradients existed, making it a no-op.
        # clip_grad_norm_ is the in-place replacement for the deprecated
        # clip_grad_norm.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
        optimizer.step()

        if step % config.eval_freq == 0:
            print("loss: ", loss.item())
            print("accuracy: ", accuracy)

        if step == config.max_steps:
            # If you receive a PyTorch data-loader error, check this bug report:
            # https://github.com/pytorch/pytorch/pull/9655
            break

    print('Done training.')
def main(args):
    """Build data loaders, model, optimizer and trainer from CLI args and
    run training on the UrbanSound8K dataset.

    Args:
        args: parsed command-line arguments (mode, batch_size, worker_count,
              dropout, learning_rate, weight_decay, checkpoint settings and
              epoch/val/print/log frequencies).
    """
    # NOTE: the original bound an unused `transforms.ToTensor()` here; removed.
    mode = args.mode
    train_loader = torch.utils.data.DataLoader(
        UrbanSound8KDataset('UrbanSound8K_train.pkl', mode),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.worker_count,
        pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        UrbanSound8KDataset('UrbanSound8K_test.pkl', mode),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.worker_count,
        pin_memory=True
    )

    # Build a model based on mode: MLMC inputs are taller (145 rows)
    # than the other modes (85 rows).
    if args.mode == 'MLMC':
        model = CNN(height=145, width=41, channels=1, class_count=10,
                    dropout=args.dropout, mode=args.mode)
    else:
        model = CNN(height=85, width=41, channels=1, class_count=10,
                    dropout=args.dropout, mode=args.mode)

    # Softmax cross-entropy criterion.
    criterion = nn.CrossEntropyLoss()

    # AdamW is Adam with decoupled L2 regularisation (weight decay).
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate,
                                  weight_decay=args.weight_decay)

    log_dir = get_summary_writer_log_dir(args)
    print(f"Writing logs to {log_dir}")
    summary_writer = SummaryWriter(
        str(log_dir),
        flush_secs=5
    )
    trainer = Trainer(
        model, train_loader, val_loader, criterion, optimizer, summary_writer,
        DEVICE, args.checkpoint_path,
        checkpoint_frequency=args.checkpoint_frequency
    )

    trainer.train(
        args.epochs,
        args.val_frequency,
        print_frequency=args.print_frequency,
        log_frequency=args.log_frequency,
    )

    summary_writer.close()
Example #5
0
    def __init__(self):
        """Create a TF session, build the CNN graph and restore trained
        MNIST weights from the SAVE_DIR_MNIST checkpoint."""
        self.session = tf.Session()
        self.model = CNN()
        # Initialise all graph variables first; the restore below then
        # overwrites them with the checkpointed values.
        self.session.run(tf.global_variables_initializer())

        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=SAVE_DIR_MNIST)
Example #6
0
def buildCNN(X,
             Y,
             layers,
             activation="relu",
             loss_type="softmax",
             optimizer="adam",
             regularization=None,
             batch_size=64,
             padding='SAME',
             learning_rate=1e-3,
             iteration=10000,
             batch_norm=False,
             drop_out=False,
             drop_out_rate=0):
    """Create a CNN object for the given model specification.

    All arguments are forwarded unchanged to the CNN constructor; see the
    CNN class in cnn_model.py for their meaning.

    Returns:
        The constructed CNN model.
    """
    return CNN(X, Y, layers, activation, loss_type, optimizer, regularization,
               batch_size, padding, learning_rate, iteration, batch_norm,
               drop_out, drop_out_rate)
    def initialize(self, is_load_other_model, other_model_name=""):
        """Create the CNN model and its Adam optimizer, optionally loading
        weights from a previously serialized model first.

        Args:
            is_load_other_model: when truthy, load weights from
                other_model_name via chainer serializers.
            other_model_name: path to an .npz snapshot to load.
        """
        self._model = CNN()

        # NOTE(review): the model is stored as self._model but read back as
        # self.model (and the optimizer as self.optimizer) — presumably
        # properties defined elsewhere on this class; verify.
        if (is_load_other_model):
            serializers.load_npz(other_model_name, self.model)

        self._optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
    def init_model(self):
        """Load a trained TF model from self.model_path and cache handles to
        the graph tensors used for inference."""
        # Let GPU memory grow on demand instead of pre-allocating all of it.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.config = CNNConfig()
        self.cnn = CNN(self.config)
        # self.cnn.setVGG16()

        print('Loading model from file:', self.model_path)
        # Rebuild the graph from the .meta file, then restore the weights.
        saver = tf.train.import_meta_graph(self.model_path + '.meta')
        saver.restore(self.sess, self.model_path)
        self.graph = tf.get_default_graph()
        # Look up the input, output and control tensors by operation name.
        self.input_x = self.graph.get_operation_by_name("input_x").outputs[0]
        self.labels = self.graph.get_operation_by_name("labels").outputs[0]
        self.dropout_keep_prob = self.graph.get_operation_by_name(
            "dropout_keep_prob").outputs[0]
        self.score = self.graph.get_operation_by_name('score/Relu').outputs[0]
        self.prediction = self.graph.get_operation_by_name(
            "prediction").outputs[0]
        self.training = self.graph.get_operation_by_name("training").outputs[0]
Example #9
0
def test(model_name='model.pkl'):
    """Evaluate a trained captcha CNN on the test set, printing mispredicted
    examples and the running accuracy.

    Args:
        model_name: path to the saved state_dict (.pkl).
    """
    cnn = CNN()
    # Load the weights first, then switch to eval mode for inference.
    cnn.load_state_dict(torch.load(model_name))
    cnn.eval()
    print('load cnn net.')

    test_dataloader = dataset.get_test_data_loader()

    correct = 0
    total = 0
    for i, (images, labels) in enumerate(test_dataloader):
        vimage = Variable(images)
        predict_label = cnn(vimage)

        # BUG FIX: the inner loop previously reused `i`, clobbering the
        # enumerate() index of the outer loop; use a separate variable.
        chars = ''
        for pos in range(setting.MAX_CAPTCHA):
            chars += setting.ALL_CHAR_SET[np.argmax(
                predict_label[0, pos * setting.ALL_CHAR_SET_LEN:(pos + 1) *
                              setting.ALL_CHAR_SET_LEN].data.numpy())]

        predict_label = chars
        true_label = one_hot.decode(labels.numpy()[0])
        total += labels.size(0)

        if (predict_label == true_label):
            correct += 1
        else:
            print('Predict:' + predict_label)
            print('Real   :' + true_label)
        if (total % 200 == 0):
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
    print('Test Accuracy of the model on the %d test images: %f %%' %
          (total, 100 * correct / total))
Example #10
0
def main():
    """Run the captcha CNN over the prediction loader and return the last
    decoded 4-character string ('' if the loader is empty)."""
    cnn = CNN()
    cnn.load_state_dict(torch.load('model/1500_model.pkl'))
    cnn.eval()
    print("load cnn net.")

    predict_dataloader = my_dataset.get_predict_data_loader()

    # BUG FIX: `c` was only bound inside the loop, so an empty loader made
    # the final `return c` raise NameError; default to the empty string.
    c = ''
    L = captcha_setting.ALL_CHAR_SET_LEN
    for i, (images, labels) in enumerate(predict_dataloader):
        vimage = Variable(images)
        predict_label = cnn(vimage)

        # Decode each of the four captcha positions by argmax over its own
        # slice of the output vector (replaces the copy-pasted c0..c3 blocks).
        chars = [
            captcha_setting.ALL_CHAR_SET[np.argmax(
                predict_label[0, k * L:(k + 1) * L].data.numpy())]
            for k in range(4)
        ]
        c = '%s%s%s%s' % tuple(chars)
    return c
def train_cnn_model(emb_layer, x_train, y_train, x_val, y_val, opt):
    """Build, compile and fit the CNN text classifier.

    The fit history is dumped to CNN_train_history.txt; the fitted model
    is returned.
    """
    cnn_spec = CNN(embedding_layer=emb_layer,
                   num_words=opt.n_words,
                   embedding_dim=opt.embed_dim,
                   filter_sizes=opt.cnn_filter_shapes,
                   feature_maps=opt.filter_sizes,
                   max_seq_length=opt.sent_len,
                   dropout_rate=opt.dropout_ratio,
                   hidden_units=200,
                   nb_classes=2)
    model = cnn_spec.build_model()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])

    print(model.summary())

    # Stop once validation loss fails to improve for two epochs.
    stopper = EarlyStopping(monitor='val_loss', patience=2)
    history = model.fit(x_train,
                        y_train,
                        epochs=opt.cnn_epoch,
                        batch_size=opt.batch_size,
                        verbose=1,
                        validation_data=(x_val, y_val),
                        callbacks=[stopper])

    with open("CNN_train_history.txt", "w") as f:
        print(history.history, file=f)
    return model
Example #12
0
def _confusion_stats(cm):
    """Return (TP, FP, TN, FN, F1) from a 2x2 confusion matrix whose rows
    are true labels and columns are predictions (sklearn convention)."""
    TP = cm[1][1]
    FP = cm[0][1]
    TN = cm[0][0]
    FN = cm[1][0]
    F1 = float(2 * TP) / (2 * TP + FN + FP)
    return TP, FP, TN, FN, F1


def evaluation(method, dataset, user, device_source):
    """Train a CNN on the given dataset/method features and log train/test
    confusion-matrix statistics to log/cnn_<dataset>_<method>_evaluation.txt.
    """
    log_name = 'log/cnn_%s_%s_evaluation.txt' % (dataset, method)
    if os.path.exists(log_name):
        os.remove(log_name)
    # Feature roots differ per user account.
    if user == 'mlsnrs':
        root_dir_prefix = '/home/mlsnrs/apks'
    elif user == 'shellhand':
        root_dir_prefix = '/mnt'
    save_feature_path = '%s/%s/mamadroid/%s/%s/%s_save_feature_list.csv' % (
        root_dir_prefix, device_source, dataset, method, method)
    save_feature_dict = get_save_feature_dict(save_feature_path)
    print('have read save_feature_dict: %d' % len(save_feature_dict))
    x_train, y_train = get_train_data(dataset, method, save_feature_dict,
                                      root_dir_prefix, device_source)
    print('x_train shape: %s y_train shape: %s' %
          (str(x_train.shape), str(y_train.shape)))
    start = time.time()
    print('start train')
    clf = CNN(layer_num=3, kernel_size=3, gpu_id=2)
    clf.fit(x_train, y_train, epoch=5, batch_size=500, lr=0.01)
    end = time.time()
    print('Training  model time used: %f s' % (end - start))
    print(x_train.shape)
    # Training-set performance: threshold the raw scores at 0.5.
    y_pred = clf.predict(x_train, batch_size=20)
    print(y_pred.shape)
    cm = confusion_matrix(y_train, np.int32(y_pred >= 0.5))
    # Duplicated TP/FP/TN/FN/F1 extraction factored into _confusion_stats.
    TP, FP, TN, FN, F1 = _confusion_stats(cm)
    print('train data TP FP TN FN F1: %d %d %d %d %.4f' % (TP, FP, TN, FN, F1))
    with open(log_name, 'a') as f:
        f.write('train data TP FP TN FN F1: %d %d %d %d %.4f\n' %
                (TP, FP, TN, FN, F1))
    # Release the training arrays before the test chunks are loaded.
    x_train = []
    y_train = []
    for test_id in range(0, 1):  # 13):
        x_test, y_test = get_test_data(dataset, test_id, method,
                                       save_feature_dict, root_dir_prefix,
                                       device_source)
        print('x_test shape: %s y_test shape: %s' %
              (str(x_test.shape), str(y_test.shape)))
        # NOTE(review): unlike the training evaluation above, y_pred is not
        # thresholded here — confirm clf.predict returns class labels in this
        # path, or apply `np.int32(y_pred >= 0.5)` as well.
        y_pred = clf.predict(x_test, batch_size=500)
        cm = confusion_matrix(y_test, y_pred)
        TP, FP, TN, FN, F1 = _confusion_stats(cm)
        print('test_id %d TP FP TN FN F1: %d %d %d %d %.4f' %
              (test_id, TP, FP, TN, FN, F1))
        with open(log_name, 'a') as f:
            f.write('test_id %d TP FP TN FN F1: %d %d %d %d %.4f\n' %
                    (test_id, TP, FP, TN, FN, F1))
Example #13
0
def main():
    """Train and evaluate the sentiment CNN on the twitter CSV data, then
    save the model, training losses and validation accuracies."""
    args = parse_args()
    twitter_csv_path = args.tweet_csv_file
    device_type = args.device
    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(
        twitter_csv_path, test_split_percent=0.1, val_split_percent=0.2,
        overfit=True, shuffle=shuffle, use_bert=use_bert, overfit_val=12639)
    vocab_size = train_data.vocab_size
    print(vocab_size)
    print(train_data.length)
    print(dev_data.length)
    print(test_data.length)
    cnn_net = CNN(vocab_size, DIM_EMB=300, NUM_CLASSES=2)

    # Deduplicated GPU/CPU branches: pick the device once, then run one
    # train/eval pipeline.
    use_gpu = device_type == "gpu" and torch.cuda.is_available()
    if use_gpu:
        device = torch.device('cuda:0')
        cnn_net = cnn_net.cuda()
    else:
        device = torch.device('cpu')

    # Labels arrive in {-1, 1}; map them to {0, 1} for training.
    epoch_losses, eval_accuracy = train_network(
        cnn_net,
        train_data.Xwordlist,
        (train_data.labels + 1.0) / 2.0,
        10, dev_data, lr=0.003,
        batchSize=150, use_gpu=use_gpu, device=device)
    cnn_net.eval()
    print("Test Set")
    # BUG FIX: the CPU path passed batch_size=batchSize with `batchSize`
    # undefined (NameError); match the GPU path's call signature instead.
    test_accuracy = eval_network(test_data, cnn_net, use_gpu=use_gpu,
                                 device=device)

    # plot_accuracy((min_accs, eval_accuracy, max_accs), "Sentiment CNN lr=0.001", train_data.length)
    plot_accuracy(eval_accuracy, "Sentiment CNN lr=0.003", train_data.length)
    plot_losses(epoch_losses, "Sentiment CNN lr=0.003", train_data.length)
    torch.save(cnn_net.state_dict(), "saved_models\\cnn.pth")
    np.save("cnn_train_loss_" + str(train_data.length) + ".npy",
            np.array(epoch_losses))
    np.save("cnn_validation_accuracy_" + str(train_data.length) + ".npy",
            np.array(eval_accuracy))
def train(args):
    """Train the CNN on CIFAR-10 batches, checkpointing once per epoch.

    Args:
        args: namespace with batch_size, save_dir, learning_rate, num_epochs.
    """
    data = CIFAR10(args.batch_size, TRAIN_FILES)

    # Create the save directory if it does not already exist.
    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)

    print('Initializing model...')
    images = tf.placeholder(tf.float32, [None, 32, 32, 3], 'input_images')
    distorted = distort_images(images)
    model = CNN(distorted, learning_rate=args.learning_rate)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        print('Starting training...')
        total_batches = args.num_epochs * data.n_batches
        for epoch in range(args.num_epochs):
            for batch_idx in range(data.n_batches):
                tic = time.time()
                x, y = data.next_batch()
                feed = {images: x, model.labels: y}
                loss, _ = sess.run([model.loss, model.train_step],
                                   feed_dict=feed)
                toc = time.time()
                print('{}/{} (epoch {}), train_loss={:.3f}, time/batch={:.3f}'.
                      format(epoch * data.n_batches + batch_idx,
                             total_batches, epoch, loss, toc - tic))

            # One checkpoint per finished epoch, tagged by global batch count.
            checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path,
                       global_step=epoch * data.n_batches)
            print("model saved to {}".format(checkpoint_path))
Example #15
0
def main():
    """Evaluate the captcha CNN on the test set, printing per-sample labels,
    running accuracy, and all mispredicted examples at the end."""
    cnn = CNN()
    cnn.load_state_dict(torch.load('model/1500_model.pkl'))
    cnn.eval()
    print("load cnn net.")

    test_dataloader = my_dataset.get_test_data_loader()

    correct = 0
    total = 0
    error = []
    true = []
    L = captcha_setting.ALL_CHAR_SET_LEN
    for i, (images, labels) in enumerate(test_dataloader):
        vimage = Variable(images)
        predict_output = cnn(vimage)

        # Decode the four captcha positions by argmax over each position's
        # slice of the output (replaces the copy-pasted c0..c3 blocks).
        predict_label = ''.join(
            captcha_setting.ALL_CHAR_SET[np.argmax(
                predict_output[0, k * L:(k + 1) * L].data.numpy())]
            for k in range(4))
        true_label = one_hot_encoding.decode(labels.numpy()[0])
        print("true_label: ", true_label)
        # BUG FIX: output label typo "predict_lable" corrected.
        print("predict_label: ", predict_label, "\n")
        total += labels.size(0)
        if (predict_label == true_label):
            correct += 1
        else:
            error.append(predict_label)
            true.append(true_label)
        if (total % 200 == 0):
            print('测试集数量:%d, 准确率 : %f %%' % (total, 100 * correct / total))
    print('测试集数量:%d, 准确率 : %f %%' % (total, 100 * correct / total))
    print('预测错误例子:\n')
    print('正确字符:', true)
    print('错误字符:', error)
def train_baseline_cnn(emb_layer, x_train, y_train, x_val, y_val, opt):
    """Build, compile and fit the baseline CNN; returns the fitted model.

    Logs to TensorBoard, checkpoints the best model by validation accuracy,
    and writes the fit history to CNN_train_baseline_history.txt.
    """
    model = CNN(embedding_layer=emb_layer,
                num_words=opt.transfer_n_words,
                embedding_dim=opt.baseline_embed_dim,
                filter_sizes=opt.cnn_filter_shapes,
                feature_maps=opt.filter_sizes,
                max_seq_length=opt.baseline_sent_len,
                dropout_rate=opt.baseline_drop_out_ratio,
                hidden_units=200,
                nb_classes=2).build_model()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])

    print(model.summary())

    fit_callbacks = [
        # Stop after two epochs without val_loss improvement.
        EarlyStopping(monitor='val_loss', patience=2),
        TensorBoard(log_dir=f'{opt.tbpath}/baseline_cnn_{time()}',
                    histogram_freq=1,
                    write_graph=True,
                    write_images=True),
        # Keep only the best-val_acc weights on disk.
        ModelCheckpoint("baseline_cnn.h5",
                        monitor='val_acc',
                        verbose=1,
                        save_best_only=True,
                        save_weights_only=False,
                        mode='auto',
                        period=1),
    ]
    history = model.fit(x_train,
                        y_train,
                        epochs=opt.baseline_epochs,
                        batch_size=opt.baseline_batchsize,
                        verbose=1,
                        validation_data=(x_val, y_val),
                        callbacks=fit_callbacks)

    with open("CNN_train_baseline_history.txt", "w") as f:
        print(history.history, file=f)
    return model
Example #17
0
def train(model_name='model.pkl'):
    """Train the captcha CNN for TRAIN_NUM_EPOCHS epochs and save the final
    state_dict to model_name."""
    cnn = CNN()
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(),
                                 lr=setting.TRAIN_LEARNING_RATE)

    # Train the model.
    train_dataloader = dataset.get_train_data_loader()
    for epoch in range(setting.TRAIN_NUM_EPOCHS):
        for step, (images, labels) in enumerate(train_dataloader):
            batch_x = Variable(images)
            batch_y = Variable(labels.float())
            predictions = cnn(batch_x)
            loss = criterion(predictions, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Report the loss of the last batch of each epoch.
        print('epoch: % -3s loss: %s' % (epoch, loss.item()))
    torch.save(cnn.state_dict(), model_name)  # current is model.pkl
    print('save last model')
Example #18
0
def recognize(model_name='model.pk'):
    """Load the trained captcha CNN, decode every image from the captcha
    loader in label order, and return the concatenated character string."""
    cnn = CNN()
    cnn.eval()
    cnn.load_state_dict(torch.load(model_name))

    captcha_dataloader = dataset.get_captcha_data_loader()

    # Collect images keyed by their label, then process in sorted-label order.
    by_label = {}
    for image, label in captcha_dataloader:
        by_label[label] = image

    code = ''
    for key in sorted(by_label):
        prediction = cnn(Variable(by_label[key]))
        # Each captcha position has its own slice of the output vector.
        for pos in range(setting.MAX_CAPTCHA):
            code += setting.ALL_CHAR_SET[np.argmax(
                prediction[0, pos * setting.ALL_CHAR_SET_LEN:(pos + 1) *
                           setting.ALL_CHAR_SET_LEN].data.numpy())]

    return code
Example #19
0
def get_model(conf, data_helper, model_name):
    """Build a user-item interaction model plus several training/eval
    variants sharing the same embeddings.

    Args:
        conf: configuration object (optimizer, loss, dims, batch_size_p,
            regularization, pretrain settings, ...).
        data_helper: provides data_spec (user/item/word counts,
            max_content_len) and the content matrix data['C'].
        model_name: one of 'pretrained', 'mf', 'basic_embedding',
            'cnn_embedding', 'rnn_embedding'.

    Returns:
        dict of compiled Keras models: 'model', 'model_neg_shared',
        'model_group_neg_shared', 'model_sampled_neg_shared',
        'model_user_emb', 'model_item_emb', 'model_pred_pairs', and the
        loss-monitoring variants 'model_all_loss' /
        'model_neg_shared_all_loss'.
    """
    # retrieve model configurations
    max_epoch = conf.max_epoch
    num_negatives = conf.num_negatives
    batch_size_p = conf.batch_size_p
    eval_topk = conf.eval_topk
    optimizer = conf.optimizer
    loss = conf.loss
    user_dim = conf.user_dim
    item_dim = conf.item_dim
    data_spec = data_helper.data_spec
    user_count = data_spec.user_count
    item_count = data_spec.item_count
    word_count = data_spec.word_count
    emb_normalization = conf.emb_normalization
    max_content_len = data_spec.max_content_len
    support_groupping_for_all = True  # provide general speed-up

    # standard input & output of the model
    input_dtype = 'int32'
    # Constant column of row indices 0..batch_size_p-1, used below to pair
    # each row with its positive item's column in the score matrix.
    row_cidx_prefx = tf.Variable(np.arange(batch_size_p, \
        dtype=input_dtype).reshape((batch_size_p, 1)))
    uid = Input(shape=(1,), dtype=input_dtype)
    cid = Input(shape=(1,), dtype=input_dtype)
    U_emb_given = Input(shape=(user_dim,), dtype='float32')
    C_emb_given = Input(shape=(item_dim,), dtype='float32')
    # unique cid, in two Lambda thanks to Keras, dummy
    # cid_u = unique item ids; cid_x = index of each original cid in cid_u.
    cid_u = Lambda(lambda x: tf.reshape(tf.unique(x)[0], (-1, 1)),
                   output_shape=(1,))(Reshape(())(cid))
    cid_x = Lambda(lambda x: tf.reshape(tf.unique(x)[1], (-1, 1)),
                   output_shape=(1,))(Reshape(())(cid))

    # retrieve content
    # NOTE(review): the content matrix is pinned to CPU — presumably to keep
    # it out of GPU memory; confirm.
    with tf.device('/cpu:0'):
        C = tf.Variable(data_helper.data['C'])
        get_content = lambda x: tf.reshape(tf.gather(C, x),
                                           (-1, max_content_len))
        content = Lambda(get_content, output_shape=(max_content_len, ))(cid)
        content_u = Lambda(get_content, output_shape=(max_content_len, ))(cid_u)

    # user embedding: U_emb, U_emb_front (first batch_size_p)
    Emb_U = Embedding(user_count, user_dim, name='user_embedding',
                      activity_regularizer=activity_l2(conf.u_reg))
    U_emb = Reshape((user_dim, ))(Emb_U(uid))
    if emb_normalization:
        U_emb = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(U_emb)
    uid_front = Lambda(lambda x: x[:batch_size_p])(uid)  # thanks keras, dummy
    U_emb_front = Reshape((user_dim, ))(Emb_U(uid_front))

    # item embedding: C_emb_compact (no duplication), C_emb
    get_item_emb_combined_pretrain = ItemCombination().get_model()
    if model_name == 'pretrained':
        if conf.evaluation_mode:
            # Frozen embeddings loaded from pretrained weights for both sides.
            Emb_U = Embedding(user_count, user_dim, trainable=False,
                              weights=[conf.pretrain['user_emb']])
            U_emb = Reshape((user_dim, ))(Emb_U(uid))
            Emb_C = Embedding(item_count, item_dim, trainable=False,
                              weights=[conf.pretrain['item_emb']])
            C_emb = Reshape((item_dim, ))(Emb_C(cid))
        else:
            if conf.pretrain['transform']:
                C_emb = get_item_emb_combined_pretrain(None, cid, conf, data_spec)
            else:
                Emb_C = Embedding(item_count, item_dim, trainable=False,
                                  weights=[data_spec.C_pretrain])
                C_emb = Reshape((item_dim, ))(Emb_C(cid))
        C_emb_compact = C_emb
    elif model_name == 'mf':
        # Plain matrix factorization: a trainable item embedding table.
        Emb_C = Embedding(item_count, item_dim, name='item_embedding')
        C_emb = Reshape((item_dim, ))(Emb_C(cid))
        C_emb_compact = C_emb
    else:
        # Content-based item embeddings (mean-pool / CNN / RNN over content).
        if model_name == 'basic_embedding':
            Content_model = MeanPool(data_spec, conf).get_model()
        elif model_name == 'cnn_embedding':
            Content_model = CNN(data_spec, conf).get_model()
        elif model_name == 'rnn_embedding':
            Content_model = RNN(data_spec, conf).get_model()
        else:
            assert False, '[ERROR] Model name {} unknown'.format(model_name)
        C_emb_compact = Content_model([content_u, cid_u])  # (None, item_dim)
        C_emb_compact = get_item_emb_combined_pretrain(C_emb_compact, cid_u, \
            conf, data_spec) # (None, item_dim)
        # C_emb_u only computes unique set of items, no duplication
        C_emb_u = Lambda( \
            lambda x: tf.reshape(tf.gather(x[0], x[1]), (-1, item_dim)), \
            output_shape=(item_dim, ))([C_emb_compact, cid_x])
        if support_groupping_for_all:
            C_emb = C_emb_u
        else:  # otherwise only support groupping for group_neg_shared
            C_emb = Content_model([content, cid])  # (None, item_dim)
        if emb_normalization:
            C_emb_compact = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(C_emb_compact)
            C_emb = Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(C_emb)

    # item embedding more: C_emb_front, C_emb_back
    # Front rows are the positive pairs; back rows hold sampled negatives.
    cid_front = Lambda(lambda x: x[:batch_size_p])(cid)
    cid_back = Lambda(lambda x: x[batch_size_p:])(cid)
    C_emb_front = Lambda(lambda x: x[:batch_size_p])(C_emb)
    C_emb_back = Lambda(lambda x: x[batch_size_p:])(C_emb)

    # interact (with or without bias)
    Interact = InteractionDot(bias=conf.interaction_bias,
                              user_count=user_count, item_count=item_count)

    # 'mul' = element-wise pairwise scores; 'matmul' = all-pairs score matrix.
    pred_score = Interact.set_form('mul')([U_emb, C_emb, uid, cid])

    pred_score_with_given = Interact.set_form('mul')([U_emb_given, C_emb_given,
                                                      uid, cid])

    pred_score_neg_shared = Interact.set_form('matmul')([U_emb, C_emb,
                                                         uid, cid])

    pred_score_neg_shared_comp = Interact.set_form('matmul')([ \
        U_emb, C_emb_compact, uid, cid_u])
    # (row, col) coordinates of each positive entry in the compact matrix.
    pos_idxs = tf.concat([row_cidx_prefx, \
        tf.reshape(cid_x, (-1, 1))], 1)  # (batch_size_p, 2)
    loss_neg_shared_comp = get_group_neg_shared_loss( \
        pred_score_neg_shared_comp, pos_idxs, loss, batch_size_p, conf)

    pred_pos_sampled_neg_shared = Interact.set_form('mul')([ \
        U_emb_front, C_emb_front, uid_front, cid_front])  # (batch_size_p, 1)
    pred_neg_sampled_neg_shared = Interact.set_form('matmul')([ \
        U_emb_front, C_emb_back, uid_front, cid_back])  # (batch_size_p, num_negatives)
    pred_score_sampled_neg_shared = Lambda(lambda x: tf.concat([x[0], x[1]], 1))( \
        [pred_pos_sampled_neg_shared, pred_neg_sampled_neg_shared])

    # uid-cid element-wise interaction
    # during training, first batch_size_p assumed positive
    model = Model(input=[uid, cid], output=[pred_score])
    model.compile(optimizer=optimizer, \
        loss=get_original_loss(loss, batch_size_p, num_negatives, conf))

    # uid-cid complete pairwise interaction (produce prediction matrix)
    # during training, diag is assumed positive
    model_neg_shared = Model(input=[uid, cid], output=[pred_score_neg_shared])
    model_neg_shared.compile(optimizer=optimizer, \
        loss=get_neg_shared_loss(loss, batch_size_p, conf))

    # uid and compacted cid complete pairwise interactions
    model_group_neg_shared = Model(input=[uid, cid], \
        output=[pred_score_neg_shared_comp])
    model_group_neg_shared.compile(optimizer=optimizer, \
        loss=lambda y_true, y_pred: loss_neg_shared_comp)  # dummy

    # sampled negatives are shared
    # first batch_size_p pairs are positive ones,
    # uid[:batch_size_p] and cid[batch_size_p:] are negative links
    model_sampled_neg_shared = Model(input=[uid, cid], \
        output=[pred_score_sampled_neg_shared])
    model_sampled_neg_shared.compile(optimizer=optimizer, \
        loss=get_sampled_neg_shared_loss(loss, batch_size_p,
                                         num_negatives, conf))

    # test efficient methods with given (uid, cid) pairs
    model_user_emb = Model(input=[uid], output=[U_emb])
    model_item_emb = Model(input=[cid], output=[C_emb])
    model_pred_pairs = Model(input=[U_emb_given, C_emb_given, uid, cid], \
        output=[pred_score_with_given])

    # construct models for monitoring all types of losses during training
    # (SGD with lr=0 so evaluating them never updates the weights).
    def get_all_losses(input, output, loss):
        model_all_loss = {'skip-gram': None, 'mse': None,
                          'log-loss': None, 'max-margin': None}
        for lname in model_all_loss:
            from keras.optimizers import SGD
            m = Model(input=input, output=output)
            m.compile(optimizer=SGD(0.), loss=loss)
            model_all_loss[lname] = m
        return model_all_loss

    model_all_loss = get_all_losses([uid, cid], [pred_score], \
        get_original_loss(loss, batch_size_p, num_negatives, conf))
    model_neg_shared_all_loss = get_all_losses([uid, cid], \
        [pred_score_neg_shared], \
        get_neg_shared_loss(loss, batch_size_p, conf))

    model_dict = {'model': model,
                  'model_neg_shared': model_neg_shared,
                  'model_group_neg_shared': model_group_neg_shared,
                  'model_sampled_neg_shared': model_sampled_neg_shared,
                  'model_user_emb': model_user_emb,
                  'model_item_emb': model_item_emb,
                  'model_pred_pairs': model_pred_pairs,
                  'model_all_loss': model_all_loss,
                  'model_neg_shared_all_loss': model_neg_shared_all_loss
                  }

    return model_dict


if __name__ == '__main__':
    train_data_dir = 'data_scaled/'
    validation_data_dir = 'data_scaled_validation/'
    nb_train_samples = 1763
    nb_validation_samples = 194
    epochs = 100
    batch_size = 16

    mimg = MoleImages()
    X_test, y_test = mimg.load_test_images('data_scaled_test/benign',
                                            'data_scaled_test/malign')

    mycnn = CNN()
    train_datagen = ImageDataGenerator(
    vertical_flip=True,
    horizontal_flip=True)
    test_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(128, 128),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(128, 128),
        batch_size=batch_size,
Example #21
0
def train_model(embedding_size, hidden_size, filter_width, max_or_mean,
                max_num_epochs, batch_size, learning_rate, loss_margin,
                training_checkpoint, dropout_prob, eval_batch_size):
    """Train the CNN question-retrieval model and report dev/test AUC.

    Builds a CNN with the given hyper-parameters, optionally resumes from
    ``load_model_path``, trains up to ``max_num_epochs`` epochs, and every
    ``training_checkpoint`` epochs prints the mean train loss plus AUC(0.05)
    on the dev and test splits.  Returns the final ``(dev_auc, test_auc)``.
    """
    global load_model_path, train_data, source_questions
    global dev_data, dev_label_dict, test_data, test_label_dict
    global dev_pos_data, dev_neg_data, test_pos_data, test_neg_data, target_questions

    # Generate model
    cnn = CNN(embedding_size, hidden_size, filter_width, max_or_mean,
              dropout_prob)
    optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)
    criterion = nn.MultiMarginLoss(margin=loss_margin)
    init_epoch = 1

    # Resume from a checkpoint when one was configured on the command line.
    if load_model_path is not None:
        print("Loading model from \"" + load_model_path + "\"...")
        init_epoch = load_model(load_model_path, cnn, optimizer)

    # Training
    print("***************************************")
    print("Starting run with following parameters:")
    print(" --embedding size:   %d" % (cnn.input_size))
    print(" --hidden size:      %d" % (cnn.hidden_size))
    print(" --filter width:     %d" % (cnn.n))
    print(" --dropout:          %f" % (cnn.dropout_prob))
    print(" --pooling:          %s" % (cnn.max_or_mean))
    print(" --initial epoch:    %d" % (init_epoch))
    print(" --number of epochs: %d" % (max_num_epochs))
    print(" --batch size:       %d" % (batch_size))
    print(" --learning rate:    %f" % (learning_rate))
    print(" --loss margin:      %f" % (loss_margin))

    start = time.time()
    current_loss = 0

    # FIX: the loop variable was named `iter`, shadowing the builtin;
    # renamed to `epoch` (local-only change, no interface impact).
    for epoch in range(init_epoch, max_num_epochs + 1):
        current_loss += train(cnn, criterion, optimizer, train_data,
                              source_questions, batch_size, 21)
        if epoch % training_checkpoint == 0:
            print("Epoch %d: Average Train Loss: %.5f, Time: %s" %
                  (epoch,
                   (current_loss / training_checkpoint), timeSince(start)))
            d_auc = evaluate_auc(cnn, dev_pos_data, dev_neg_data,
                                 target_questions, eval_batch_size)
            t_auc = evaluate_auc(cnn, test_pos_data, test_neg_data,
                                 target_questions, eval_batch_size)
            print("Dev AUC(0.05): %.2f" % (d_auc))
            print("Test AUC(0.05): %.2f" % (t_auc))

            current_loss = 0  # reset the running-average window

            if SAVE_MODEL:
                # Periodic checkpoint; the last flag marks the final epoch.
                state = {"model": cnn.state_dict(),
                         "optimizer": optimizer.state_dict(),
                         "epoch": epoch}
                save_model(save_model_path, "cnn_dt", state,
                           epoch == max_num_epochs)

    # Compute final results
    print("-------")
    print("FINAL RESULTS:")
    d_auc = evaluate_auc(cnn, dev_pos_data, dev_neg_data, target_questions,
                         eval_batch_size)
    t_auc = evaluate_auc(cnn, test_pos_data, test_neg_data, target_questions,
                         eval_batch_size)
    print("Training time: %s" % (timeSince(start)))
    print("Dev AUC(0.05): %.2f" % (d_auc))
    print("Test AUC(0.05): %.2f" % (t_auc))

    if SAVE_MODEL:
        # When training resumed at/after max_num_epochs, record the resume
        # epoch instead so the checkpoint epoch never goes backwards.
        state = {"model": cnn.state_dict(),
                 "optimizer": optimizer.state_dict(),
                 "epoch": max_num_epochs if init_epoch < max_num_epochs
                          else init_epoch}
        save_model(save_model_path, "cnn", state, True)

    return (d_auc, t_auc)
Example #22
0
def main(arg):
    """Generate natural-language captions for an ``.avi`` video.

    Builds the vocabulary from the MSVD corpus, loads a CNN feature extractor
    and an LSTM caption generator from the weight paths in ``arg``, samples
    frames from ``arg.video_path``, extracts per-frame features, and returns
    the list of generated caption strings.

    Raises:
        ValueError: if ``arg.video_path`` is not an ``.avi`` file.
    """
    # build vocab
    CSV_PATH = './MSR_Video_Description_Corpus.csv'
    data = MSVD_Caption(CSV_PATH)
    captions = data['Description'].values
    del data  # free the dataframe; only the caption strings are needed
    vocabs = Vocabs(list(replace_map(captions)))
    # build model
    cnn = CNN(arg.net) if arg.net else CNN()
    cnn.load_weights(arg.cnn_weight_path)
    rnn = CaptionGenerator(n_words=vocabs.n_words,
                           batch_size=1,
                           dim_feature=1280,
                           dim_hidden=500,
                           n_video_lstm=80,
                           n_caption_lstm=20,
                           bias_init_vector=vocabs.bias_init_vector)
    rnn.load_weights(arg.rnn_weight_path, by_name=True)
    # extract video features
    print('extract %s video features' % (arg.video_path))
    this_features = []
    if arg.video_path.endswith('.avi'):
        cap = cv2.VideoCapture(arg.video_path)
        # Total number of frames (cv2 returns a float; valid frame indices
        # are 0 .. flame_count - 1).
        flame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if flame_count > rnn.n_video_lstm:
            # More frames than needed: sample uniformly.
            # FIX: the upper bound was flame_count, an index no frame ever
            # has, so the last sample point could never be captured.
            select_flames = np.linspace(0,
                                        flame_count - 1,
                                        num=rnn.n_video_lstm,
                                        dtype=np.int32)
        else:
            select_flames = np.arange(0, flame_count, dtype=np.int32)
        print('flame count:', flame_count, 'select: ', select_flames[:10])
        flames = []
        i, flame_index, selected_num = 0, 0, 0
        while True:
            ret, flame = cap.read()
            if ret is False:
                break
            # FIX: guard flame_index so we never index past select_flames.
            if (flame_index < len(select_flames)
                    and i == select_flames[flame_index]):
                flame_index += 1
                selected_num += 1
                flame = preprocess_image(flame)
                flames.append(flame)
            if selected_num == 64:
                # Run the CNN in chunks of 64 frames to bound memory use.
                selected_num = 0
                this_features.append(cnn.get_features(np.array(flames)))
                flames = []
            i += 1
            if i == flame_count and flames:
                # Flush any leftover frames at the end of the video.
                this_features.append(cnn.get_features(np.array(flames)))
    else:
        raise ValueError("only support .avi video")
    # generate captions
    this_features = np.concatenate(this_features, axis=0)
    this_feature_nums, dims_feature = this_features.shape
    if this_feature_nums < rnn.n_video_lstm:
        # Fewer feature rows than the LSTM expects: pad with zeros.
        this_features = np.vstack([
            this_features,
            np.zeros(shape=(rnn.n_video_lstm - this_feature_nums,
                            dims_feature))
        ])
    if this_feature_nums > rnn.n_video_lstm:
        # More rows than expected: subsample uniformly.
        # FIX: linspace yielded float indices (invalid for array indexing)
        # and its endpoint this_feature_nums was out of range.
        selected_idxs = np.linspace(0,
                                    this_feature_nums - 1,
                                    num=rnn.n_video_lstm,
                                    dtype=np.int32)
        this_features = this_features[selected_idxs, :]
    generator = rnn.predict(this_features.reshape(1, *this_features.shape))
    captions = []
    for i, gen_caption in enumerate(generator):
        sent = []
        for ii in gen_caption:
            if ii == vocabs.word2idx['<eos>']:
                break  # end-of-sentence token terminates the caption
            if ii == vocabs.word2idx['<pad>']:
                continue  # skip padding tokens
            if ii != vocabs.word2idx['<bos>']:
                sent.append(vocabs.idx2word[ii])

        caption = ' '.join(sent)
        captions.append(caption)
    return captions
Example #23
0
    # change this according to your path
    path_to_train_set = "/home/tassos/Desktop/DATA_ASR/ASVspoof2017_V2_train_fbank"
    path_to_valid_set = "/home/tassos/Desktop/DATA_ASR/ASVspoof2017_V2_train_dev"

    model_id = get_model_id()
    # model_id = read_model_id
    n_tfiles=200 # how many train files will read per step
    n_vfiles=round(0.25*n_tfiles) # number of  validation files to be read per iter
    # print("a= \n")
    # print(n_vfiles)
    # cheat count files number
    total_inp_files = len(os.listdir(path_to_train_set))


    # Create the network
    network = CNN(model_id)
    iter=0
    # mf.input(network,n_tfiles,n_vfiles)
    # print(network.Xtrain_in)
    for i in range(1,total_inp_files,n_tfiles):
    # loop until all data are read

        mf.input(network,n_tfiles,n_vfiles)

        with tf.device('/gpu:0'):
            # restore()
            if(iter==0):
                # Define the train computation graph
                network.define_train_operations()

            # Train the network
Example #24
0
                                                    random_state=random_state)
        X_train = [X_train, X_train_c]
        X_val = [X_val, X_val_c]

    emb_layer = None
    if USE_GLOVE:
        emb_layer = create_glove_embeddings()

    model = CNN(
        embedding_layer=emb_layer,
        num_words=MAX_NUM_WORDS,
        embedding_dim=EMBEDDING_DIM,
        kernel_sizes=KERNEL_SIZES,
        feature_maps=FEATURE_MAPS,
        max_seq_length=MAX_SEQ_LENGTH,
        use_char=USE_CHAR,
        char_max_length=CHAR_MAX_LENGTH,
        alphabet_size=ALPHABET_SIZE,
        char_kernel_sizes=CHAR_KERNEL_SIZES,
        char_feature_maps=CHAR_FEATURE_MAPS,
        dropout_rate=DROPOUT_RATE,
        hidden_units=HIDDEN_UNITS,
        nb_classes=NB_CLASSES
    ).build_model()

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.Adam(),
        metrics=['accuracy']
    )

    # model.summary()
Example #25
0
def main(args):
    """Adversarial domain-adaptation training loop (Python 2).

    Trains a question-retrieval model (LSTM or CNN, per ``args.model_type``)
    on labeled Ubuntu data while a domain classifier is trained on unlabeled
    Android data, then optionally evaluates on the dev partitions each
    ``args.val_epoch`` epochs and saves model/optimizer state.
    """
    # Unpack control flags, then strip them from `args` so the namespace
    # embedded in the save-file names below carries only hyper-parameters.
    load, save, train, evaluate = args.load, args.save, not args.no_train, not args.no_evaluate
    del args.load
    del args.save
    del args.no_train
    del args.no_evaluate

    print "Initializing Training Datasets..."
    if train:
        ubuntu_train_loader = DataLoader(
            UbuntuDataset(name='ubuntu', partition='train'),
            batch_size=args.batch_size,  # 20*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify,
        )

        # Note, Android train data isn't labeled.
        android_train_loader = DataLoader(
            UbuntuDataset(name='android', partition='train'),
            batch_size=args.batch_size,  # 20*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify,
        )

    print "Initializing Validation Datasets..."
    if evaluate:
        ubuntu_val_loader = DataLoader(
            UbuntuDataset(name='ubuntu', partition='dev'),
            batch_size=args.batch_size,  # 20*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify,
        )
        android_val_loader = DataLoader(
            UbuntuDataset(name='android', partition='dev'),
            batch_size=args.batch_size,  # 20*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify,
        )

    # MODELS

    # Discriminator that predicts which domain (Ubuntu/Android) an encoding
    # came from; trained adversarially against the retrieval encoder.
    dc_model = DomainClassifier(args.hidden_size)

    if args.model_type == 'lstm':
        print "----LSTM----"
        qr_model = LSTMRetrieval(args.input_size,
                                 args.hidden_size,
                                 args.num_layers,
                                 args.pool,
                                 batch_size=args.batch_size)
    elif args.model_type == 'cnn':
        print "----CNN----"
        qr_model = CNN(args.input_size,
                       args.hidden_size,
                       args.pool,
                       batch_size=args.batch_size)
    else:
        raise RuntimeError('Unknown --model_type')

    # NOTE(review): loading expects files named 'gen_<load>'/'discrim_<load>'
    # but the save path at the bottom writes 'DA_Gen_Model(<args>).pth' etc.
    # — confirm the intended checkpoint naming scheme.
    if load != '':
        print "Loading Model state from 'saved_models/{}'".format(load)
        qr_model.load_state_dict(torch.load(
            'saved_models/gen_{}'.format(load)))
        dc_model.load_state_dict(
            torch.load('saved_models/discrim_{}'.format(load)))

    # CUDA

    if torch.cuda.is_available():
        print "Using CUDA"
        # NOTE(review): rebinding the loop variable only works because
        # Module.cuda() moves parameters in place and returns self — verify.
        for model in [dc_model, qr_model]:
            model = model.cuda()
            model.share_memory()

    # Loss functions and Optimizers
    # dc_criterion = nn.L1Loss() # TODO: Replace with actual.
    dc_criterion = nn.BCELoss()
    dc_optimizer = torch.optim.SGD(dc_model.parameters(), lr=args.dc_lr)

    qr_criterion = MaxMarginCosineSimilarityLoss()  # TODO...
    qr_optimizer = torch.optim.SGD(qr_model.parameters(), lr=args.qr_lr)
    if load != '' and train:
        print "Loading Optimizer state from 'saved_optimizers/{}'".format(load)
        qr_optimizer.load_state_dict(
            torch.load(
                'saved_optimizers/DA_Gen_Optimizer({}).pth'.format(load)))
        qr_optimizer.state = defaultdict(
            dict, qr_optimizer.state
        )  # https://discuss.pytorch.org/t/saving-and-loading-sgd-optimizer/2536/5
        dc_optimizer.load_state_dict(
            torch.load(
                'saved_optimizers/DA_Discrim_Optimizer({}).pth'.format(load)))
        dc_optimizer.state = defaultdict(dict, dc_optimizer.state)

    # Alternate training and (periodic) validation passes over both domains.
    for epoch in xrange(args.epochs):
        if train:
            run_epoch(args,
                      ubuntu_train_loader,
                      android_train_loader,
                      qr_model,
                      qr_criterion,
                      qr_optimizer,
                      dc_model,
                      dc_criterion,
                      dc_optimizer,
                      epoch,
                      mode='train')
        if evaluate:
            if epoch % args.val_epoch == 0:
                run_epoch(args,
                          ubuntu_val_loader,
                          android_val_loader,
                          qr_model,
                          qr_criterion,
                          qr_optimizer,
                          dc_model,
                          dc_criterion,
                          dc_optimizer,
                          epoch,
                          mode='val')

    # Persist both networks and both optimizers, keyed by the full args repr.
    if save:
        print "Saving Gen Model state to 'saved_models/DA_Gen_Model({}).pth'".format(
            args)
        torch.save(qr_model.state_dict(),
                   'saved_models/DA_Gen_Model({}).pth'.format(args))
        print "Saving Gen Optimizer state to 'saved_optimizers/DA_Gen_Optimizer({}).pth'".format(
            args)
        torch.save(qr_optimizer.state_dict(),
                   'saved_optimizers/DA_Gen_Optimizer({}).pth'.format(args))

        print "Saving Discrim Model state to 'saved_models/DA_Discrim_Model({}).pth'".format(
            args)
        torch.save(dc_model.state_dict(),
                   'saved_models/DA_Discrim_Model({}).pth'.format(args))
        print "Saving Discrim Optimizer state to 'saved_optimizers/DA_Discrim_Optimizer({}).pth'".format(
            args)
        torch.save(
            dc_optimizer.state_dict(),
            'saved_optimizers/DA_Discrim_Optimizer({}).pth'.format(args))
Example #26
0
            })
            a.append(test_acc)
        print("test acc:{}".format(sum(a) / 350))
        logger.info("test acc:{}".format(sum(a) / 350))
        # saver.save(sess, save_path=SAVE_DIR)
    end_time = time.time()
    print("train takes %d Seconds" % (int(end_time) - int(start_time)))
    logger.info("train takes %d Seconds" % (int(end_time) - int(start_time)))


def evel(sess, data, n_batches=350, batch_size=50):
    """Evaluate the module-level CNN ``model`` on the test split.

    Runs ``n_batches`` mini-batches of ``batch_size`` test samples through the
    graph with dropout disabled (keep prob 1) and returns the tuple
    ``(mean_loss, mean_accuracy)``.

    The batch counts were hard-coded (350 x 50); they are now parameters with
    the same defaults, so existing ``evel(sess, data)`` callers are unchanged.
    """
    print("evel acc of cnn>>>>")
    losses = []
    accs = []
    for _ in range(n_batches):
        timg, tlab = data.test.next_batch(batch_size)
        loss, acc = sess.run([model.loss, model.acc],
                             feed_dict={
                                 model.input_x: timg,
                                 model.input_y: tlab,
                                 model.keep_prod: 1
                             })
        losses.append(loss)
        accs.append(acc)
    return sum(losses) / n_batches, sum(accs) / n_batches


if __name__ == "__main__":
    # Build the CNN graph at module level (evel() reads this `model` global)
    # and run the training loop defined above.
    model = CNN()
    train()
class Predictor(object):
    """Runs a trained CNN emotion classifier restored from a TF checkpoint.

    Supports confusion-matrix evaluation on the filtered test set, live
    webcam emotion detection, and detection on a single image file.
    ``init_model()`` must be called before any inference method.
    """

    def __init__(self, model_path):
        # Checkpoint path prefix; the ".meta" graph file is derived from it.
        self.model_path = model_path
        # Index -> label; the order must match the training label encoding.
        self.class_name = [
            'Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'
        ]

    def init_model(self):
        """Restore the graph and weights and cache the inference tensors."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # allocate GPU memory lazily
        self.sess = tf.Session(config=config)
        self.config = CNNConfig()
        self.cnn = CNN(self.config)
        # self.cnn.setVGG16()

        print('Loading model from file:', self.model_path)
        saver = tf.train.import_meta_graph(self.model_path + '.meta')
        saver.restore(self.sess, self.model_path)
        self.graph = tf.get_default_graph()
        # Look up the tensors we need from the restored graph by name.
        self.input_x = self.graph.get_operation_by_name("input_x").outputs[0]
        self.labels = self.graph.get_operation_by_name("labels").outputs[0]
        self.dropout_keep_prob = self.graph.get_operation_by_name(
            "dropout_keep_prob").outputs[0]
        self.score = self.graph.get_operation_by_name('score/Relu').outputs[0]
        self.prediction = self.graph.get_operation_by_name(
            "prediction").outputs[0]
        self.training = self.graph.get_operation_by_name("training").outputs[0]

    def predict(self, batch_x):
        """Return ``(scores, predictions)`` for a batch of face images."""
        feed_dict = {
            self.input_x: batch_x,
            self.dropout_keep_prob: 1.0,  # dropout disabled at inference
            self.training: False
        }
        score, pre = self.sess.run([self.score, self.prediction], feed_dict)
        return score, pre

    def draw_confusion_matrix(self):
        """Evaluate on the filtered test set, print weighted P/R/F1 and plot
        a row-normalized confusion matrix (saved to
        ./data/confusion_matrix.jpg)."""
        test_dataset = TextLineDataset(
            os.path.join('data', preprocess.FILTERED_TEST_PATH)).skip(1).batch(
                self.cnn.test_batch_size)
        # Create a one-shot iterator over the test file.
        test_iterator = test_dataset.make_one_shot_iterator()
        next_test_element = test_iterator.get_next()

        y_true = []
        y_pred = []
        # NOTE(review): test_loss is never accumulated anywhere, so the
        # average below is always 0 — confirm whether loss reporting was
        # intended here.
        test_loss = 0.0
        test_accuracy = 0.0
        test_precision = 0.0
        test_recall = 0.0
        test_f1_score = 0.0
        num_batches = 0
        while True:
            try:
                lines = self.sess.run(next_test_element)
                batch_x, batch_y = self.cnn.convert_input(lines)
                feed_dict = {
                    self.input_x: batch_x,
                    self.labels: batch_y,
                    self.dropout_keep_prob: 1.0,
                    self.training: False
                }
                # Average the scores over several forward passes.
                # BUGFIX: the original inner loop reused `i`, clobbering the
                # batch counter used for the averages after the loop.
                mean_score = 0
                for _ in range(self.config.multi_test_num):
                    score = self.sess.run(self.score, feed_dict)
                    mean_score += score
                mean_score /= self.config.multi_test_num
                pred = self.sess.run(tf.argmax(mean_score, 1))
                y_pred.extend(pred)
                y_true.extend(batch_y)
                num_batches += 1
            except tf.errors.OutOfRangeError:
                # Test set exhausted: compute the evaluation metrics.
                test_loss /= max(num_batches, 1)  # guard empty test set
                test_accuracy = metrics.accuracy_score(y_true=y_true,
                                                       y_pred=y_pred)
                test_precision = metrics.precision_score(y_true=y_true,
                                                         y_pred=y_pred,
                                                         average='weighted')
                test_recall = metrics.recall_score(y_true=y_true,
                                                   y_pred=y_pred,
                                                   average='weighted')
                test_f1_score = metrics.f1_score(y_true=y_true,
                                                 y_pred=y_pred,
                                                 average='weighted')
                log = ('precision: %0.6f, recall: %0.6f, f1_score: %0.6f' %
                       (test_precision, test_recall, test_f1_score))
                print(log)

                cm = confusion_matrix(y_true, y_pred)
                print('Total samples:', np.sum(cm))
                # Normalize each row so cells are per-class recall fractions.
                cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
                print('Confusion matrix:\n', cm)
                # Draw the confusion matrix.
                # ==============================================================
                fig, ax = plt.subplots()
                im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
                ax.figure.colorbar(im, ax=ax)
                # We want to show all ticks...
                ax.set(
                    xticks=np.arange(cm.shape[1]),
                    yticks=np.arange(cm.shape[0]),
                    # ... and label them with the respective list entries
                    xticklabels=self.class_name,
                    yticklabels=self.class_name,
                    title="Normalized confusion matrix",
                    ylabel='True label',
                    xlabel='Predicted label')

                # Rotate the tick labels and set their alignment.
                plt.setp(ax.get_xticklabels(),
                         rotation=45,
                         ha="right",
                         rotation_mode="anchor")

                # Loop over data dimensions and create text annotations.
                fmt = '.2f'
                thresh = cm.max() / 2.
                for i in range(cm.shape[0]):
                    for j in range(cm.shape[1]):
                        ax.text(
                            j,
                            i,
                            format(cm[i, j], fmt),
                            ha="center",
                            va="center",
                            color="white" if cm[i, j] > thresh else "black")
                fig.tight_layout()
                plt.savefig('./data/confusion_matrix.jpg')
                plt.show()
                # =====================================================================
                break

    def _detect_sentiment(self, detector, img):
        """Detect faces in ``img``, classify each face's emotion, and annotate
        the image in place.  Returns the annotated image, or ``None`` when no
        face is found."""
        # Convert to grayscale for the Haar cascade face detector.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = detector.detectMultiScale(image=gray,
                                          scaleFactor=1.1,
                                          minNeighbors=2,
                                          minSize=(30, 30),
                                          flags=0)
        if len(faces) != 0:
            batch_x = []
            for face in faces:
                x, y, w, h = face
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 1)
                # OpenCV images are indexed [y, x]; crop the face region,
                # resize to the network input size and show it.
                img_cropped = cv2.resize(
                    gray[y:y + h,
                         x:x + w], (self.cnn.img_size, self.cnn.img_size))
                cv2.imshow('cropped', img_cropped)
                img_input = img_cropped.reshape(
                    [self.cnn.img_size, self.cnn.img_size, 1])
                batch_x.append(img_input)
            batch_x = np.stack(batch_x)
            # Average scores over multiple forward passes.
            mean_score = 0
            for _ in range(self.config.multi_test_num):
                score, _pred = self.predict(batch_x)
                mean_score += score
            mean_score /= self.config.multi_test_num
            pred = self.sess.run(tf.argmax(mean_score, 1))
            for i in range(len(faces)):
                # Show a bar chart of the per-class scores for this face.
                # =======================================================
                plt.bar(range(self.cnn.class_num),
                        mean_score[i],
                        align='center',
                        color='steelblue',
                        alpha=0.8)
                plt.ylabel('Score')
                plt.xticks(range(self.cnn.class_num), self.class_name)
                plt.show()
                # ========================================================
                cv2.putText(img=img,
                            text=self.class_name[pred[i]],
                            org=(faces[i][0], faces[i][1] + faces[i][3] + 20),
                            fontFace=cv2.FONT_HERSHEY_COMPLEX,
                            fontScale=0.6,
                            color=(0, 0, 255))
            return img
        else:
            return None

    def camera_detect(self):
        """Run live emotion detection on the built-in webcam (device 0).

        ESC exits; 's' saves the current frame to capture.jpg and exits.
        """
        # Device index 0 is the built-in camera; use 1, 2, ... for others.
        cam = cv2.VideoCapture(0)
        detector = cv2.CascadeClassifier(
            './data/haarcascade_frontalface_alt.xml')
        while True:
            # Read one frame from the camera.
            sucess, img = cam.read()
            if not sucess:
                continue
            img = self._detect_sentiment(detector, img)
            # Show the annotated frame (None means no face was found).
            if img is not None:
                cv2.imshow("Sentiment Detection", img)
            # Poll the keyboard to keep the window responsive.
            k = cv2.waitKey(1)
            if k == 27:
                # ESC: stop capturing.
                cv2.destroyAllWindows()
                break
            elif k == ord("s"):
                # 's': save the frame, then stop capturing.
                cv2.imwrite("capture.jpg", img)
                cv2.destroyAllWindows()
                break
        # Release the camera handle.
        cam.release()

    def image_detect(self, img_path):
        """Run emotion detection on a single image file.

        Shows the annotated result; pressing 's' saves it to result.jpg.
        """
        img = cv2.imread(img_path)
        detector = cv2.CascadeClassifier(
            './data/haarcascade_frontalface_alt.xml')
        img = self._detect_sentiment(detector, img)
        if img is None:
            # BUGFIX: _detect_sentiment returns None when no face is found;
            # the original passed None straight to cv2.imshow and crashed.
            print('No face detected in', img_path)
            return
        cv2.imshow('Sentiment Detection Result', img)
        k = cv2.waitKey()
        if k == ord("s"):
            # 's': save the annotated image before exiting.
            cv2.imwrite("result.jpg", img)
        cv2.destroyAllWindows()
Example #28
0
def main():
    """Active-learning experiment with a sentiment CNN on tweets.

    Seeds the model with a small labeled set, iteratively acquires labels
    for unlabeled tweets using the configured acquisition function, then
    evaluates on the held-out test set and persists the model, the accuracy
    curve, and any hand-labeled tweets.
    """
    import os  # local import: only needed for the portable save path below

    args = parse_args()
    labeled_twitter_csv_path = args.labeled_tweet_csv_file
    unlabeled_twitter_csv_path = args.unlabeled_tweet_csv_file

    device_type = args.device
    acquistion_function_type = args.acquisition_func
    human_label = args.human_label

    # Flag for using the model to generate inputs for the acquisition
    # function; `tweet_count` scores tweets without model predictions.
    use_model_acq = True
    if acquistion_function_type == "least_confidence":
        acquisition_func = least_confidence
    elif acquistion_function_type == "random":
        acquisition_func = random_score
    elif acquistion_function_type == "entropy":
        acquisition_func = entropy_score
    elif acquistion_function_type == "tweet_count":
        acquisition_func = tweet_count_norm
        use_model_acq = False
    else:
        acquisition_func = least_confidence  # default fallback

    seed_data_size = args.seed_data_size
    use_bert = False
    shuffle = False
    train_data, dev_data, test_data = load_twitter_data(
        labeled_twitter_csv_path,
        test_split_percent=0.1,
        val_split_percent=0.2,
        shuffle=shuffle,
        overfit=True,
        use_bert=use_bert,
        overfit_val=40000)
    unlabeled_tweets, ground_truth_labels = load_unlabeled_tweet_csv(
        unlabeled_twitter_csv_path, num_tweets=45000)

    # Convert "unlabeled" tweets to token ids.
    X_unlabeled = train_data.convert_text_to_ids(unlabeled_tweets)
    # Map labels from {-1, 1} to {0, 1}.
    ground_truth_labels = (ground_truth_labels + 1.0) / 2.0

    X_seed = train_data.Xwordlist[0:seed_data_size]
    Y_seed = train_data.labels[0:seed_data_size]
    Y_seed = (Y_seed + 1.0) / 2.0

    print(train_data.vocab_size)
    print(len(X_seed))
    print(dev_data.length)
    print(test_data.length)
    num_samples = args.sample_size

    cnn_net = CNN(train_data.vocab_size, DIM_EMB=300, NUM_CLASSES=2)

    # REFACTOR: the GPU and CPU code paths previously duplicated the entire
    # training/evaluation call; collapsed into one call parameterized by
    # `use_gpu`/`device`, with identical arguments to the original branches.
    use_gpu = device_type == "gpu" and torch.cuda.is_available()
    device = torch.device('cuda:0') if use_gpu else torch.device('cpu')
    if use_gpu:
        cnn_net = cnn_net.cuda()

    epoch_losses, eval_accuracy, hand_labeled_data = train_active_learning(
        cnn_net,
        train_data,
        X_seed,
        Y_seed,
        X_unlabeled,
        ground_truth_labels,
        dev_data,
        use_model=use_model_acq,
        num_epochs=8,
        human_label=human_label,
        acquisition_func=acquisition_func,
        lr=0.0035,
        batchSize=150,
        num_samples=num_samples,
        use_gpu=use_gpu,
        device=device)
    cnn_net.eval()
    print("Test Set")
    test_accuracy = eval_network(test_data,
                                 cnn_net,
                                 use_gpu=use_gpu,
                                 device=device)

    plot_accuracy(
        eval_accuracy, "Sentiment CNN (Active Learning) lr=0.0035 " +
        acquistion_function_type, seed_data_size)
    # BUGFIX: the literal "saved_models\\cnn_active_learn.pth" creates a file
    # literally named "saved_models\cnn_active_learn.pth" on POSIX systems
    # instead of a file inside the saved_models directory.
    torch.save(cnn_net.state_dict(),
               os.path.join("saved_models", "cnn_active_learn.pth"))
    np.save(
        "human_labelling_results/cnn_active_learning_validation_accuracy_" +
        acquistion_function_type + "_" + str(seed_data_size) + "_" +
        str(num_samples) + ".npy", np.array(eval_accuracy))

    human_labels = []
    ground_truth_labels = []
    tweets = []
    save_labels = True

    if save_labels:
        # Persist tweets labeled by hand during acquisition, alongside their
        # ground-truth labels, for later auditing.
        for tweet, label, ground_truth_label in hand_labeled_data:
            tweet = train_data.convert_to_words(tweet)
            tweets.append(tweet)
            human_labels.append(label)
            ground_truth_labels.append(ground_truth_label)

        new_labeled_tweets = pd.DataFrame({
            'label': human_labels,
            'ground truth': ground_truth_labels,
            'text': tweets
        })
        new_labeled_tweets.to_csv("human_labeled_tweets_lc_rk.csv",
                                  header=True,
                                  index=False)
Example #29
0
def main():
    """Train a 4-character captcha CNN with multi-label soft-margin loss.

    Saves a checkpoint and measures test accuracy every 10 epochs, and
    writes the final weights to ./model.pkl.
    """
    # Load net
    cnn = CNN()
    loss_func = nn.MultiLabelSoftMarginLoss()
    optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)
    if torch.cuda.is_available():
        cnn.cuda()
        loss_func.cuda()

    # Load data
    train_dataloader = dataset.get_train_data_loader()
    test_dataloader = dataset.get_test_data_loader()

    # Train model
    for epoch in range(num_epochs):
        cnn.train()  # re-enter training mode (eval() is set when testing)
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.long())
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            predict_labels = cnn(images)
            loss = loss_func(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.item())

        # Save and test model
        if (epoch + 1) % 10 == 0:
            filename = "model" + str(epoch + 1) + ".pkl"
            torch.save(cnn.state_dict(), filename)
            cnn.eval()
            correct = 0
            total = 0
            # NOTE(review): only output[0] is decoded below, so this assumes
            # the test loader yields batches of size 1 — confirm against
            # dataset.get_test_data_loader().
            for (image, label) in test_dataloader:
                vimage = Variable(image)
                if torch.cuda.is_available():
                    vimage = vimage.cuda()
                output = cnn(vimage)
                # Decode the 4 captcha characters: each character occupies a
                # CHAR_SET_LEN-wide slice of the output; take the argmax of
                # each slice and map it back to its character.
                predict_label = ""
                for k in range(4):
                    predict_label += config.CHAR_SET[np.argmax(
                        output[0, k * config.CHAR_SET_LEN:(k + 1) *
                               config.CHAR_SET_LEN].data.cpu().numpy())]
                true_label = one_hot.vec2text(label.numpy()[0])
                total += label.size(0)
                if predict_label == true_label:
                    correct += 1
                if total % 200 == 0:
                    print(
                        'Test Accuracy of the model on the %d test images: %f %%'
                        % (total, 100 * correct / total))
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
            print("save and test model...")
    torch.save(cnn.state_dict(), "./model.pkl")  # current is model.pkl
    print("save last model")
Example #30
0
def main(args):
    """Build, optionally load, train/evaluate, and save a retrieval model.

    Args:
        args: parsed ``argparse.Namespace``. Fields consumed and removed
            here: ``load``, ``save``, ``no_train``, ``no_evaluate``.
            Fields read later: ``model_type``, ``input_size``,
            ``hidden_size``, ``num_layers``, ``pool``, ``batch_size``,
            ``lr``, ``dataset``, ``epochs``, ``val_epoch``.

    Raises:
        RuntimeError: if ``args.model_type`` is neither 'lstm' nor 'cnn'.
    """
    load, save, train, evaluate = args.load, args.save, not args.no_train, not args.no_evaluate
    # Drop the consumed flags so repr(args) — used in the checkpoint file
    # names below — reflects only model/training hyper-parameters.
    del args.load
    del args.save
    del args.no_train
    del args.no_evaluate

    # MODEL

    if args.model_type == 'lstm':
        print("----LSTM----")
        model = LSTMRetrieval(args.input_size,
                              args.hidden_size,
                              args.num_layers,
                              args.pool,
                              batch_size=args.batch_size)
    elif args.model_type == 'cnn':
        print("----CNN----")
        model = CNN(args.input_size,
                    args.hidden_size,
                    args.pool,
                    batch_size=args.batch_size)
    else:
        raise RuntimeError('Unknown --model_type')

    if load != '':
        print("Loading Model state from 'saved_models/{}'".format(load))
        model.load_state_dict(torch.load('saved_models/{}'.format(load)))

    # CUDA

    if torch.cuda.is_available():
        print("Using CUDA")
        model = model.cuda()
        model.share_memory()

    # Loss function and Optimizer
    loss_function = MaxMarginCosineSimilarityLoss()

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    if load != '' and train:
        print("Loading Optimizer state from 'saved_optimizers/{}'".format(load))
        optimizer.load_state_dict(
            torch.load('saved_optimizers/{}'.format(load)))
        # Restored SGD state must be a defaultdict, per
        # https://discuss.pytorch.org/t/saving-and-loading-sgd-optimizer/2536/5
        optimizer.state = defaultdict(dict, optimizer.state)

    # Data
    # FIX: the original rebuilt UbuntuDataset + DataLoader inside the epoch
    # loop, re-parsing the whole corpus every epoch. Build them once here;
    # per-epoch behavior (order, batching) is unchanged since shuffle=False.
    print("Initializing Ubuntu Dataset...")
    train_dataloader = None
    val_dataloader = None
    if train:
        train_dataset = UbuntuDataset(name=args.dataset, partition='train')
        train_dataloader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,  # 100*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify)
    if evaluate:
        val_dataset = UbuntuDataset(name=args.dataset, partition='dev')
        val_dataloader = DataLoader(
            val_dataset,
            batch_size=args.batch_size,  # 100*n -> n questions.
            shuffle=False,
            num_workers=8,
            collate_fn=batchify)

    for epoch in range(args.epochs):
        if train:
            run_epoch(args,
                      train_dataloader,
                      model,
                      loss_function,
                      optimizer,
                      epoch,
                      mode='train')
        # Validate only every val_epoch epochs (epoch 0 always qualifies).
        if evaluate and epoch % args.val_epoch == 0:
            run_epoch(args,
                      val_dataloader,
                      model,
                      loss_function,
                      optimizer,
                      epoch,
                      mode='val')

    if save:
        print("Saving Model state to 'saved_models/Model({}).pth'".format(args))
        torch.save(model.state_dict(),
                   'saved_models/Model({}).pth'.format(args))
        print("Saving Optimizer state to 'saved_optimizers/Optimizer({}).pth'".format(
            args))
        torch.save(optimizer.state_dict(),
                   'saved_optimizers/Optimizer({}).pth'.format(args))