Example 1
def test_cca(net, cca_loss, optimizer, train_iter, val_iter, train_iterations,
             val_iterations):
    '''
    Train the network with the CCA loss for train_iterations batches, evaluate
    on val_iterations batches, and score the projected features with an SVM.

    Return:
        train_loss: number
        test_loss: number
        acc: number
    '''
    train_losses = []
    train_data = []
    train_label = []
    net.train()
    for iteration in range(train_iterations):
        optimizer.zero_grad()
        imgs, texts, labels = Variable_(next(train_iter))
        o1, o2 = net.forward_cca(imgs, texts)
        loss = cca_loss.loss(o1, o2)
        train_losses.append(loss.item())
        loss.backward()
        torch.nn.utils.clip_grad_norm_(net.parameters(), 0.25)
        optimizer.step()
        train_data.append(o1)  # or: o1.clone().detach().cpu().numpy()
        train_label.append(labels)  # or: labels.clone().detach().cpu().numpy()
    train_loss = np.mean(train_losses)

    losses = []
    val_data = []
    val_label = []
    with torch.no_grad():
        net.eval()
        for iteration in range(val_iterations):
            imgs, texts, labels = Variable_(next(val_iter))
            o1, o2 = net.forward_cca(imgs, texts)
            val_data.append(o1)  # or: o1.detach().cpu().numpy()
            val_label.append(labels)  # or: labels.detach().cpu().numpy()
            loss = cca_loss.loss(o1, o2)
            losses.append(loss.item())
        # Stack the per-batch embeddings and labels, then move them to NumPy for the SVM
        train_data = torch.cat(train_data, 0)
        val_data = torch.cat(val_data, 0)
        train_label = torch.cat(train_label, 0)
        val_label = torch.cat(val_label, 0)
        train_data = train_data.cpu().numpy()
        val_data = val_data.cpu().numpy()
        train_label = train_label.cpu().numpy()
        val_label = val_label.cpu().numpy()
    test_loss = np.mean(losses)
    acc = svm_classify(train_data, val_data, train_label, val_label)

    return train_loss, test_loss, acc
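
The svm_classify helper used here is defined elsewhere in the project. A minimal sketch of a compatible implementation, assuming a plain scikit-learn linear SVM fitted on the CCA-projected view-1 features (the LinearSVC settings are illustrative assumptions, not taken from the original repository):

def svm_classify(train_data, val_data, train_label, val_label):
    """Hypothetical helper: fit a linear SVM on the projected training
    features and return accuracy on the validation features."""
    import numpy as np
    from sklearn.svm import LinearSVC
    from sklearn.metrics import accuracy_score

    clf = LinearSVC(C=0.01, dual=False)  # assumed hyperparameters
    clf.fit(train_data, np.ravel(train_label))
    pred = clf.predict(val_data)
    return accuracy_score(np.ravel(val_label), pred)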
Example 2
	def fit(self, data):
		print('len(data.items()): ', len(data.items()))
		X, Y = [], []
		for idx, embs in data.items():
			for emb in embs:
				X.append(emb)
				Y.append(idx)
		# Shuffle the (embedding, label) pairs together before fitting
		pairs = list(zip(X, Y))
		shuffle(pairs)
		X, Y = zip(*pairs)
		print('len(X), len(Y): ', len(X), len(Y))
		# Use the project's svm_classify helper when there are enough samples
		# (more than two embeddings per class on average); otherwise fall back
		# to a plain scikit-learn SVC.
		if len(X) > len(data.items())*2:
			self.classify_model = svm_classify(X, Y)
		else:
			from sklearn import svm
			clf = svm.SVC(gamma='scale')
			clf.fit(X, Y)
			self.classify_model = clf
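
In this snippet svm_classify(X, Y) must return a fitted model, since its result is stored in self.classify_model; that differs from the accuracy-returning variants in the other examples. A hypothetical sketch consistent with that usage, assuming a linear-kernel scikit-learn SVC (the kernel and C value are assumptions, not taken from the original code):

def svm_classify(X, Y):
    """Hypothetical helper: fit a linear-kernel SVM and return the fitted model."""
    from sklearn import svm

    clf = svm.SVC(kernel='linear', C=1.0)  # assumed settings
    clf.fit(X, Y)
    return clf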
Example 3
    # whether a linear CCA should be applied to the features learned by the networks;
    # it does not significantly affect performance on noisy MNIST
    apply_linear_cca = True

    # end of parameters section
    ############

    # Each view is stored in a separate gzip file and is downloaded the first time the code runs.
    # Datasets are stored under the datasets folder of the user's Keras directory,
    # normally [Home Folder]/.keras/datasets/
    data1 = load_data('noisymnist_view1.gz', 'https://www2.cs.uic.edu/~vnoroozi/noisy-mnist/noisymnist_view1.gz')
    data2 = load_data('noisymnist_view2.gz', 'https://www2.cs.uic.edu/~vnoroozi/noisy-mnist/noisymnist_view2.gz')

    # Build and train the DCCA model, then produce the new features
    model = create_model(layer_sizes1, layer_sizes2, input_shape1, input_shape2,
                            learning_rate, reg_par, outdim_size, use_all_singular_values)
    model.summary()
    model = train_model(model, data1, data2, epoch_num, batch_size)
    new_data = test_model(model, data1, data2, outdim_size, apply_linear_cca)

    # Train and test a linear-kernel SVM on the new view-1 features
    [test_acc, valid_acc] = svm_classify(new_data, C=0.01)
    print("Accuracy on view 1 (validation data) is:", valid_acc * 100.0)
    print("Accuracy on view 1 (test data) is:", test_acc*100.0)

    # Saving new features in a gzip pickled file specified by save_to
    print('saving new features ...')
    with gzip.open(save_to, 'wb') as f1:
        thepickle.dump(new_data, f1)
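
Both the layout of new_data and the body of svm_classify come from elsewhere in this repository. A rough sketch of a compatible classifier, assuming new_data holds (features, labels) pairs for the train/validation/test splits of view 1 (this layout and the LinearSVC choice are assumptions, not shown in the snippet):

def svm_classify(data, C):
    """Hypothetical: linear SVM on view-1 features, returns [test_acc, valid_acc]."""
    import numpy as np
    from sklearn.svm import LinearSVC
    from sklearn.metrics import accuracy_score

    train_x, train_y = data[0]  # assumed split layout: train, validation, test
    valid_x, valid_y = data[1]
    test_x, test_y = data[2]

    clf = LinearSVC(C=C, dual=False)
    clf.fit(train_x, np.ravel(train_y))
    test_acc = accuracy_score(np.ravel(test_y), clf.predict(test_x))
    valid_acc = accuracy_score(np.ravel(valid_y), clf.predict(valid_x))
    return [test_acc, valid_acc]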
Example 4
    model, embed_generator = create_model(numNodes, factors)
    model.summary()

    model.compile(optimizer='rmsprop', loss={'left_right_dot': LINE_loss})

    # Keras 1 generator API (samples_per_epoch / nb_epoch argument names)
    model.fit_generator(data_gen,
                        samples_per_epoch=epoch_train_size,
                        nb_epoch=epoch_num,
                        verbose=1)

    new_X = []
    new_label = []

    keys = list(labels_dict.keys())
    np.random.shuffle(keys)

    for k in keys:
        # Predict the embedding for node k and combine the two outputs
        x = embed_generator.predict_on_batch(
            [np.asarray([k]), np.asarray([k])])
        new_X.append(x[0][0] + x[1][0])
        new_label.append(labels_dict[k])

    new_X = np.asarray(new_X, dtype=np.float32)
    new_label = np.asarray(new_label, dtype=np.int32)

    [train_acc, valid_acc, test_acc] = svm_classify(new_X, new_label,
                                                    split_ratios, svm_C)

    print("Train Acc:", train_acc, " Validation Acc:", valid_acc, " Test Acc:",
          test_acc)
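
Here svm_classify splits the embeddings itself according to split_ratios. A hypothetical sketch of such a helper, assuming split_ratios gives the training and validation fractions and the remainder is used for testing (the signature is inferred from the call, not from the original source):

def svm_classify(X, labels, split_ratios, C):
    """Hypothetical: split the embeddings, fit a linear SVM, return three accuracies."""
    from sklearn.svm import LinearSVC
    from sklearn.metrics import accuracy_score

    n = len(X)
    n_train = int(n * split_ratios[0])  # assumed meaning of split_ratios
    n_valid = int(n * split_ratios[1])
    train_x, train_y = X[:n_train], labels[:n_train]
    valid_x, valid_y = X[n_train:n_train + n_valid], labels[n_train:n_train + n_valid]
    test_x, test_y = X[n_train + n_valid:], labels[n_train + n_valid:]

    clf = LinearSVC(C=C, dual=False)
    clf.fit(train_x, train_y)
    return [accuracy_score(train_y, clf.predict(train_x)),
            accuracy_score(valid_y, clf.predict(valid_x)),
            accuracy_score(test_y, clf.predict(test_x))]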
Example 5
    #data1 = load_data('noisymnist_view1.gz', 'https://www2.cs.uic.edu/~vnoroozi/noisy-mnist/noisymnist_view1.gz')
    #data2 = load_data('noisymnist_view2.gz', 'https://www2.cs.uic.edu/~vnoroozi/noisy-mnist/noisymnist_view2.gz')

    # Build and train the DCCA model, then produce the new features
    model = create_model(layer_sizes1, layer_sizes2, input_shape1,
                         input_shape2, learning_rate, reg_par, outdim_size,
                         use_all_singular_values, batch_size)
    model.summary()
    model = train_model(model, data1, data2, epoch_num, batch_size)
    new_data = tt_model(model, data1, data2, outdim_size, apply_linear_cca)

    # Train and test a linear-kernel SVM on the new view-1 features
    [test_acc, valid_acc], [test_p, valid_p], [test_label, valid_label] = \
        svm_classify(new_data, C=0.01, view=1)
    print("Accuracy on view 1 (validation data) is:", valid_acc * 100.0)
    print("Accuracy on view 1 (test data) is:", test_acc * 100.0)
    report_performance(test_label, test_p, 0.5)

    # Train and test a linear-kernel SVM on the new view-2 features
    [test_acc, valid_acc], [test_p, valid_p], [test_label, valid_label] = \
        svm_classify(new_data, C=0.01, view=2)
    print("Accuracy on view 2 (validation data) is:", valid_acc * 100.0)
    print("Accuracy on view 2 (test data) is:", test_acc * 100.0)
    report_performance(test_label, test_p, 0.5)
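
report_performance is also project-specific. A hedged sketch of what a function with this call signature might do, assuming binary labels and real-valued prediction scores that get thresholded at 0.5 (none of this is confirmed by the snippet):

def report_performance(labels, predictions, threshold):
    """Hypothetical: print precision/recall/F1 for scores thresholded at threshold."""
    import numpy as np
    from sklearn.metrics import precision_score, recall_score, f1_score

    y_true = np.asarray(labels).astype(int)
    y_pred = (np.asarray(predictions) >= threshold).astype(int)
    print("precision:", precision_score(y_true, y_pred))
    print("recall:   ", recall_score(y_true, y_pred))
    print("F1:       ", f1_score(y_true, y_pred))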