Example #1
import gensim
import numpy as np
import torch
import pytorch_lightning as pl

# CNN, Preprocessor and get_dataloader are this project's own modules;
# their imports are omitted in the original snippet.


def main(config):
    # Load the pretrained word vectors and register zero vectors for the
    # padding and unknown tokens expected by the preprocessor.
    word2vec_model = gensim.models.Word2Vec.load(config.pretrained_word_vector)
    word2vec_model.wv["<pad>"] = np.zeros(word2vec_model.wv.vector_size)
    word2vec_model.wv["<unk>"] = np.zeros(word2vec_model.wv.vector_size)

    preprocessor = Preprocessor(word2vec_model)

    test_dataloader = get_dataloader(
        config.test_data, config.max_len, preprocessor, config.batch_size
    )

    net = CNN(word2vec_model.wv, config)
    # Load the trained weights; map to CPU first and let the Trainer move the
    # model to the configured devices.
    checkpoint = torch.load(config.ckpt_path, map_location="cpu")
    net.load_state_dict(checkpoint["state_dict"])

    trainer = pl.Trainer(
        distributed_backend=config.distributed_backend,
        gpus=config.gpus,
    )
    res = trainer.test(net, test_dataloader)
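
The checkpoint above is indexed with a "state_dict" key. A minimal sketch of the matching save step, assuming the checkpoint was written either as a plain dict or by PyTorch Lightning ("best.ckpt" is a placeholder path, not from the original):

# Sketch of the saving side (assumed; not part of the original example).
torch.save({"state_dict": net.state_dict()}, "best.ckpt")
# pytorch_lightning's trainer.save_checkpoint("best.ckpt") writes the same
# "state_dict" key (plus optimizer/epoch metadata), so the load code above
# works with either layout.
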
Example #2
import torch

# CNN, LSTM, IDDataset, evaluation and vocab come from this project's own
# modules; their imports are omitted in the original snippet.


def main(args):

    # Build models
    encoder = CNN(args.hidden_size)
    decoder = LSTM(args.embed_size, args.hidden_size,
                   len(vocab), args.num_layers)

    # Load the trained model parameters
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))

    # Switch to evaluation mode (BatchNorm uses running statistics,
    # dropout is disabled)
    encoder.eval()
    decoder.eval()

    # Load the test split of the dataset
    is_training = False
    testing_data = IDDataset(is_training)

    # Move the models to the GPU if one is available
    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    test_acc = evaluation(testing_data, encoder, decoder)

    print("Accuracy is %.4f" % test_acc)
Example #3
import argparse

import matplotlib.pyplot as plt
import pandas as pd
import torch
import torchvision
import torchvision.transforms as transforms
from sklearn.metrics import accuracy_score, confusion_matrix

# CNN is this project's own network definition; its import is omitted in the
# original snippet.


def main():
    parser = argparse.ArgumentParser(description='pytorch example: CIFAR10')
    parser.add_argument('--batch',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--model',
                        '-m',
                        default='model.pth',
                        help='Network Model')
    args = parser.parse_args()

    batch_size = args.batch

    print('Showing training log')

    df = pd.read_csv('train.log')
    plt.plot(df['epoch'], df['train/accuracy'], label='train/acc.', marker="o")
    plt.plot(df['epoch'], df['test/accuracy'], label='test/acc.', marker="o")

    plt.legend(loc='lower right')
    plt.ylim([0.8, 1.0])
    plt.savefig('accuracy.png')
    plt.show()

    transform = transforms.Compose([
        transforms.ToTensor(),  # transform to torch.Tensor
        transforms.Normalize(mean=(0.5, ), std=(0.5, ))
    ])

    trainset = torchvision.datasets.CIFAR10(root='../cifar10_root',
                                            train=True,
                                            download=True,
                                            transform=transform)
    testset = torchvision.datasets.CIFAR10(root='../cifar10_root',
                                           train=False,
                                           download=True,
                                           transform=transform)

    # '+' concatenates the two datasets into a single ConcatDataset,
    # so evaluation runs over train and test images together.
    dataset = trainset + testset

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=2)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('device:', device)

    # Load & Predict Test
    net = CNN()  # the model class must be instantiated before loading the weights
    param = torch.load(args.model, map_location=device)
    net.load_state_dict(param)
    net.to(device)  # for GPU
    net.eval()  # inference mode (BatchNorm/Dropout use evaluation behaviour)

    true_list = []
    pred_list = []
    with torch.no_grad():
        for data in dataloader:
            images, labels = data

            true_list.extend(labels.tolist())
            images, labels = images.to(device), labels.to(device)  # for GPU

            outputs = net(images)
            _, predicted = torch.max(outputs, 1)  # class index with the highest score
            pred_list.extend(predicted.tolist())

    acc = accuracy_score(true_list, pred_list)
    print('Predict... all data acc.: {:.3f}'.format(acc))

    confmat = confusion_matrix(y_true=true_list, y_pred=pred_list)

    fig, ax = plt.subplots(figsize=(6, 6))
    ax.matshow(confmat, cmap=plt.cm.Purples, alpha=0.8)
    for i in range(confmat.shape[0]):
        for j in range(confmat.shape[1]):
            if confmat[i, j] > 0:
                ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')

    plt.xlabel('predicted label')
    plt.ylabel('true label')
    plt.tight_layout()
    plt.savefig('confusion_matrix.png')
    plt.show()
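
The same confusion matrix also yields per-class accuracies directly; a short sketch using the confmat computed above (diagonal = correct predictions, row sums = true samples per class):

    # Per-class accuracy from the confusion matrix (sketch, not in the original).
    per_class_acc = confmat.diagonal() / confmat.sum(axis=1)
    for cls, cls_acc in enumerate(per_class_acc):
        print('class {}: acc. {:.3f}'.format(cls, cls_acc))
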
Example #4
import torch
from net import MLP, CNN  # this project's own network definitions
from torchvision import datasets, transforms
from sklearn.metrics import multilabel_confusion_matrix

test_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST(
        './fashionmnist_data/',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
    batch_size=1,
    shuffle=True)
model = CNN()
device = torch.device('cpu')
model = model.to(device)
model.load_state_dict(torch.load('output/CNN.pt'))
model.eval()
pres = []
labels = []
with torch.no_grad():  # no gradients are needed for evaluation
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        pred = output.argmax(
            dim=1, keepdim=True)  # get the index of the max log-probability
        pres.append(pred[0][0].item())
        labels.append(target[0].item())
mcm = multilabel_confusion_matrix(labels, pres)  # one 2x2 matrix per class
print(mcm)
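
multilabel_confusion_matrix reports each class in one-vs-rest form, one 2x2 matrix [[TN, FP], [FN, TP]] per class, so per-class precision and recall can be read straight from it. A short sketch using the mcm array from above:

# Sketch (not in the original): per-class precision/recall from mcm.
for c, ((tn, fp), (fn, tp)) in enumerate(mcm):
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    print('class %d: precision %.3f, recall %.3f' % (c, precision, recall))
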
Example #5
import cv2
import numpy as np
import torch
import torch.nn as nn

# CNN, image_path and the gpu flag are defined in the surrounding script.
orig = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
#orig = cv2.resize(orig, (IMG_SIZE, IMG_SIZE))
img = orig.copy().astype(np.float32)
perturbation = np.empty_like(orig)

mean = [0.5]
std = [0.5]
img /= 255.0
img = (img - mean) / std

# load model
model1 = CNN(1, 10)

saved1 = torch.load('relu.pkl', map_location='cpu')

model1.load_state_dict(saved1)

model1.eval()

criterion = nn.CrossEntropyLoss()
device = 'cuda' if gpu else 'cpu'
model1.to(device)  # keep the model on the same device as the input tensor below

# prediction before the attack; the input needs gradients for the attack step
inp = (torch.from_numpy(img).float()
       .unsqueeze(0).unsqueeze(0)  # add batch and channel dimensions
       .to(device)
       .requires_grad_(True))

out1 = model1(inp)

pred1 = np.argmax(out1.data.cpu().numpy())
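
The requires_grad input, the empty perturbation buffer and the cross-entropy criterion set up a gradient-based attack on this prediction. A minimal FGSM-style sketch of that next step; eps and the use of pred1 as the target label are assumptions, not part of the original:

# Sketch of an FGSM-style step (assumed; eps is a placeholder attack strength).
eps = 0.1
label = torch.tensor([pred1]).to(device)      # move away from the current prediction
loss = criterion(out1, label)
loss.backward()                               # gradient of the loss w.r.t. the input
perturbation = eps * inp.grad.data.sign()     # replaces the empty placeholder above
adv = inp.data + perturbation                 # perturbed (adversarial) input
out2 = model1(adv)
pred2 = np.argmax(out2.data.cpu().numpy())    # prediction after the attack
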