Example #1
0
def export(model_name, ckpt_name):
    """Export a trained PyTorch checkpoint to an ONNX model file.

    Args:
        model_name: architecture identifier; 'lenet' selects the custom
            LeNet, anything else is resolved through the model zoo.
        ckpt_name: path prefix of the checkpoint; '<ckpt_name>.pth' is
            read and '<ckpt_name>.onnx' is written next to it.
    """
    ckpt_path = ckpt_name + '.pth'
    onnx_model = ckpt_name + '.onnx'

    # model definition
    if model_name == 'lenet':
        from model.lenet import LeNet
        model = LeNet()
    else:
        from model.modelzoo import create_model
        # n_classes=120 — presumably the dataset's class count (e.g.
        # Stanford Dogs); TODO confirm against the training script
        model, input_size = create_model(model_name, n_classes=120)

    # Load weights; map to CPU so a checkpoint saved on GPU also loads on
    # a CPU-only machine (ONNX export does not need the GPU).
    ckpt = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])

    # evaluation mode: freezes dropout/batch-norm so the traced graph is stable
    model.eval()

    # Create the input placeholder used to trace the model.
    # note: we have to specify the size of a batch of input images
    if model_name == 'lenet':
        input_placeholder = torch.randn(1, 1, 28, 28)    # 1x28x28 grayscale (MNIST)
    elif model_name == 'inception_v3':
        input_placeholder = torch.randn(1, 3, 299, 299)  # Inception v3 input size
    else:
        input_placeholder = torch.randn(1, 3, 224, 224)  # ImageNet-style default

    # trace with the placeholder and write the ONNX graph
    torch.onnx.export(model, input_placeholder, onnx_model)
    print('{} exported!'.format(onnx_model))
Example #2
0
def test(model_name, model_ckpt, dataset_name, data_folder):
    """Evaluate a checkpoint on the test split and export the test data.

    Runs the model over every test batch, prints per-batch and average
    accuracy, and finally dumps all test images/labels as NumPy arrays via
    ``export_test_data_to_numpy``.

    Args:
        model_name: architecture identifier ('lenet' or a model-zoo name).
        model_ckpt: path of the '.pth' checkpoint to load.
        dataset_name: dataset key understood by ``load_data``.
        data_folder: root folder for the dataset and the exported arrays.
    """
    # model definition
    if model_name == 'lenet':
        from model.lenet import LeNet
        model = LeNet()
    else:
        from model.modelzoo import create_model
        # n_classes=120 — presumably the dataset's class count; TODO confirm
        model, input_size = create_model(model_name, n_classes=120)
    model = apply_cuda(model)

    # load weights
    ckpt = torch.load(model_ckpt)
    model.load_state_dict(ckpt['state_dict'])

    # data source; non-MNIST loaders additionally need the model's input size
    batch_size = 200
    if dataset_name == 'mnist':
        test_loader = load_data('test', batch_size, data_folder, dataset_name)
    else:
        test_loader = load_data('test', batch_size, data_folder, dataset_name,
                                input_size)
    n_batches_test = len(test_loader)

    print('==== test phase ====')
    avg_acc = float(0)
    model.eval()
    # Collect per-batch arrays and concatenate ONCE after the loop: the old
    # np.concatenate-per-batch pattern re-copied the whole accumulator every
    # iteration (O(n^2) total bytes copied).
    images_batches, labels_batches = [], []
    for i, (images, labels) in enumerate(test_loader):
        images_batches.append(images.data.numpy())
        labels_batches.append(labels.data.numpy())
        images, labels = apply_cuda(images), apply_cuda(labels)
        # NOTE(review): wrapping this in torch.no_grad() would cut memory use;
        # left as-is to avoid assuming the installed torch version supports it.
        logits = model(images)
        _, pred = torch.max(logits.data, 1)
        if i == 0:
            # sanity print of the first sample on the first batch
            print(images[0])
            print(logits[0], pred[0], labels[0])
        bs_ = labels.data.size()[0]
        match_count = (pred == labels.data).sum()
        accuracy = float(match_count) / float(bs_)
        print(
            datetime.now(),
            'batch {}/{} with shape={}, accuracy={:.4f}'.format(
                i + 1, n_batches_test, images.shape, accuracy))
        # running average weighted by batch count (last batch may be smaller,
        # so this is only exact when all batches have equal size)
        avg_acc += accuracy / float(n_batches_test)
    # single concatenation of everything seen (raises on an empty loader,
    # same as the original crashing on a None .shape below)
    images_export = np.concatenate(images_batches, axis=0)
    labels_export = np.concatenate(labels_batches, axis=0)
    print(datetime.now(), 'test results: acc={:.4f}'.format(avg_acc))
    print(
        datetime.now(),
        'total batch to be exported with shape={}'.format(images_export.shape))
    export_test_data_to_numpy(images_export, labels_export, data_folder)
Example #3
0
net.initialize_weights()

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose the loss function

# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)  # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6,
                                            gamma=0.1)  # LR decay: x0.1 every 6 epochs

# ============================ step 5+/5 resume from checkpoint ============================

path_checkpoint = "./checkpoint_4_epoch.pkl"
checkpoint = torch.load(path_checkpoint)

# restore model weights
net.load_state_dict(checkpoint['model_state_dict'])

# restore optimizer state (momentum buffers, hyper-parameters)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

# epoch at which the checkpoint was written
start_epoch = checkpoint['epoch']

# Re-align the scheduler with the resumed epoch. NOTE(review): poking
# last_epoch directly is a workaround; newer PyTorch restores the scheduler
# via its own state_dict — confirm what this checkpoint actually stores.
scheduler.last_epoch = start_epoch

# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()

# resume training from the epoch after the checkpointed one
for epoch in range(start_epoch + 1, MAX_EPOCH):

    loss_mean = 0.
    correct = 0.
Example #4
0
    # DenseNet-40: growth rate 12, 50% transition compression, bottleneck
    # layers, 10 output classes — presumably CIFAR-10; confirm upstream.
    from model.densenet import DenseNet
    net = DenseNet(growthRate=12,
                   depth=40,
                   reduction=0.5,
                   bottleneck=True,
                   nClasses=10)
elif model_name == 'vgg':
    # VGG-16 variant for the same 10-class setup
    from model.vgg import VGG
    net = VGG('VGG16', num_classes=10)

if resume:
    # Load checkpoint saved as '<save_path>/<model_name>_ckpt.t7'.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(save_path), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(save_path + '/%s_ckpt.t7' % model_name)
    net.load_state_dict(checkpoint['net'])

if use_cuda:
    # GPU index taken from the command line. NOTE(review): no arg-count or
    # range check on sys.argv[3] — confirm callers always supply it.
    Device = int(sys.argv[3])
    #    Device = 0
    net.cuda(Device)
    cudnn.benchmark = True  # let cuDNN autotune conv algorithms for fixed input shapes

criterion = nn.CrossEntropyLoss()
# SGD with Nesterov momentum and weight decay — a standard CIFAR recipe
optimizer = optim.SGD(
    net.parameters(),
    lr=0.1,
    momentum=0.9,
    weight_decay=1e-4,
    nesterov=True,
)