Example 1
def export(model_name, ckpt_name):
    ckpt_path = ckpt_name + '.pth'
    onnx_model = ckpt_name + '.onnx'

    # model definition
    if model_name == 'lenet':
        from model.lenet import LeNet
        model = LeNet()
    else:
        from model.modelzoo import create_model
        model, input_size = create_model(model_name, n_classes=120)

    # load weights
    ckpt = torch.load(ckpt_path)
    model.load_state_dict(ckpt['state_dict'])

    # evaluation mode
    model.eval()

    # create the input placeholder for the model
    # note: we have to specify the size of a batch of input images
    if model_name == 'lenet':
        input_placeholder = torch.randn(1, 1, 28, 28)
    elif model_name == 'inception_v3':
        input_placeholder = torch.randn(1, 3, 299, 299)
    else:
        input_placeholder = torch.randn(1, 3, 224, 224)

    # export
    torch.onnx.export(model, input_placeholder, onnx_model)
    print('{} exported!'.format(onnx_model))
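
A quick sanity check on the file produced by export() is to reload it with the onnx package and run the model checker. A minimal sketch, assuming the onnx package is installed and a LeNet checkpoint was exported as above:

import onnx

# hypothetical file name produced by export('lenet', 'lenet')
onnx_model = onnx.load('lenet.onnx')
onnx.checker.check_model(onnx_model)   # raises if the graph is malformed
print(onnx.helper.printable_graph(onnx_model.graph))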
Example 2
def test(model_name, model_ckpt, dataset_name, data_folder):
    # model definition
    if model_name == 'lenet':
        from model.lenet import LeNet
        model = LeNet()
    else:
        from model.modelzoo import create_model
        model, input_size = create_model(model_name, n_classes=120)
    model = apply_cuda(model)

    # load weights
    ckpt = torch.load(model_ckpt)
    model.load_state_dict(ckpt['state_dict'])

    # data source
    batch_size = 200
    if dataset_name == 'mnist':
        test_loader = load_data('test', batch_size, data_folder, dataset_name)
    else:
        test_loader = load_data('test', batch_size, data_folder, dataset_name,
                                input_size)
    n_batches_test = len(test_loader)

    print('==== test phase ====')
    avg_acc = float(0)
    model.eval()
    images_export, labels_export = None, None
    for i, (images, labels) in enumerate(test_loader):
        if images_export is None or labels_export is None:
            images_export = images.data.numpy()
            labels_export = labels.data.numpy()
        else:
            images_export = np.concatenate(
                (images_export, images.data.numpy()), axis=0)
            labels_export = np.concatenate(
                (labels_export, labels.data.numpy()), axis=0)
        images, labels = apply_cuda(images), apply_cuda(labels)
        logits = model(images)
        _, pred = torch.max(logits.data, 1)
        if i == 0:
            print(images[0])
            print(logits[0], pred[0], labels[0])
        bs_ = labels.data.size()[0]
        match_count = (pred == labels.data).sum()
        accuracy = float(match_count) / float(bs_)
        print(
            datetime.now(),
            'batch {}/{} with shape={}, accuracy={:.4f}'.format(
                i + 1, n_batches_test, images.shape, accuracy))
        avg_acc += accuracy / float(n_batches_test)
    print(datetime.now(), 'test results: acc={:.4f}'.format(avg_acc))
    print(
        datetime.now(),
        'total batch to be exported with shape={}'.format(images_export.shape))
    export_test_data_to_numpy(images_export, labels_export, data_folder)
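
Examples 2 and 11 rely on helpers defined elsewhere in the same repository (apply_cuda, load_data, export_test_data_to_numpy). apply_cuda is not shown in the source; a plausible minimal version, offered as an assumption only, is:

import torch

def apply_cuda(x):
    # move a model or tensor to the GPU when one is available, otherwise no-op
    return x.cuda() if torch.cuda.is_available() else x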
Example 3
def create_model_optimizer_scheduler(args, dataset_class, optimizer='adam', scheduler='steplr',
                                     load_optimizer_scheduler=False):
    if args.arch == 'wideresnet':
        model = WideResNet(depth=args.layers,
                           num_classes=dataset_class.num_classes,
                           widen_factor=args.widen_factor,
                           dropout_rate=args.drop_rate)
    elif args.arch == 'densenet':
        model = densenet121(num_classes=dataset_class.num_classes)
    elif args.arch == 'lenet':
        model = LeNet(num_channels=3, num_classes=dataset_class.num_classes,
                      droprate=args.drop_rate, input_size=dataset_class.input_size)
    elif args.arch == 'resnet':
        model = resnet18(num_classes=dataset_class.num_classes, input_size=dataset_class.input_size,
                         drop_rate=args.drop_rate)
    else:
        raise NotImplementedError

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    if optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                                    nesterov=args.nesterov, weight_decay=args.weight_decay)

    if scheduler == 'steplr':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.2)
    else:
        args.iteration = args.fixmatch_k_img // args.batch_size
        args.total_steps = args.fixmatch_epochs * args.iteration
        scheduler = get_cosine_schedule_with_warmup(
            optimizer, args.fixmatch_warmup * args.iteration, args.total_steps)

    if args.resume:
        if load_optimizer_scheduler:
            model, optimizer, scheduler = resume_model(args, model, optimizer, scheduler)
        else:
            model, _, _ = resume_model(args, model)

    return model, optimizer, scheduler
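
get_cosine_schedule_with_warmup is not defined in this snippet. FixMatch-style codebases typically build it on LambdaLR; the sketch below follows that pattern (the 7/16 cycle factor comes from the FixMatch reference implementation, but treat the exact details as an assumption):

import math
from torch.optim.lr_scheduler import LambdaLR

def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps,
                                    num_training_steps, num_cycles=7. / 16.):
    def _lr_lambda(current_step):
        # linear warmup for the first num_warmup_steps steps
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # cosine decay for the rest of training
        progress = float(current_step - num_warmup_steps) / \
            float(max(1, num_training_steps - num_warmup_steps))
        return max(0., math.cos(math.pi * num_cycles * progress))
    return LambdaLR(optimizer, _lr_lambda)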
Example 4
def get_model(model_name):
    if model_name == 'resnet':
        from model.resnet import ResNet18
        net = ResNet18(10)
    elif model_name == 'lenet':
        from model.lenet import LeNet
        net = LeNet(10)
    elif model_name == 'densenet':
        from model.densenet import DenseNet
        net = DenseNet(growthRate=12,
                       depth=40,
                       reduction=0.5,
                       bottleneck=True,
                       nClasses=10)
    elif model_name == 'vgg':
        from model.vgg import VGG
        net = VGG('VGG16', num_classes=10)
    else:
        # guard against silently returning an undefined name
        raise ValueError('unknown model name: {}'.format(model_name))

    return net
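
Usage is a one-liner; the imports happen lazily inside the function, so only the selected model's module has to be importable:

net = get_model('lenet')   # returns LeNet(10)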
Example 5
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# build the MyDataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# build the DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5: model ============================

net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5: loss function ============================
criterion = nn.CrossEntropyLoss()                                                   # choose the loss function

# ============================ step 4/5: optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)                        # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)     # set the LR decay policy

# ============================ step 5/5: training ============================
train_curve = list()
valid_curve = list()

for epoch in range(MAX_EPOCH):
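
The snippet is cut off at the epoch loop. A typical body, consistent with the names defined above (net, criterion, optimizer, scheduler, train_curve) but reconstructed here as a sketch rather than the original code:

for epoch in range(MAX_EPOCH):
    net.train()
    for i, (inputs, labels) in enumerate(train_loader):
        outputs = net(inputs)                  # forward pass
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()                        # backward pass
        optimizer.step()                       # update weights
        train_curve.append(loss.item())
    scheduler.step()                           # decay the learning rate once per epoch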
Example 6
    split_dir = "/home/liuhy/res/deep-learning/00.框架/pytorch/class2/01.rmb/dataset/rmb_split"
    train_dir = os.path.join(split_dir, "train")

    transform_compose = transforms.Compose([transforms.Resize((64, 128)), transforms.ToTensor()])
    train_data = RMBDataset(data_dir=train_dir, transform=transform_compose)
    train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)
    data_batch, label_batch = next(iter(train_loader))

    img_grid = vutils.make_grid(data_batch, nrow=4, normalize=True, scale_each=True)
    # img_grid = vutils.make_grid(data_batch, nrow=4, normalize=False, scale_each=False)
    writer.add_image("input img", img_grid, 0)

    writer.close()

# ----------------------------------- add_graph -----------------------------------
# flag = 0
flag = 1
if flag:
    writer = SummaryWriter(comment='_graph', filename_suffix="_graph")

    # model
    fake_img = torch.randn(1, 3, 32, 32)
    lenet = LeNet(classes=2)
    writer.add_graph(lenet, fake_img)
    writer.close()

    from torchsummary import summary

    print(summary(lenet, (3, 32, 32), device="cpu"))
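
To view the logged image grid and graph, point TensorBoard at the log directory (SummaryWriter writes under ./runs by default): tensorboard --logdir=runs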
Example 7
            if self.transform is not None:
                fake_data = self.transform(fake_data)

            return fake_data, fake_label

        def __len__(self):
            return self.num_data

    # ============================ step 1/5: data ============================
    channel = 3  # 1 3
    img_size = 32  # 36 32
    train_data = FooDataset(num_data=32, shape=(channel, img_size, img_size))
    train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)

    # ============================ step 2/5: model ============================
    net = LeNet(classes=2)
    # ============================ step 3/5: loss function ============================
    criterion = nn.CrossEntropyLoss()
    # ============================ step 4/5: optimizer ============================
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # choose the optimizer
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    # ============================ step 5/5: training ============================
    data, label = next(iter(train_loader))
    outputs = net(data)

# ========================== 5
# AttributeError: 'DataParallel' object has no attribute 'linear'
flag = 0
# flag = 1
if flag:
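
Example 7 starts inside FooDataset, so its constructor and the top of __getitem__ are missing. Judging from the call FooDataset(num_data=32, shape=(channel, img_size, img_size)) and the attributes used above, the missing part plausibly looks like this sketch (an assumption, not the original code):

import torch
from torch.utils.data import Dataset

class FooDataset(Dataset):
    def __init__(self, num_data, shape, transform=None):
        self.num_data = num_data
        self.shape = shape
        self.transform = transform

    def __getitem__(self, item):
        # synthesize a random image-like tensor and a binary label,
        # then fall through to the transform/return lines shown above
        fake_data = torch.randn(self.shape)
        fake_label = torch.randint(0, 2, size=(1,)).item()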
Example 8
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# build the MyDataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# build the DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5: model ============================

net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5: loss function ============================
criterion = nn.CrossEntropyLoss()                                                   # choose the loss function

# ============================ step 4/5: optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)                        # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)     # set the LR decay policy

# ============================ step 5/5: training ============================
train_curve = list()
valid_curve = list()

start_epoch = -1
for epoch in range(start_epoch+1, MAX_EPOCH):
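
The start_epoch = -1 default suggests this variant supports resuming from a checkpoint. A minimal resume block that would precede the loop, with hypothetical file and key names:

checkpoint = torch.load('checkpoint_interval.pkl')             # hypothetical file
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']
scheduler.last_epoch = start_epoch                             # keep the LR schedule in step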
Example 9
# Perform the train/test split, allocating 75% of the data to training and the remaining 25% to testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels, test_size=0.25, random_state=42)

# Convert the labels from integers to one-hot vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# Construct the image generator to perform data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                         horizontal_flip=True, fill_mode="nearest")

# With the data preprocessed, we build and compile the model
print("[UPDATE]: We are now compiling the model.")
model = LeNet.build(width=28, height=28, depth=3, classes=2)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
              metrics=["accuracy"])
# Now that the model is compiled, we train it on the data
print("[UPDATE]: We are now training the model.")
H = model.fit(x=aug.flow(trainX, trainY, batch_size=BS),
              validation_data=(testX, testY), steps_per_epoch=len(
                  trainX) // BS,
              epochs=EPOCHS, verbose=1)

# Now that training is done, we save the model to the path given by args["model"]
print("[UPDATE]: Training is done. We are serialising the network.")
model.save(args["model"], save_format="h5")

# Visualise the training performance of the model using matplotlib
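
The snippet ends just before the plotting code. A typical continuation plots the Keras History object H collected above; history key names vary by Keras version ("acc" vs "accuracy"), so treat this as a sketch:

import numpy as np
import matplotlib.pyplot as plt

plt.style.use("ggplot")
plt.figure()
N = np.arange(0, EPOCHS)
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")      # "acc" on older Keras
plt.plot(N, H.history["val_accuracy"], label="val_acc")    # "val_acc" on older Keras
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])   # hypothetical --plot argument, like --model above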
Example 10
testset = torchvision.datasets.STL10(root='stl10',
                                     split='test',
                                     download=False,
                                     transform=test_transform)
test_loader = torch.utils.data.DataLoader(testset,
                                          batch_size=50,
                                          shuffle=False,
                                          num_workers=8,
                                          pin_memory=True)

if model_name == 'resnet':
    from model.resnet import ResNet18
    net = ResNet18(10)
elif model_name == 'lenet':
    from model.lenet import LeNet
    net = LeNet(10)
elif model_name == 'densenet':
    from model.densenet import DenseNet
    net = DenseNet(growthRate=12,
                   depth=40,
                   reduction=0.5,
                   bottleneck=True,
                   nClasses=10)
elif model_name == 'vgg':
    from model.vgg import VGG
    net = VGG('VGG16', num_classes=10)

if resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(save_path), 'Error: no checkpoint directory found!'
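
The resume branch is cut off after the directory check. Training scripts in this style usually continue by loading the checkpoint dict and restoring the network; the file and key names below are assumptions:

    checkpoint = torch.load(os.path.join(save_path, 'ckpt.pth'))
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']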
Example 11
def train(batch_size=50, lr=0.01, data_folder='data', dataset_name='mnist', model_name='lenet', max_epochs=10, log_freq=100):
  # model definition
  if model_name == 'lenet':
    from model.lenet import LeNet
    model = LeNet()
  else:
    from model.modelzoo import create_model
    model, input_size = create_model(model_name, n_classes=120)
  model = apply_cuda(model)
  
  # data source
  if dataset_name == 'mnist':
    train_loader = load_data('train', batch_size, data_folder, dataset_name)
    eval_loader = load_data('test', batch_size, data_folder, dataset_name)
  else:
    train_loader = load_data('train', batch_size, data_folder, dataset_name, input_size)
    eval_loader = load_data('test', batch_size, data_folder, dataset_name, input_size)
  n_batches_train = len(train_loader)
  n_batches_eval = len(eval_loader)
  print(
      datetime.now(),
      'batch size = {}'.format(batch_size),
      'number of batches for training = {}'.format(n_batches_train),
      'number of batches for evaluation = {}'.format(n_batches_eval))

  # optimizer and loss definition
  criterion = torch.nn.CrossEntropyLoss()
  optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
  
  for epoch in range(max_epochs):
    print(datetime.now(), 'epoch: {}/{}'.format(epoch+1, max_epochs))
    
    # training set
    print('==== training phase ====')
    avg_loss = float(0)
    avg_acc = float(0)
    model.train()
    for step, (images, labels) in enumerate(train_loader):
      optimizer.zero_grad()
      images, labels = apply_cuda(images), apply_cuda(labels)
      images, labels = apply_var(images), apply_var(labels)
      # forward pass
      if model_name == 'inception_v3':
        logits, aux_logits = model(images)
        loss1 = criterion(logits, labels)
        loss2 = criterion(aux_logits, labels)
        loss = loss1 + 0.4*loss2
      else:
        logits = model(images)
        loss = criterion(logits, labels)
      _, pred = torch.max(logits.data, 1)
      bs_ = labels.data.size()[0]
      match_count = (pred == labels.data).sum()
      accuracy = float(match_count)/float(bs_)
      avg_loss += loss.item()/float(n_batches_train)
      avg_acc += accuracy/float(n_batches_train)
      # backward pass
      loss.backward()
      optimizer.step()
      if (step+1) % log_freq == 0:
        print(
            datetime.now(),
            'training step: {}/{}'.format(step+1, n_batches_train),
            'loss={:.5f}'.format(loss.item()),
            'acc={:.4f}'.format(accuracy))
    print(
        datetime.now(),
        'training ends with avg loss={:.5f}'.format(avg_loss),
        'and avg acc={:.4f}'.format(avg_acc))
    # validation set
    print('==== validation phase ====')
    avg_acc = float(0)
    model.eval()
    for images, labels in eval_loader:
      images, labels = apply_cuda(images), apply_cuda(labels)
      logits = model(images)
      _, pred = torch.max(logits.data, 1)
      bs_ = labels.data.size()[0]
      match_count = (pred == labels.data).sum()
      accuracy = float(match_count)/float(bs_)
      avg_acc += accuracy/float(n_batches_eval)
    print(
        datetime.now(),
        'evaluation results: acc={:.4f}'.format(avg_acc))
    
    # save the model for every epoch
    ckpt_path = '{}_{}_bs{}_lr{}_ep{}.pth'.format(
        model_name,
        dataset_name,
        batch_size,
        lr,
        epoch)
    torch.save({
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'avg_loss': avg_loss,
        'avg_acc': avg_acc}, ckpt_path)
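
The checkpoints saved here carry the 'state_dict' key that Examples 1 and 2 expect, so the three functions compose directly; file names follow the format string above. A possible session:

train(batch_size=50, lr=0.01, dataset_name='mnist', model_name='lenet', max_epochs=10)
test('lenet', 'lenet_mnist_bs50_lr0.01_ep9.pth', 'mnist', 'data')
export('lenet', 'lenet_mnist_bs50_lr0.01_ep9')   # reads the .pth, writes the .onnx

On current PyTorch, the evaluation loops in Examples 2 and 11 would normally be wrapped in torch.no_grad(); the Variable-era .data accesses here predate that idiom.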