# Example 1 (score: 0)
 def build_model(self):
     """Instantiate the network and set up its loss, optimizer and LR scheduler."""
     print('==> Build model and setup loss and optimizer')
     # Pretrained ResNet-101 over 3-channel input, moved onto the GPU.
     self.model = resnet101(pretrained=True, channel=3).cuda()
     # Classification loss, also on the GPU.
     self.criterion = nn.CrossEntropyLoss().cuda()
     # Plain SGD with momentum; the learning rate comes from the trainer config.
     self.optimizer = torch.optim.SGD(self.model.parameters(),
                                      lr=self.lr,
                                      momentum=0.9)
     # Shrink the LR once the monitored quantity stops improving for 5 epochs.
     self.scheduler = ReduceLROnPlateau(self.optimizer, 'min',
                                        patience=5, verbose=True)
# Example 2 (score: 0)
def main():
    """Train a ResNet-101 classifier on the COCO motivations dataset."""
    # load data
    print("Loading dataset...")
    train_data = COCO_motivations_Dataset(data_root, train=True)
    val_data = COCO_motivations_Dataset(data_root, train=False)

    # Per-GPU batch size of 2, scaled by the number of GPUs in use (if any).
    batch_size = 2
    if len(params.gpus) != 0:
        batch_size = batch_size * len(params.gpus)

    train_dataloader = DataLoader(train_data, batch_size=batch_size,
                                  shuffle=True, num_workers=num_workers)
    print('train dataset len: {}'.format(len(train_dataloader.dataset)))

    val_dataloader = DataLoader(val_data, batch_size=batch_size,
                                shuffle=False, num_workers=num_workers)
    print('val dataset len: {}'.format(len(val_dataloader.dataset)))

    # ResNet-101 backbone (1000-way ImageNet head), then replace the final
    # fully-connected layer with a 256-unit one (2048 = 512 * 4 inputs).
    model = resnet101(pretrained=False, modelpath=model_path,
                      num_classes=1000)  # batch_size=60, 1GPU Memory > 9000M
    model.fc = nn.Linear(512 * 4, 256)

    # Optimize only the parameters that still require gradients.
    trainable_vars = [p for p in model.parameters() if p.requires_grad]
    print("Training with sgd")
    params.optimizer = torch.optim.SGD(trainable_vars, lr=init_lr,
                                       momentum=momentum,
                                       weight_decay=weight_decay,
                                       nesterov=nesterov)

    # Decay the LR when the monitored metric plateaus; wait through a
    # cooldown window before counting bad epochs again.
    params.lr_scheduler = ReduceLROnPlateau(params.optimizer, 'min',
                                            factor=lr_decay, patience=10,
                                            cooldown=10, verbose=True)
    trainer = Trainer(model, params, train_dataloader, val_dataloader)
    trainer.train()
 def build_model(self):
     """Build the pretrained ResNet-101 plus loss, optimizer and LR scheduler."""
     print('==> Build model and setup loss and optimizer')
     # Full fine-tuning: the entire pretrained backbone is trainable
     # (no parameters are frozen), on the GPU.
     self.model = resnet101(pretrained=True, channel=3).cuda()
     # Cross-entropy loss on the GPU.
     self.criterion = nn.CrossEntropyLoss().cuda()
     # SGD with momentum over every model parameter.
     self.optimizer = torch.optim.SGD(self.model.parameters(),
                                      lr=self.lr,
                                      momentum=0.9)
     # Reduce the LR after `arg.lr_patience` epochs without improvement.
     self.scheduler = ReduceLROnPlateau(self.optimizer, 'min',
                                        patience=arg.lr_patience,
                                        verbose=True)
# Example 4 (score: 0)
# Load the Hand dataset splits.
print("Loading dataset...")
train_data = Hand(data_root, train=True)
val_data = Hand(data_root, train=False)

# Scale the configured per-GPU batch size by the number of GPUs in use.
if len(params.gpus) != 0:
    batch_size = batch_size * len(params.gpus)

train_dataloader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)
print('train dataset len: {}'.format(len(train_dataloader.dataset)))

val_dataloader = DataLoader(val_data,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)
print('val dataset len: {}'.format(len(val_dataloader.dataset)))

# ResNet-101 backbone (1000-way head), then swap in a 6-class classifier
# (2048 = 512 * 4 input features).
model = resnet101(pretrained=False, modelpath=model_path, num_classes=1000)  # batch_size=60, 1GPU Memory > 9000M
model.fc = nn.Linear(512 * 4, 6)

# Optimize only the parameters that still require gradients.
trainable_vars = [p for p in model.parameters() if p.requires_grad]
print("Training with sgd")
params.optimizer = torch.optim.SGD(trainable_vars,
                                   lr=init_lr,
                                   momentum=momentum,
                                   weight_decay=weight_decay,
                                   nesterov=nesterov)

# Decay the LR on plateau, with a cooldown window between reductions.
params.lr_scheduler = ReduceLROnPlateau(params.optimizer,
                                        'min',
                                        factor=lr_decay,
                                        patience=10,
                                        cooldown=10,
                                        verbose=True)
trainer = Trainer(model, params, train_dataloader, val_dataloader)
trainer.train()
# Example 5 (score: 0)
# Test-time preprocessing: resize to the 224x224 network input and
# normalize with the standard ImageNet channel statistics.
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

# Test split of the Dogs-vs-Cats data; no shuffling so prediction order
# stays aligned with the dataset order.
testset = DogvsCat('./data/test',
                   transform=transform_test,
                   train=False,
                   test=True)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batchSize,
                                         shuffle=False,
                                         num_workers=num_workers)
# ResNet-101 with a 2-way (binary) classification head, restored from a
# saved checkpoint and switched to evaluation mode on the GPU.
model = resnet101(pretrained=True)
model.fc = nn.Linear(2048, 2)
model.load_state_dict(torch.load('model/model.pth'))
model.cuda()
model.eval()
results = []
with torch.no_grad():
    for image, label in testloader:
        image = Variable(image.cuda())
        out = model(image)
        label = F.softmax(out, dim=1)
        label = label.numpy().tolist()
        _, predicted = torch.max(out.data, 1)
        predicted = predicted.data.cpu().numpy().tolist()
        results.extend([[i, ";".join(str(j))]