def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':

        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count -
                             opt.niter) / float(opt.niter_decay + 1)
            return lr_l

        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=opt.lr_decay_iters,
                                        gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   mode='min',
                                                   factor=0.2,
                                                   threshold=0.01,
                                                   patience=5)
    else:
        raise NotImplementedError(
            'learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
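
A minimal usage sketch for this helper, assuming an argparse-style `opt` namespace carrying the fields the function reads (the values below are illustrative only):

from types import SimpleNamespace

import torch
from torch import optim
from torch.optim import lr_scheduler

# Hypothetical option values, for illustration only.
opt = SimpleNamespace(lr_policy='lambda', epoch_count=1,
                      niter=100, niter_decay=100)

model = torch.nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01)
scheduler = get_scheduler(optimizer, opt)

for epoch in range(200):
    optimizer.step()   # training steps would go here
    scheduler.step()   # LR stays flat for ~niter epochs, then decays linearly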
Example #2
    # Snippet truncated above; presumably the optimizer is an SGD instance
    # built over the model's parameter groups, e.g.:
    optimizer = optim.SGD([
        {
            'params': model.parameters()
        },
    ],
                          lr=lr,
                          momentum=momentum,
                          weight_decay=weight_decay)

    def cb():
        print("Best Model reloaded")
        stateDict = torch.load("./pth/bestModelLP" + fineTuneStr + pruneStr +
                               ".pth",
                               map_location=mapLoc)
        model.load_state_dict(stateDict)

    # Note: `cb` is not an argument of PyTorch's ReduceLROnPlateau; this and
    # Example #4 presumably use a custom variant that fires the callback when
    # the LR is reduced (see the sketch after this example).
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               factor=0.5,
                                               patience=patience,
                                               verbose=True,
                                               cb=cb)
    ploter = LinePlotter()

    bestLoss = 100
    bestAcc = 0
    bestIoU = 0
    bestTPA = 0
    bestConf = torch.zeros(numClass, numClass)

    modelSeg.eval()

    for epoch in range(epochs):

        model.train()
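
PyTorch's `ReduceLROnPlateau` takes no `cb` argument, so Examples #2 and #4 presumably rely on a project-specific variant. A minimal sketch of such a subclass, assuming the callback should fire whenever the learning rate is actually reduced (it overrides a private hook of the base class, so this is illustrative, not canonical):

from torch.optim import lr_scheduler

class ReduceLROnPlateauWithCallback(lr_scheduler.ReduceLROnPlateau):
    """ReduceLROnPlateau that invokes `cb` after every LR reduction."""

    def __init__(self, optimizer, *args, cb=None, **kwargs):
        super().__init__(optimizer, *args, **kwargs)
        self.cb = cb

    def _reduce_lr(self, epoch):
        # `_reduce_lr` is a private hook called by `step()` when the monitored
        # metric has plateaued; chaining to it keeps the decay logic intact.
        super()._reduce_lr(epoch)
        if self.cb is not None:
            self.cb()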
Example #3
if use_cuda:
    net.cuda()
    assert torch.cuda.device_count() == 1, 'only a single GPU is supported'
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

criterion = my_loss.Wighted_L1_Loss().cuda()  # project-specific loss; see the sketch after this example
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay,
                      nesterov=args.nesterov,
                      dampening=args.dampening)

scheduler = lrs.ReduceLROnPlateau(optimizer, 'min')  # set up scheduler


# Training
def train(epoch):
    net.train()
    total_step_train = 0
    train_loss = 0.0
    error_sum_train = {'MSE': 0, 'RMSE': 0, 'ABS_REL': 0, 'LG10': 0, 'MAE': 0,
                       'DELTA1.02': 0, 'DELTA1.05': 0, 'DELTA1.10': 0,
                       'DELTA1.25': 0, 'DELTA1.25^2': 0, 'DELTA1.25^3': 0}

    tbar = tqdm(trainloader)
    for batch_idx, sample in enumerate(tbar):
        inputs, targets = sample['rgbd'], sample['depth']
        if use_cuda:
            # Snippet truncated here; presumably the batch moves to the GPU:
            inputs, targets = inputs.cuda(), targets.cuda()
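
`my_loss.Wighted_L1_Loss` is a project-specific module (the spelling is the source's own) and is not shown. For sparse depth targets, a weighted L1 loss commonly supervises only the valid, non-zero depth pixels; a hypothetical sketch:

import torch
import torch.nn as nn

class WeightedL1Loss(nn.Module):
    """L1 loss averaged over valid (non-zero) target pixels only."""

    def forward(self, pred, target):
        mask = (target > 0).float()                  # 1 where depth is known
        diff = torch.abs(pred - target) * mask
        return diff.sum() / mask.sum().clamp(min=1)  # guard against empty mask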
Example #4
    # Snippet truncated above; presumably the SGD optimizer spans all three
    # sub-networks, e.g.:
    optimizer = optim.SGD([
        {
            'params': modelConv.parameters()
        },
        {
            'params': modelClass.parameters()
        },
        {
            'params': modelHess.parameters()
        },
    ],
                                lr=lr,
                                momentum=momentum,
                                weight_decay=weight_decay)
    # Again a custom ReduceLROnPlateau variant with a reload callback
    # (see the note and sketch in Example #2).
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               factor=0.2,
                                               patience=10,
                                               verbose=True,
                                               threshold=1e-3,
                                               cb=cb)

    ploter = LinePlotter()

    bestLoss = 100
    bestAcc = 0
    bestTest = 0

    for epoch in range(epochs):

        modelConv.train()
        modelClass.train()
        modelHess.train()
Example #5
def main():
    # Hyper Parameters
    EPOCH = 50  # number of full passes over the training data
    BATCH_SIZE = 64
    LR = 5e-3  # learning rate

    #load dictionary
    dic_training, dic_testing = load_dic()

    # pull the keys and values out of the dicts once, as lists
    L_train_keys = dic_training.keys()
    L_train_values = dic_training.values()

    L_test_keys = dic_testing.keys()
    L_test_values = dic_testing.values()

    #Build training & testing set
    training_set = UCF101_rgb_data(keys=L_train_keys,
                                   values=L_train_values,
                                   root_dir='/store/ucf101/spatial/',
                                   transform=transforms.Compose([
                                       transforms.Scale(256),  # Scale is the old torchvision name for Resize
                                       transforms.RandomCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
                                   ]))

    testing_set = UCF101_rgb_data(keys=L_test_keys,
                                  values=L_test_values,
                                  root_dir='/store/ucf101/spatial/',
                                  transform=transforms.Compose([
                                      transforms.Scale(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
                                  ]))

    #Data Loader
    train_loader = DataLoader(dataset=training_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=8)

    test_loader = DataLoader(dataset=testing_set,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=8)

    #Model
    Model = models.resnet101(pretrained=True)

    #Replace fc1000 with fc101
    num_ftrs = Model.fc.in_features
    Model.fc = nn.Linear(num_ftrs, 101)

    #convert model to gpu
    Model = Model.cuda()

    #Loss function and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(Model.parameters(), LR, momentum=0.9)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=0,
                                               verbose=True)

    cudnn.benchmark = True

    #Training & testing
    best_prec1 = 0
    for epoch in range(1, EPOCH + 1):

        print('****' * 40)
        print('Epoch:[{0}/{1}][training stage]'.format(epoch, EPOCH))
        print('****' * 40)
        # train for one epoch
        train(train_loader, Model, criterion, optimizer, epoch)

        # evaluate on validation set
        print('****' * 40)
        print('Epoch:[{0}/{1}][validation stage]'.format(epoch, EPOCH))
        print('****' * 40)

        prec1, val_loss, dic_video_level_preds = validate(
            test_loader, Model, criterion, 0, L_test_keys, BATCH_SIZE)

        #Call lr_scheduler
        scheduler.step(val_loss)

        #Calculate Video level acc
        top1, top5 = video_level_acc(dic_video_level_preds)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': 'resnet101',
                'state_dict': Model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, dic_video_level_preds)
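
`save_checkpoint` is called in Examples #5-#7 but never defined here. A common pattern for it (the file names below are placeholders, not from the source):

import shutil
import torch

def save_checkpoint(state, is_best, video_preds=None,
                    filename='checkpoint.pth.tar'):
    """Save the latest state; keep a copy of the best one seen so far."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
        if video_preds is not None:
            # Examples #5 and #6 also pass the video-level predictions;
            # presumably they are persisted alongside the best checkpoint.
            torch.save(video_preds, 'best_video_preds.pth')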
Example #6
def main():

    global args, best_prec1
    args = parser.parse_args()

    # Hyper Parameters
    EPOCH = args.epochs
    BATCH_SIZE = args.batch_size
    LR = args.lr
    print('==> Create Model')
    #Create Model
    Model = models.resnet101(pretrained=True)
    #Replace fc1000 with fc101
    num_ftrs = Model.fc.in_features
    Model.fc = nn.Linear(num_ftrs, 101)
    #convert model to gpu
    Model = Model.cuda()

    #Loss function and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(Model.parameters(), LR, momentum=0.9)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=0,
                                               verbose=True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("==> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            Model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("==> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("==> no checkpoint found at '{}'".format(args.resume))

    print('==> Preparing training and validation data')
    #load dictionary
    dic_training, dic_testing = load_dic()

    # pull the keys and values out of the dicts once, as lists
    L_train_keys = dic_training.keys()
    L_train_values = dic_training.values()

    L_test_keys = dic_testing.keys()
    L_test_values = dic_testing.values()

    #Build training & testing set
    training_set = UCF101_rgb_data(keys=L_train_keys,
                                   values=L_train_values,
                                   root_dir='/store/ucf101/spatial/',
                                   transform=transforms.Compose([
                                       transforms.Scale(256),
                                       transforms.RandomCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
                                   ]))

    testing_set = UCF101_rgb_data(keys=L_test_keys,
                                  values=L_test_values,
                                  root_dir='/store/ucf101/spatial/',
                                  transform=transforms.Compose([
                                      transforms.Scale(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
                                  ]))

    #Data Loader
    train_loader = DataLoader(dataset=training_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=8)

    test_loader = DataLoader(dataset=testing_set,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=8)

    cudnn.benchmark = True
    # Evaluation mode
    if args.evaluate:
        validate(test_loader, Model, criterion, L_test_keys, BATCH_SIZE)
        return

    print('==> Start training')
    #Training & testing
    for epoch in range(args.start_epoch, EPOCH):

        # train for one epoch
        print(' Epoch:[{0}/{1}][training stage]'.format(epoch, EPOCH))
        train(train_loader, Model, criterion, optimizer, epoch)

        # evaluate on validation set
        print(' Epoch:[{0}/{1}][validation stage]'.format(epoch, EPOCH))
        prec1, val_loss, dic_video_level_preds = validate(
            test_loader, Model, criterion, 0, L_test_keys, BATCH_SIZE)

        #Call lr_scheduler
        scheduler.step(val_loss)

        #Calculate Video level acc
        top1, top5 = video_level_acc(dic_video_level_preds)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': 'resnet101',
                'state_dict': Model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, dic_video_level_preds)
Example #7
def main():
    # Hyper Parameters
    EPOCH = 50  # number of full passes over the training data
    BATCH_SIZE = 64
    LR = 1e-3  # learning rate
    nb_classes = 101

    #load dictionary
    dic_training, dic_testing = load_dic()

    # pull the keys and values out of the dicts once, as lists
    L_train_keys = dic_training.keys()
    L_train_values = dic_training.values()

    L_test_keys = dic_testing.keys()
    L_test_values = dic_testing.values()

    #Build training & testing set
    training_set = UCF101_opf_data(
        keys=L_train_keys,
        values=L_train_values,
        root_dir='/home/jeffrey/data/tvl1_flow/',
        transform=transforms.Compose([
            transforms.Scale(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            #transforms.Normalize((0.5,), (1.0,))
        ]))
    testing_set = UCF101_opf_data(
        keys=L_test_keys,
        values=L_test_values,
        root_dir='/home/jeffrey/data/tvl1_flow/',
        transform=transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            #transforms.Normalize((0.5,), (1.0,))
        ]))
    #Data Loader
    train_loader = DataLoader(dataset=training_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=8)
    test_loader = DataLoader(dataset=testing_set,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=8)

    # Build 3 channel ResNet101 with pre-trained weight
    ResNet101 = models.resnet101(pretrained=True)

    #Build 20 channel ResNet101
    Model = mR20.resnet101()

    #1.Get the weight of first convolution layer (torch.FloatTensor of size 64x3x7x7).
    conv1_weight = ResNet101.state_dict()['conv1.weight']
    #2.Copy the full pre-trained state dict.
    dic = ResNet101.state_dict()

    #3.Average across the RGB channels and replicate the average over the
    #  target network's input channels (20 here); see the w3_to_w20 sketch
    #  at the end of this listing.
    conv1_weight20 = w3_to_w20(conv1_weight)
    dic['conv1.weight'] = conv1_weight20
    Model.load_state_dict(dic)

    #Replace fc1000 with fc101
    num_ftrs = Model.fc.in_features
    Model.fc = nn.Linear(num_ftrs, nb_classes)

    #convert model to gpu
    Model = Model.cuda()

    #Loss function
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer and lr_scheduler
    optimizer = torch.optim.SGD(Model.parameters(), LR, momentum=0.9)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=0,
                                               verbose=True)

    cudnn.benchmark = True

    #Training & testing
    best_prec1 = 0
    for epoch in range(1, EPOCH + 1):

        print('****' * 40)
        print('Epoch:[{0}/{1}][training stage]'.format(epoch, EPOCH))
        print('****' * 40)
        # train for one epoch
        train(train_loader, Model, criterion, optimizer, epoch)

        # evaluate on validation set
        print('****' * 40)
        print('Epoch:[{0}/{1}][validation stage]'.format(epoch, EPOCH))
        print('****' * 40)

        prec1, val_loss = validate(test_loader, Model, criterion, epoch)

        #Call lr_scheduler
        scheduler.step(val_loss)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch,
                'arch': 'resnet101',
                'state_dict': Model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
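
The helper `w3_to_w20` used in Example #7 is likewise not shown. Going by the comment at its call site, it averages the pre-trained 64x3x7x7 `conv1` weight over the three RGB input channels and replicates that average across the 20 optical-flow input channels; a sketch under that assumption:

import torch

def w3_to_w20(conv1_weight):
    """Map a 64x3x7x7 RGB conv1 weight to 64x20x7x7 for stacked flow input."""
    mean_w = conv1_weight.mean(dim=1, keepdim=True)  # average RGB -> 64x1x7x7
    return mean_w.repeat(1, 20, 1, 1)                # replicate -> 64x20x7x7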