Code Example #1
from torch.optim import lr_scheduler


def get_scheduler(optimizer, opt):
    """Return a learning-rate scheduler selected by opt.lr_policy."""
    if opt.lr_policy == 'lambda':
        # Keep the initial LR for opt.niter epochs, then decay it
        # linearly to zero over the following opt.niter_decay epochs.
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count -
                             opt.niter) / float(opt.niter_decay + 1)
            return lr_l

        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        # Multiply the LR by 0.1 every opt.lr_decay_iters epochs.
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=opt.lr_decay_iters,
                                        gamma=0.1)
    elif opt.lr_policy == 'plateau':
        # Shrink the LR by 5x when the monitored metric plateaus.
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   mode='min',
                                                   factor=0.2,
                                                   threshold=0.01,
                                                   patience=5)
    else:
        raise NotImplementedError(
            'learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
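
A minimal usage sketch, assuming an options object with the fields the function reads; the values below are illustrative, not taken from the original code:

import torch
from types import SimpleNamespace

# Hypothetical options: keep the LR flat for 100 epochs, then decay
# linearly over 100 more (field names match what get_scheduler reads).
opt = SimpleNamespace(lr_policy='lambda', epoch_count=1,
                      niter=100, niter_decay=100)
optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
scheduler = get_scheduler(optimizer, opt)

for epoch in range(200):
    # ... run one training epoch ...
    scheduler.step()  # updates optimizer.param_groups[0]['lr']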
Code Example #2
else:
    # Build the optimizer wrapper from the settings in the config file.
    optim = Optim(config.optim,
                  config.learning_rate,
                  config.max_grad_norm,
                  lr_decay=config.learning_rate_decay,
                  start_decay_at=config.start_decay_at)

# Register the trainable parameters; the center loss has learnable
# class centers, so its parameters must be optimized as well.
if config.use_center_loss:
    optim.set_parameters(
        list(model.parameters()) + list(center_loss.parameters()))
else:
    optim.set_parameters(list(model.parameters()))

if config.schedule:
    # scheduler = L.CosineAnnealingLR(optim.optimizer, T_max=config.epoch)
    # Drop the learning rate by 5x every 15 epochs.
    scheduler = L.StepLR(optim.optimizer, step_size=15, gamma=0.2)

# total number of parameters
param_count = sum(param.numel() for param in model.parameters())

# logging module
if not os.path.exists(config.log):
    os.mkdir(config.log)
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
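
The snippet above only constructs the scheduler; stepping it happens elsewhere in the training loop. A minimal sketch of how such a StepLR schedule is typically advanced per epoch; the optimizer and epoch count here are stand-ins, not from the original code:

import torch
from torch.optim import lr_scheduler as L

params = [torch.zeros(1, requires_grad=True)]  # stand-in for model.parameters()
optimizer = torch.optim.SGD(params, lr=0.1)
scheduler = L.StepLR(optimizer, step_size=15, gamma=0.2)

for epoch in range(45):
    # ... run one training epoch ...
    scheduler.step()  # LR: 0.1, then 0.02 from epoch 15, 0.004 from epoch 30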
Code Example #3
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import models
import cv2

# Load an ImageNet-pretrained ResNet-18 and replace the final
# fully connected layer with a two-class head.
model_ft = models.resnet18(pretrained=True)
# for param in model_ft.parameters():
#     param.requires_grad = False
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)

model_ft.load_state_dict(torch.load('savedInitialModel.pt'))

# Data mining until we are satisfied with our accuracy
accuracyHistory = []

criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# optimizer_ft = optim.SGD(model_ft.fc.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

# checkAccuracy is defined elsewhere in the original script; it
# evaluates the model and returns its accuracy.
acc = checkAccuracy(model_ft, criterion, optimizer_ft, exp_lr_scheduler)
accuracyHistory.append(acc)

cv2.waitKey(0)  # wait for a key press in any OpenCV window opened above
dataMine = 1
while True:
    try:
        # input() requires Python 3 (use raw_input under Python 2)
        dataMine = int(input("Continue data mining? (type 1) else (type 0) "))
    except ValueError:
        print("Please type either 1 to continue or 0 to exit.")
        continue
    else:
        if dataMine != 0 and dataMine != 1:
            print("Please type either 1 to continue or 0 to exit.")
            continue
        if dataMine == 0:
            break
        # ... run another round of data mining here ...