Code Example #1
File: train.py Project: tunanzhichi/faceSwapProjects
# Imports needed by this excerpt (torch / DataLoader are standard; soft_renderer
# is assumed to be the SoftRas package behind the `sr` alias used below).
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import soft_renderer as sr

# The opening of this call is cut off in the excerpt; reconstructed to mirror
# the val_dataloader call below.
train_dataloader = DataLoader(train_set,
                              batch_size=BATCH_SIZE,
                              num_workers=4,
                              shuffle=True)
val_dataloader = DataLoader(val_set,
                            batch_size=BATCH_SIZE,
                            num_workers=4,
                            shuffle=False)

# -------------------------- Model loading ------------------------------
model = BaseModel(IF_PRETRAINED=True)
model.to(device)
if MODEL_LOAD_PATH is not None:
    # Load the checkpoint once; it holds both 'model' and 'optimizer' states.
    checkpoint = torch.load(MODEL_LOAD_PATH)
    model.load_state_dict(checkpoint['model'])

# -------------------------- Optimizer loading --------------------------
optimizer = optim.Adam(model.parameters(), lr=LR)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                    factor=0.2,
                                                    patience=5)
if MODEL_LOAD_PATH is not None:
    optimizer.load_state_dict(checkpoint['optimizer'])

# ------------------------- Loss setup ----------------------------------
# Camera pose used by the differentiable renderer
camera_distance = 2.732
elevation = 0
azimuth = 0

renderer = sr.SoftRenderer(image_size=224,
                           sigma_val=1e-4,
                           aggr_func_rgb='hard',
                           camera_mode='look_at')
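
The checkpoint loaded above stores 'model' and 'optimizer' states, and the
ReduceLROnPlateau scheduler is created but never stepped within the excerpt. A
minimal sketch of the matching save/step side, assuming a hypothetical
validate() helper that returns the epoch's validation loss:

# Sketch only: validate() is an assumption, not project code.
for epoch in range(NUM_EPOCHS):
    val_loss = validate(model, val_dataloader)
    # ReduceLROnPlateau monitors a metric, so pass the value to step()
    lr_scheduler.step(val_loss)
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               f'checkpoint_epoch{epoch}.pth')
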
Code Example #2
train_metrics = init_metrics()
test_metrics = init_metrics()

print('--- SETTINGS ---')
print('Number of sentiments to classify:', args.num_sentiments)
print('Learning rate:', INITIAL_LR)
print('Number of epochs per fold:', NUM_EPOCHS)
print('Use gaze features:', args.use_gaze)

print('\n> Starting 10-fold CV.')
for k, (train_loader, test_loader) in enumerate(dataset.split_cross_val(10)):
    # re-initialize the model and optimizer for every fold
    model = BaseModel(lstm_units,
                      dataset.max_sentence_length, args.num_sentiments,
                      initial_word_embedding.clone(), args.use_gaze)
    optimizer = SGD(model.parameters(),
                    lr=INITIAL_LR,
                    momentum=0.95,
                    nesterov=True)

    # optimizer_scheduler = lr_scheduler.StepLR(
    #     optimizer, step_size=halve_lr_every_passes, gamma=0.5)
    if USE_CUDA:
        model = model.cuda()

    for e in range(NUM_EPOCHS):
        train_loss, train_results = iterate(train_loader)

    # save the training metrics of the last epoch
    for metric, value in train_results.items():
        train_metrics[metric].append(value)
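
init_metrics() and iterate() are defined elsewhere in this project; a plausible
sketch, inferred only from how they are called above (hypothetical bodies):

from collections import defaultdict

def init_metrics():
    # One growing list per metric name; one value is appended per fold.
    return defaultdict(list)

def iterate(loader):
    # Runs one epoch over `loader`, returns (mean loss, {metric: value}).
    ...

# After the 10-fold loop, per-metric averages across folds:
cv_means = {m: sum(v) / len(v) for m, v in train_metrics.items()}
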
Code Example #3
    if args.train:
        # prepare dataloader
        train_loader = DataLoader(dataset=CustomData('train', dir_path=args.data_dir),
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True)

        val_loader = DataLoader(dataset=CustomData('val', dir_path=args.data_dir),
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False)
        # prepare optimizer; fail fast on an unknown --optim value instead of
        # leaving optimizer as None
        if args.optim == 'sgd':
            optimizer = torch.optim.SGD([
                {'params': network.parameters()}
            ], lr=args.lr, weight_decay=1e-4, momentum=0.9)
        elif args.optim == 'adam':
            optimizer = torch.optim.Adam([
                {'params': network.parameters()}
            ], lr=args.lr, weight_decay=1e-5)
        else:
            raise ValueError(f'Unsupported optimizer: {args.optim}')

        scheduler = None
        if args.scheduler == 'poly':
            # Polynomial decay: lr(epoch) = lr0 * (1 - epoch / n_epochs) ** 0.9
            poly_decay = lambda epoch: (1 - epoch / args.n_epochs) ** 0.9
            scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, poly_decay)

        # prepare criterion(s)
        criterion_cls = nn.CrossEntropyLoss()

        # start training
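
The poly schedule above multiplies the base learning rate by
(1 - epoch / n_epochs) ** 0.9 on every scheduler.step(). A self-contained
sketch with made-up numbers (dummy parameter, n_epochs = 100), just to show
the decay:

import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda e: (1 - e / 100) ** 0.9)
for epoch in range(3):
    opt.step()    # one (dummy) training epoch
    sched.step()  # decay the LR for the next epoch
    print(epoch, opt.param_groups[0]['lr'])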