Example 1
    model = MeshNet(cfg=cfg['MeshNet'],
                    num_classes=num_classes,
                    require_fea=True)
    model = nn.DataParallel(model)

    # Optionally warm-start from a pretrained checkpoint; strict=False tolerates
    # missing/unexpected keys (e.g. a different classification head).
    if 'pretrained' in cfg:
        try:
            model.load_state_dict(torch.load(cfg['pretrained']),
                                  strict=False)
        except RuntimeError:
            # Tensor shape mismatches still raise even with strict=False;
            # in that case fall back to training from scratch.
            pass

    model = model.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(),
                            lr=cfg['lr'],
                            weight_decay=cfg['weight_decay'])
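    # StepLR decays the learning rate by a factor of gamma every step_size epochs.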
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=cfg['step_size'],
                                          gamma=cfg['gamma'])
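    # Metrics tracked during training: accuracy, class-balanced accuracy, and F1.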
    metrics = [
        AccuracyMetric(),
        BalancedAccuracyMetric(num_classes=num_classes),
        F1ScoreMetric(n_classes=num_classes),
    ]

    # makedirs with exist_ok avoids the exists-then-mkdir race and creates parents.
    os.makedirs(cfg['saved_path'], exist_ok=True)

    best_values = train_model(model, metrics, criterion, optimizer, scheduler,
Example 2
                print('{} Loss: {:.4f}'.format(phrase, epoch_loss))

        save_loss_plot(train_losses, val_losses, root_path)

    return best_model_wts


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    if use_gpu:
        model.cuda()
    model = nn.DataParallel(model)
    #model.load_state_dict(torch.load(os.path.join(root_path, cfg['ckpt_root'], 'MeshNet_best.pkl')))
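    # L1 (mean absolute error) loss: this variant regresses its targets rather than classifying.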
    criterion = nn.L1Loss()
    optimizer = optim.SGD(model.parameters(),
                          lr=cfg['lr'],
                          momentum=cfg['momentum'],
                          weight_decay=cfg['weight_decay'])
    #optimizer = optim.Adam(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])
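    # MultiStepLR drops the learning rate by gamma at each milestone epoch.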
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=cfg['milestones'],
                                               gamma=cfg['gamma'])

    # Remove stale checkpoints from previous runs; use cfg['ckpt_root'] so the
    # cleanup targets the same directory the best model is saved to below.
    ckpt_dir = os.path.join(root_path, cfg['ckpt_root'])
    os.makedirs(ckpt_dir, exist_ok=True)
    for f in os.listdir(ckpt_dir):
        os.remove(os.path.join(ckpt_dir, f))

    best_model_wts = train_model(model, criterion, optimizer, scheduler, cfg)
    torch.save(best_model_wts,
               os.path.join(root_path, cfg['ckpt_root'], 'MeshNet_best.pkl'))
Example 3
    return best_model_wts


if __name__ == '__main__':

    # prepare model
    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)

    # criterion
    criterion = nn.CrossEntropyLoss()

    # optimizer
    if cfg['optimizer'] == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['weight_decay'])
    else:
        optimizer = optim.AdamW(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])
    
    # scheduler
    if cfg['scheduler'] == 'step':
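        # No gamma given here, so MultiStepLR uses its default decay factor of 0.1.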
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['milestones'])
    else:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg['max_epoch'])

    # start training
    os.makedirs(cfg['ckpt_root'], exist_ok=True)
    best_model_wts = train_model(model, criterion, optimizer, scheduler, cfg)
    torch.save(best_model_wts, os.path.join(cfg['ckpt_root'], 'MeshNet_best.pkl'))
Example 4
                    np.concatenate(labels_list, axis=None),
                    np.concatenate(preds_list, axis=None))
                print(clsf_rpt)
                writer.add_scalar('data/test_loss', epoch_loss, epoch)
                writer.add_scalar('data/test_acc', epoch_acc, epoch)

    writer.close()
    return best_model_wts


if __name__ == '__main__':

    model = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
    model.cuda()
    model = nn.DataParallel(model)
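    # Report model size: total vs. trainable parameter counts.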
    num_total_params = sum(p.numel() for p in model.parameters())
    num_trainable_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    print('Number of total parameters: %d, number of trainable parameters: %d' %
          (num_total_params, num_trainable_params))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=cfg['lr'],
                          momentum=cfg['momentum'],
                          weight_decay=cfg['weight_decay'])
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=cfg['milestones'],
                                               gamma=cfg['gamma'])

    # Record the wall-clock start time of the training run.
    t = time.time()