import os
from pathlib import Path

import numpy as np
import torch
import torch.backends.cudnn as cudnn

# Repo-level helpers assumed to be importable: parse_args, update_config,
# cfg, Network, resnet, create_logger, VoxcelebTestset, validate_verification.


def main():
    args = parse_args()
    update_config(cfg, args)
    if args.load_path is None:
        raise ValueError("Please specify load path.")

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # model and optimizer
    if cfg.MODEL.NAME == 'model':
        if args.load_path and os.path.exists(args.load_path):
            checkpoint = torch.load(args.load_path)
            genotype = checkpoint['genotype']
        else:
            raise FileNotFoundError('Please specify the model to evaluate')
        model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, genotype)
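        # drop-path is disabled for evaluation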
        model.drop_path_prob = 0.0
    else:
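        # fall back to the ResNet variant named in the config (from the repo's resnet module)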
        model = getattr(resnet, cfg.MODEL.NAME)(num_classes=cfg.MODEL.NUM_CLASSES)
    model = model.cuda()

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint = torch.load(args.load_path)

        # load checkpoint
        model.load_state_dict(checkpoint['state_dict'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(os.path.dirname(args.load_path))
        logger.info("=> loaded checkpoint '{}'".format(args.load_path))
    else:
        raise FileNotFoundError('Please specify the model to evaluate')
    logger.info(args)
    logger.info(cfg)

    # dataloader
    test_dataset_verification = VoxcelebTestset(
        Path(cfg.DATASET.DATA_DIR), cfg.DATASET.PARTIAL_N_FRAMES
    )
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    validate_verification(cfg, model, test_loader_verification)
Example #2
import os
import shutil
from pathlib import Path

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter  # the repo may use tensorboardX instead
from tqdm import tqdm

# Repo-level helpers assumed to be importable: parse_args, update_config, cfg,
# CrossEntropyLoss, Network, create_logger, set_path, count_parameters,
# DeepSpeakerDataset, train_from_scratch, validate_identification,
# save_checkpoint.


def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # load the architecture genotype by evaluating the command-line string
    # (note: eval() executes args.text_arch verbatim, so it must be trusted)
    genotype = eval(args.text_arch)

    model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES,
                    cfg.MODEL.LAYERS, genotype)
    model = model.cuda()

    optimizer = optim.Adam(
        model.parameters(),
        lr=cfg.TRAIN.LR,
        weight_decay=cfg.TRAIN.WD,
    )

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_acc1 = checkpoint['best_acc1']
        optimizer.load_state_dict(checkpoint['optimizer'])
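        # reuse the original run's log/checkpoint directory layout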
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkloggpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_scratch', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_acc1 = 0.0
        last_epoch = -1
    logger.info(args)
    logger.info(cfg)
    logger.info(f"selected architecture: {genotype}")
    logger.info("Number of parameters: {}".format(count_parameters(model)))

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(os.path.join(this_dir, 'models', cfg.MODEL.NAME + '.py'),
                 args.path_helper['ckpt_path'])

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.PARTIAL_N_FRAMES, 'train')
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                      cfg.DATASET.PARTIAL_N_FRAMES,
                                      'test',
                                      is_test=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,  # keep the evaluation set in a fixed order
        drop_last=False,  # score every test sample
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
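    # cosine-anneal the learning rate from cfg.TRAIN.LR down to cfg.TRAIN.LR_MIN over END_EPOCH epochs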
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        cfg.TRAIN.END_EPOCH,
        cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch)

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='train progress'):
        model.train()
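        # linearly ramp the drop-path probability from 0 toward cfg.MODEL.DROP_PATH_PROB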
        model.drop_path_prob = cfg.MODEL.DROP_PATH_PROB * epoch / cfg.TRAIN.END_EPOCH

        train_from_scratch(cfg, model, optimizer, train_loader, criterion,
                           epoch, writer_dict)

        if epoch % cfg.VAL_FREQ == 0 or epoch == cfg.TRAIN.END_EPOCH - 1:
            acc = validate_identification(cfg, model, test_loader, criterion)

            # remember best acc@1 and save checkpoint
            is_best = acc > best_acc1
            best_acc1 = max(acc, best_acc1)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'path_helper': args.path_helper,
                    'genotype': genotype,
                }, is_best, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))

        # step once per epoch; passing an epoch index to step() is deprecated
        lr_scheduler.step()
Example #3
import os

import torch
import torch.nn as nn

# Repo-level helpers assumed to be importable: loadedDataset, Network,
# RMSE_Loss, AverageMeter; `args`, `HEADS`, `train_dir`, and `save_dir`
# are defined earlier in the script.

# one dataset and one loader per head
train_datasets = [None] * len(HEADS)
train_loaders = [None] * len(HEADS)

for i in range(len(HEADS)):
    train_datasets[i] = loadedDataset(train_dir, HEADS[i], args.frame_num, args.time_freq, args.patch_size)
    train_loaders[i] = torch.utils.data.DataLoader(
        train_datasets[i], batch_size=args.batch_size, shuffle=True, pin_memory=True)

model = Network(in_channels=3, out_channels=3).cuda()
rmse_loss = RMSE_Loss()
criterion = nn.MSELoss()

if os.path.exists(os.path.join(save_dir, 'checkpoint.pth.tar')):
    # load existing model
    print('==> loading existing model')
    model_info = torch.load(os.path.join(save_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(model_info['state_dict'])
    optimizer = torch.optim.Adam(model.parameters())
    optimizer.load_state_dict(model_info['optimizer'])
    cur_epoch = model_info['epoch']
else:
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # create model
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    cur_epoch = 0


for epoch in range(cur_epoch, args.epochs + 1):
    rmse = AverageMeter()
    losses = AverageMeter()
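
    # A minimal sketch of the rest of the epoch body, which this example
    # truncates: loop over each head's loader, optimize the MSE objective,
    # and track RMSE as a metric. The `inputs`/`targets` unpacking is an
    # assumption about what loadedDataset yields.
    for i in range(len(HEADS)):
        for inputs, targets in train_loaders[i]:
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses.update(loss.item(), inputs.size(0))
            rmse.update(rmse_loss(outputs, targets).item(), inputs.size(0))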