Example #1
0
def main():
    """Run a single-image super-resolution demo.

    Builds the model named by the CLI args, restores pretrained weights,
    upscales one input image, and writes the result to ``args.output``.
    """
    args = get_args(mode='demo')

    model_name = args.model.lower()
    print('==========Model===========')
    print(f'model name: {model_name}')
    model = MODEL_DICT[model_name](args)

    model.load(args.pretrained)

    print('\n========Generating========')
    dataset = DemoDataset(img_channels=args.img_channels,
                          r_mode=args.r_mode,
                          pre_upscale=args.pre_upscale,
                          upscale=args.upscale)

    lr_img = imgread(args.input, mode='unchanged')
    lr_img = dataset.forward(lr_img).to(model.device)

    # Inference only: eval() fixes batch-norm/dropout behaviour, and
    # no_grad() avoids building the autograd graph (saves memory/time).
    model.model.eval()
    with torch.no_grad():
        output = model.model(lr_img)

    output = dataset.backward(output)
    imgwrite(args.output, output)
    print(f'Finish! Saving at {args.output}')
Example #2
0
def main():
    """Train a TemporalTrainer over the configured epoch range, running
    validation every ``eval_interval`` epochs, then close its writer."""
    args = get_args()
    print(args)
    torch.manual_seed(args.seed)

    trainer = TemporalTrainer(args)
    first_epoch = trainer.args.start_epoch
    last_epoch = trainer.args.epochs
    print('Starting Epoch:', first_epoch)
    print('Total Epoches:', last_epoch)

    interval = args.eval_interval
    for epoch in range(first_epoch, last_epoch):
        trainer.training(epoch)
        # Validate on the last epoch of each eval interval
        # (epoch % k == k - 1  <=>  (epoch + 1) % k == 0).
        wants_val = not trainer.args.no_val and (epoch + 1) % interval == 0
        if wants_val:
            trainer.validation(epoch)

    trainer.writer.close()
Example #3
0
def main():
    """Seed every RNG source for determinism, then train a MultiViewTrainer
    with periodic validation; pinned to GPU 1."""
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = get_args()

    # Full determinism: seed all RNGs and force deterministic cuDNN kernels.
    seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    trainer = MultiViewTrainer(args)
    print('Starting Epoch:', trainer.args.start_epoch)
    print('Total Epoches:', trainer.args.epochs)

    for epoch in range(trainer.args.start_epoch, trainer.args.epochs):
        trainer.training(epoch)
        # Validate on the last epoch of each eval interval.
        do_validate = (not trainer.args.no_val
                       and (epoch + 1) % args.eval_interval == 0)
        if do_validate:
            trainer.validation(epoch)

    trainer.writer.close()
Example #4
0
def main():
    """Precompute CNN features for the validation and training splits of a
    FaceDataset and pickle features/labels to the working directory."""
    args = get_args()
    model = get_model()

    def _pickle_to(path, obj):
        # Dump one object to a pickle file.
        with open(path, 'wb') as fh:
            pickle.dump(obj, fh)

    def _make_loader(split, augment, **dataset_kwargs):
        # Build a single-sample loader over the requested split.
        dataset = FaceDataset(args.data_dir,
                              split,
                              img_size=cfg.MODEL.IMG_SIZE,
                              augment=augment,
                              **dataset_kwargs)
        return DataLoader(dataset,
                          batch_size=1,
                          shuffle=True,
                          num_workers=cfg.TRAIN.WORKERS,
                          drop_last=True)

    # precompute validation Features
    valid_features, valid_labels = preconvfeat(
        _make_loader("valid", augment=False), model)
    _pickle_to('valid_features.pkl', valid_features)
    _pickle_to('valid_labels.pkl', valid_labels)

    # precompute training Features
    train_features, train_labels = preconvfeat(
        _make_loader("train", augment=True,
                     age_stddev=cfg.TRAIN.AGE_STDDEV), model)
    _pickle_to('train_features.pkl', train_features)
    _pickle_to('train_labels.pkl', train_labels)
Example #5
0
def main():
    """Evaluate a pretrained model on the validation split and report the
    average PSNR/SSIM."""
    args = get_args(mode='val')
    os.environ["CUDA_VISIBLE_DEVICES"] = args.ngpu

    model_name = args.model.lower()
    print('==========Model===========')
    print(f'model name: {model_name}')

    # Build the requested model, restore weights, spread across GPUs.
    model = MODEL_DICT[model_name](args)
    model.load(args.pretrained)
    model.tomultigpu()

    model.fit(valloader=loaddata(args, train=False))

    print('\n========Evaluating========')
    avg_ssim, avg_psnr = model.eval()
    print(
        f'Finish evaluate! avg_psnr: {avg_psnr:.010f}, avg_ssim: {avg_ssim:.010f}'
    )
Example #6
0
def main():
    """Run a random hyperparameter search of ``n_runs`` trials.

    Each trial launches ``src/h02_learn/train.py`` as a subprocess and
    appends its results to ``all_results.txt``.  The search resumes from
    the number of result rows already on disk, and writes a done-marker
    file when all trials have finished.
    """
    args = get_args()
    n_runs = 50

    output_path = os.path.join(args.checkpoint_path, args.task, args.language,
                               args.representation)
    results_fname = os.path.join(output_path, 'all_results.txt')
    done_fname = os.path.join(output_path, 'finished.txt')

    # Create the output directory *before* probing the results file,
    # so a fresh run never touches a nonexistent path.
    util.mkdir(output_path)
    # One line of the results file is the header, so completed
    # iterations = line count - 1 (-1 means the file does not exist yet).
    curr_iter = util.file_len(results_fname) - 1

    if curr_iter == -1:
        # No results file yet: write the column header first.
        res_columns = [
            'hidden_size', 'nlayers', 'dropout', 'pca_size', 'train_loss',
            'dev_loss', 'test_loss', 'train_acc', 'dev_acc', 'test_acc'
        ]
        append_result(results_fname, res_columns)
        curr_iter = 0

    search = get_hyperparameters_search(n_runs, args.representation)

    for hyper in tqdm(search[curr_iter:], initial=curr_iter, total=n_runs):
        hyperparameters = get_hyperparameters(hyper)

        my_env = os.environ.copy()
        cmd = ['python', 'src/h02_learn/train.py'
               ] + args2list(args) + hyperparameters
        tqdm.write(str(hyperparameters))
        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=my_env)
        # Wait for the trial and capture its stdout/stderr for parsing.
        out, err = process.communicate()

        results = get_results(out, err)
        append_result(results_fname, [str(x) for x in hyper] + results)

    write_done(done_fname)
Example #7
0
        img = np.transpose(img, (1, 2, 0))
        imgs_np.append(img)

        img = Image.fromarray(img)
        imgs.append(img)

    return imgs, imgs_np, masks
    # return imgs, imgs_np, masks, flow

def signal_handler(sig, frame, video_writer):
    """SIGINT handler: release the video writer so the output file is
    finalized on disk, then terminate the process with exit status 0."""
    print('You pressed Ctrl+C!')
    # Close the partially-written video before exiting.
    video_writer.release()
    raise SystemExit(0)

if __name__ == "__main__":
    args = get_args()
    model = load_model(args, nclass=11, temporal=args.demo_temporal)

    if args.demo_img_folder is not None:
        # rgb_demo_dataset = DeepSightDemoRGB(args.demo_img_folder)
        rgb_demo_dataset = DeepSightDemoDepth(args.demo_img_folder)
        data_loader = DataLoader(rgb_demo_dataset, batch_size=32, shuffle=True)
        pred_dir = os.path.join(args.demo_img_folder, "pred")
        transform_dir = os.path.join(args.demo_img_folder, "transform")
        create_directory(pred_dir)
        create_directory(transform_dir)
        for i, sample in enumerate(tqdm(data_loader)):
            image, target, names = sample['image'], sample['label'], sample['id']
            imgs, imgs_np, masks, flow = inference(image, model)
            save_image(flow, os.path.join(pred_dir, "flow.png"))
            for i in range(len(imgs)):
Example #8
0
def main():
    """Train an age classifier/regressor on precomputed CNN features.

    Loads the pickled train/valid features and labels from the working
    directory, builds a model, optimizer, criterion and data loaders from
    the (frozen) config, then runs the epoch loop with optional TensorBoard
    logging and best-validation-MAE checkpointing.
    """
    args = get_args()

    # Allow command-line overrides of the config, then freeze it.
    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # display nb of workers
    print(f"number of train workers {cfg.TRAIN.WORKERS}")

    # fetch features (precomputed and pickled by a separate script)
    with open('train_features.pkl', 'rb') as f:
        train_features = pickle.load(f)
    with open('train_labels.pkl', 'rb') as f:
        train_labels = pickle.load(f)

    with open('valid_features.pkl', 'rb') as f:
        valid_features = pickle.load(f)
    with open('valid_labels.pkl', 'rb') as f:
        valid_labels = pickle.load(f)

    # Flatten each (C, H, W) feature map into one feature vector.
    n_features = train_features.shape[1] * train_features.shape[
        2] * train_features.shape[3]
    n_classes = 101  # presumably ages 0-100 — TODO confirm against dataset

    # create model
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        print("=> creating classifier")
        model = get_classifier(n_features=n_features, n_classes=n_classes)
    else:
        print("=> creating regressor")
        model = get_regressor(n_features=n_features, n_classes=n_classes)

    # Create optimizer
    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # GPU config
    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    # criterion: classification uses (optionally smoothed) cross-entropy,
    # regression uses heteroscedastic Gaussian or summed L1.
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        if cfg.MODEL.SMOOTHING:  # PEP 8: test truthiness, not `== True`
            print("=> using label smoothing")
            criterion = LabelSmoothingLoss(
                std_smoothing=cfg.MODEL.STD_SMOOTHING,
                n_classes=n_classes).to(device)
        else:
            criterion = nn.CrossEntropyLoss().to(device)
    else:
        if cfg.MODEL.ALEATORIC:
            criterion = HeteroscedasticGaussianLoss().to(device)
        else:
            criterion = nn.L1Loss(reduction="sum").to(device)

    # loaders
    train_loader = get_feature_loader(train_features,
                                      train_labels,
                                      batch_size=cfg.TEST.BATCH_SIZE,
                                      shuffle=True,
                                      num_workers=cfg.TRAIN.WORKERS,
                                      drop_last=True)

    val_loader = get_feature_loader(valid_features,
                                    valid_labels,
                                    batch_size=cfg.TEST.BATCH_SIZE,
                                    shuffle=False,
                                    num_workers=cfg.TRAIN.WORKERS,
                                    drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    # Initialize both writers so each is defined regardless of the
    # tensorboard flag (previously only train_writer was pre-set).
    train_writer = None
    val_writer = None

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                   opts_prefix + "_val")

    print('=> Start training')
    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device)

        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        # checkpoint: keep the weights with the best validation MAE so far
        if val_mae < best_val_mae:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            # Unwrap DataParallel before saving so the state dict keys
            # are loadable on a single-GPU model.
            model_state_dict = model.module.state_dict(
            ) if args.multi_gpu else model.state_dict()
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(
                    checkpoint_dir.joinpath(
                        "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                            epoch, val_loss, val_mae))))
            best_val_mae = val_mae
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")