def main():
    """Denoise an input video frame-by-frame and write the result to disk.

    Reads CLI args for the config/weights/input/output paths, builds the
    denoising model described by the YAML config, then runs it over every
    frame of the input video, writing an MJPG-encoded output video.
    """
    args = parse_args()

    # Fall back to CPU when CUDA is unavailable or explicitly disabled.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)

    window_size = config['model']['window_size']

    # Sequential architectures consume a series of inputs per step; every
    # other architecture takes a single input tensor.
    series_size = 1 \
        if config['model']['architecture'] != 'sequential' else \
        config['model'][config['model']['architecture']]['series_size']

    denoising_model = build_model_from_config(config)

    model = Model(denoising_model, device)

    print('Count of model trainable parameters: {}'.format(
        model.get_parameters_count()))

    model.load(args.model_weights)

    # Probe the network with a zero tensor to report its output shape
    # (a list already unpacks with *, no need to wrap it in tuple()).
    dummy_input = torch.FloatTensor(
        np.zeros((1, 3, window_size, window_size))).to(device)
    print('Model out shape: {}'.format(
        model.model(*([dummy_input] * series_size)).shape))

    video_source = VideoFramesGenerator(args.input_video)

    fps = video_source.get_fps()
    frame_height, frame_width = video_source.get_resolution()

    out_video = cv2.VideoWriter(args.output_video,
                                cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                fps, (frame_width, frame_height))

    try:
        # The loop index is unused; tqdm only drives the progress bar.
        for _ in tqdm.tqdm(range(len(video_source))):
            frame = video_source.get_next_frame()['frame']
            predicted_frame = model.predict(frame, window_size=window_size)
            # The model emits RGB; OpenCV's writer expects BGR ordering.
            predicted_frame = cv2.cvtColor(predicted_frame,
                                           cv2.COLOR_RGB2BGR)
            out_video.write(predicted_frame)
    finally:
        # Release the writer even if a frame fails so the container header
        # is finalized and any partial output remains playable.
        out_video.release()
def main():
    """Run single-image denoising inference and report the elapsed time.

    Loads the model described by the YAML config, denoises one RGB image
    from disk and saves the result to the requested output path.
    """
    args = parse_args()

    # CPU fallback when CUDA is disabled via CLI or simply unavailable.
    device = 'cuda' if (not args.no_cuda
                        and torch.cuda.is_available()) else 'cpu'

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)

    model_cfg = config['model']
    window_size = model_cfg['window_size']

    # Only the 'sequential' architecture consumes a series of inputs.
    if model_cfg['architecture'] == 'sequential':
        series_size = model_cfg[model_cfg['architecture']]['series_size']
    else:
        series_size = 1

    model = Model(build_model_from_config(config), device)

    print('Count of model trainable parameters: {}'.format(
        model.get_parameters_count()))

    model.load(args.model_weights)

    # Probe the network with zeros to display its output shape.
    probe = torch.FloatTensor(
        np.zeros((1, 3, window_size, window_size))).to(device)
    probes = tuple([probe] * series_size)
    print('Model out shape: {}'.format(
        model.model.inference(*probes).shape))

    inp_image = np.array(Image.open(args.input_image).convert('RGB'))

    start_time = time()
    out_image = model.predict(image=inp_image,
                              window_size=window_size,
                              batch_size=args.batch_size,
                              verbose=True)
    finish_time = time()

    Image.fromarray(out_image).save(args.output_image)

    print('Inference time: {:.2f} sec'.format(finish_time - start_time))
# Example #3
def main():
    """Train the denoising model described by a YAML config file.

    Builds the model, optimizer, scheduler, dataset loader and (optional)
    visdom callbacks from the config, optionally resumes from the last
    saved checkpoint, and runs the fit loop.
    """
    args = parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)

    # Validate the architecture/dataset combination up front.  The original
    # code used a bare `assert`, which is stripped under `python -O`, so a
    # misconfiguration would silently slip through; raise explicitly instead.
    # Allowed: non-sequential model with any dataset; sequential model with
    # a sequential dataset; sequential model of series_size == 1 with a
    # 'pair' dataset.
    arch = config['model']['architecture']
    ds_type = config['dataset']['type']
    valid_combination = (
        arch != 'sequential'
        or ds_type == 'sequential'
        or (ds_type == 'pair'
            and config['model']['sequential']['series_size'] == 1)
    )
    if not valid_combination:
        raise ValueError(
            'Incompatible configuration: model architecture {!r} cannot be '
            'trained with dataset type {!r}'.format(arch, ds_type))

    batch_size = config['train']['batch_size']
    n_jobs = config['train']['number_of_processes']
    epochs = config['train']['epochs']
    window_size = config['model']['window_size']

    # exist_ok avoids the isdir()/makedirs() race of the original check.
    os.makedirs(config['train']['save']['model'], exist_ok=True)

    # Keep a copy of the training config next to the checkpoints for
    # reproducibility.
    copyfile(
        args.config,
        os.path.join(config['train']['save']['model'],
                     os.path.basename(args.config)))

    # NOTE: the 'sgd' entry is never read (SGD is constructed explicitly
    # below to add momentum/nesterov); kept for completeness.
    optimizers = {
        'adam': torch.optim.Adam,
        'nadam': Nadam,
        'radam': Radam,
        'sgd': torch.optim.SGD,
    }

    losses = {
        'mse': F.mse_loss,
        'l1': F.l1_loss,
        'fourier_loss': FourierImagesLoss(
            loss_sum_coeffs=(1, 0.5),
            image_shape=(window_size, window_size),
            four_normalized=True),
    }

    denoising_model = build_model_from_config(config)

    model = Model(denoising_model,
                  device,
                  distributed_learning=config['train']['distribution_learning']
                  ['enable'],
                  distributed_devices=config['train']['distribution_learning']
                  ['devices'])

    # Checkpointing callbacks: save model weights and optimizer state every
    # config['train']['save']['every'] epochs.
    callbacks = []

    callbacks.append(
        SaveModelPerEpoch(
            os.path.join(os.path.dirname(__file__),
                         config['train']['save']['model']),
            config['train']['save']['every']))

    callbacks.append(
        SaveOptimizerPerEpoch(
            os.path.join(os.path.dirname(__file__),
                         config['train']['save']['model']),
            config['train']['save']['every']))

    # Optional visdom-based live monitoring of losses and sample images.
    if config['visualization']['use_visdom']:
        plots = VisPlot('Image denoising train',
                        server=config['visualization']['visdom_server'],
                        port=config['visualization']['visdom_port'])

        plots.register_scatterplot('train loss per_batch', 'Batch number',
                                   'Loss', [
                                       '{} between predicted and ground truth'
                                       ''.format(config['train']['loss']),
                                       '{} between predicted and input'
                                       ''.format(config['train']['loss'])
                                   ])

        plots.register_scatterplot(
            'train validation loss per_epoch', 'Batch number', 'Loss', [
                '{} train loss'.format(config['train']['loss']),
                'double {} train loss'.format(config['train']['loss'])
            ])

        callbacks.append(plots)

        callbacks.append(
            VisImageForAE('Image visualisation',
                          config['visualization']['visdom_server'],
                          config['visualization']['visdom_port'],
                          config['visualization']['image']['every'],
                          scale=config['visualization']['image']['scale']))

    model.set_callbacks(callbacks)

    # Sequential architectures consume a series of inputs per step.
    series_size = 1 \
        if config['model']['architecture'] != 'sequential' else \
        config['model'][config['model']['architecture']]['series_size']

    dataset_loader = build_dataset_from_config(config)

    start_epoch = 0
    if config['train']['optimizer'] != 'sgd':
        optimizer = optimizers[config['train']['optimizer']](
            model.model.parameters(),
            lr=config['train']['lr'],
            weight_decay=config['train']['weight_decay'])
    else:
        # SGD is built directly so that momentum/nesterov can be set.
        optimizer = torch.optim.SGD(
            model.model.parameters(),
            lr=config['train']['lr'],
            weight_decay=config['train']['weight_decay'],
            momentum=0.9,
            nesterov=True)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=2,
                                                           verbose=True)

    # Resume from the most recent checkpoint when requested.
    if config['train']['load_model'] or config['train']['load_optimizer']:
        weight_path, optim_path, start_epoch = get_last_epoch_weights_path(
            os.path.join(os.path.dirname(__file__),
                         config['train']['save']['model']), print)

        if weight_path is not None:
            if config['train']['load_model']:
                model.load(weight_path)

            if config['train']['load_optimizer']:
                optimizer.load_state_dict(
                    torch.load(optim_path, map_location='cpu'))

    train_data = DataLoader(dataset_loader,
                            batch_size=batch_size,
                            num_workers=n_jobs,
                            drop_last=True)

    print('Count of model trainable parameters: {}'.format(
        model.get_parameters_count()))

    # Probe the network with a zero tensor to report its output shape.
    print('Model out shape: {}'.format(
        model.model(*tuple([
            torch.FloatTensor(np.zeros((1, 3, window_size,
                                        window_size))).to(device)
        ] * series_size)).shape))

    model.fit(train_data, (optimizer, scheduler),
              epochs,
              losses[config['train']['loss']],
              init_start_epoch=start_epoch + 1,
              validation_loader=None,
              is_epoch_scheduler=False)