Example #1
0
    ])

    # Bind the per-frame image transform into the video-level transform;
    # video_transform presumably applies it frame by frame — confirm upstream.
    video_transforms = functools.partial(video_transform,
                                         image_transform=image_transforms)

    # docopt-style CLI values arrive as strings; coerce to int.
    video_length = int(args['--video_length'])
    image_batch = int(args['--image_batch'])
    video_batch = int(args['--video_batch'])

    # Latent-space sizes for the generator: content, motion, and category codes.
    dim_z_content = int(args['--dim_z_content'])
    dim_z_motion = int(args['--dim_z_motion'])
    dim_z_category = int(args['--dim_z_category'])

    # dataset = data.VideoFolderDataset(args['<dataset>'], cache=os.path.join(args['<dataset>'], 'local.db'))
    # NOTE(review): the on-disk cache ('local.db') is disabled here, so the
    # dataset folder is re-scanned on every run — confirm this is intentional.
    dataset = data.VideoFolderDataset(args['<dataset>'], cache=None)
    # Single-frame view of the same videos, used for the image-level pipeline.
    image_dataset = data.ImageDataset(dataset, image_transforms)
    image_loader = DataLoader(image_dataset,
                              batch_size=image_batch,
                              drop_last=True,  # keep the batch size constant
                              num_workers=2,
                              shuffle=True)

    # Clip view: 16 frames per sample with a frame step of 2.
    # NOTE(review): the hard-coded 16/2 here ignore --video_length — verify.
    video_dataset = data.VideoDataset(dataset, 16, 2, video_transforms)
    video_loader = DataLoader(video_dataset,
                              batch_size=video_batch,
                              drop_last=True,
                              num_workers=2,
                              shuffle=True)

    generator = models.VideoGenerator(n_channels, dim_z_content,
                                      dim_z_category, dim_z_motion,
Example #2
0
# NOTE(review): the matching `if` is above this chunk — presumably it handles
# an already-existing params file; confirm against the full script.
else:
    logging.info('Saving model parameters to %s...' % params_path)

    # Persist the run configuration as JSON so the experiment is reproducible.
    with open(params_path, 'w') as f:
        json.dump(params, f)

logging.info('Loading training dataset...')

# Training data is served as batched patches: temporal x spatial crops taken
# with a spatial stride, all sized from the params dict.
train_set = data.PatchDataset(params['train_partitions'], params['batch_size'],
                              params['temporal_patch_size'],
                              params['spatial_patch_size'],
                              params['spatial_stride'])

logging.info('Loading validation dataset...')

# Validation and test use whole images (no spatial patching); only the
# temporal patch size is shared with training.
validation_set = data.ImageDataset(params['validation_partitions'],
                                   params['temporal_patch_size'])

logging.info('Loading test dataset...')

test_set = data.ImageDataset(params['test_partitions'],
                             params['temporal_patch_size'])

# TF1-style static graph: placeholders are fed per step at session-run time.
# NOTE(review): the placeholders carry no static shape, so all shape checking
# is deferred to runtime — confirm that is intentional.
inputs = tf.placeholder(tf.float32)
ground_truth = tf.placeholder(tf.float32)
global_step = tf.Variable(0, trainable=False, name='global_step')
network = get_network(inputs, params)
# Loss = mean-squared reconstruction error + L2 weight decay summed over all
# network weights, scaled by params['weight_decay'].
base_loss = tf.losses.mean_squared_error(network.outputs, ground_truth)
weight_loss = params['weight_decay'] * tf.reduce_sum(
    tf.stack([tf.nn.l2_loss(weight) for weight in network.weights]))
loss = base_loss + weight_loss
Example #3
0
        lambda x: x[:n_channels, ::],
        transforms.Normalize((0.5, 0.5, .5), (0.5, 0.5, 0.5)),
    ])

    # Bind the per-frame image transform into the video-level transform.
    video_transforms = functools.partial(video_transform, image_transform=image_transforms)

    # docopt-style CLI values arrive as strings; coerce to int.
    video_length = int(args['--video_length'])
    image_batch = int(args['--image_batch'])
    video_batch = int(args['--video_batch'])

    # Latent-space sizes: content, motion, and category codes.
    dim_z_content = int(args['--dim_z_content'])
    dim_z_motion = int(args['--dim_z_motion'])
    dim_z_category = int(args['--dim_z_category'])

    # Training videos, indexed with an on-disk cache ('local.db') inside the
    # dataset directory so the folder scan is reused across runs.
    dataset = data.VideoFolderDataset(args['<dataset>'], cache=os.path.join(args['<dataset>'], 'local.db'))
    # Frames paired with audio; audio_dir is defined outside this chunk.
    image_dataset = data.ImageDataset(dataset,audio_dir, image_transforms)
    image_loader = DataLoader(image_dataset, batch_size=image_batch, drop_last=True, num_workers=6, shuffle=True)
    print('args[<dataset>',args['<dataset>'])
    print('args[<dataset_test>',args['<dataset_test>'])
    # Held-out test split mirrors the training pipeline (separate data_test
    # module and cache file) but is not shuffled, for reproducible evaluation.
    dataset_test = data_test.VideoFolderDataset(args['<dataset_test>'], cache=os.path.join(args['<dataset_test>'], 'local_test.db'))
    image_dataset_test = data_test.ImageDataset(dataset_test,audio_dir_test, image_transforms)
    image_loader_test = DataLoader(image_dataset_test, batch_size=image_batch, drop_last=True, num_workers=6, shuffle=False)


    # Image encoder placed on GPU; audio encoder is a WaveGAN discriminator
    # backbone sized by model_size / ngpus (both defined outside this chunk).
    ImageModel = models.ImageConvNet().cuda()

    audio_encoder = WaveGANDiscriminator512(model_size=model_size, ngpus=ngpus)

    # video_dataset = data.VideoDataset(dataset, 16, 2, video_transforms)
    # video_loader = DataLoader(video_dataset, batch_size=video_batch, drop_last=True, num_workers=6, shuffle=True)