Example 1
        network_output_downscale = 4
    else:
        validation_set = 0
        output_downscale = 8
        path = '../../dataset/ST_partA_' + args.dataset.replace('parta_',
                                                                '') + '/'

    model_save_dir = './models_BSDR'

    batch_size = args.batch_size
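    # Parse the comma-separated count thresholds; N thresholds give N + 1 density categories.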
    count_density_threshold = str(args.count_thresh).split(',') if args.count_thresh != '' else []
    count_density_threshold = [float(x) for x in count_density_threshold]

    num_density_categories = len(count_density_threshold) + 1
    dataset = CrowdDataset(path,
                           name=args.dataset,
                           valid_set_size=validation_set,
                           gt_downscale_factor=output_downscale,
                           density_map_sigma=density_map_sigma,
                           image_size_multiple=output_downscale *
                           network_output_downscale,
                           image_size_min=image_size_min,
                           image_crop_size=image_crop_size)
    #print(dataset.data_files['test_valid'], len(dataset.data_files['test_valid']))
    print(dataset.data_files['train'], len(dataset.data_files['train']))

    # -- Train the model
    train_network()
Example 2
from model import Generator, JointCNN, load_trainer, save_trainer

train_transform = torchvision.transforms.Compose([
    transforms.RandomlySelectPatchAndRescale(),
    transforms.RandomHorizontalFlip(),
    transforms.NegativeOneToOneNormalizeImage(),
    transforms.NumpyArraysToTorchTensors()
])
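# The validation transform reuses the patch selection and normalization but omits the random horizontal flip.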
validation_transform = torchvision.transforms.Compose([
    transforms.RandomlySelectPatchAndRescale(),
    transforms.NegativeOneToOneNormalizeImage(),
    transforms.NumpyArraysToTorchTensors()
])

train_dataset = CrowdDataset(settings.train_dataset_path,
                             'train',
                             transform=train_transform)
train_dataset_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=settings.batch_size,
    shuffle=True,
    num_workers=settings.number_of_data_loader_workers)
validation_dataset = CrowdDataset(settings.validation_dataset_path,
                                  'validation',
                                  transform=validation_transform)
validation_dataset_loader = torch.utils.data.DataLoader(
    validation_dataset,
    batch_size=settings.batch_size,
    shuffle=False,
    num_workers=settings.number_of_data_loader_workers)
Example 3
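    # -- Setting seeds for reproducibility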
    torch.manual_seed(11)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.cuda.manual_seed(11)
    torch.cuda.manual_seed_all(11)

    # -- Dataset paths
    if args.dataset == "parta":
        validation_set = 30
        path = '../dataset/ST_partA/'
        output_downscale = 4
    elif args.dataset == "ucfqnrf":
        validation_set = 240
        output_downscale = 4
        path = '../dataset/UCF-QNRF_ECCV18/'

    model_save_dir = './models'
    batch_size = args.batch_size

    dataset = CrowdDataset(path,
                           name=args.dataset,
                           valid_set_size=validation_set,
                           gt_downscale_factor=output_downscale)
    print(dataset.data_files['test_valid'],
          len(dataset.data_files['test_valid']))
    print(dataset.data_files['train'], len(dataset.data_files['train']))

    # -- Train the model
    train_network()
Example 4
def train(settings=None):
    """Main script for training the semi-supervised GAN."""
    if not settings:
        settings = Settings()
    train_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),
                                                      transforms.RandomHorizontalFlip(),
                                                      transforms.NegativeOneToOneNormalizeImage(),
                                                      transforms.NumpyArraysToTorchTensors()])
    validation_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),
                                                           transforms.NegativeOneToOneNormalizeImage(),
                                                           transforms.NumpyArraysToTorchTensors()])

    train_dataset = CrowdDataset(settings.train_dataset_path, 'train', transform=train_transform)
    train_dataset_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings.batch_size, shuffle=True,
                                                       num_workers=settings.number_of_data_loader_workers)
    validation_dataset = CrowdDataset(settings.validation_dataset_path, 'validation', transform=validation_transform)
    validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=settings.batch_size,
                                                            shuffle=False,
                                                            num_workers=settings.number_of_data_loader_workers)

    gan = GAN()
    gpu(gan)
    D = gan.D
    G = gan.G
    P = gan.P
    gpu(P)
    discriminator_optimizer = Adam(D.parameters())
    generator_optimizer = Adam(G.parameters())
    predictor_optimizer = Adam(P.parameters())

    step = 0
    epoch = 0

    if settings.load_model_path:
        d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(prefix='discriminator')
        D.load_state_dict(d_model_state_dict)
        discriminator_optimizer.load_state_dict(d_optimizer_state_dict)
    discriminator_optimizer.param_groups[0].update({'lr': 1e-4, 'weight_decay': settings.weight_decay})
    if settings.load_model_path:
        g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(prefix='generator')
        G.load_state_dict(g_model_state_dict)
        generator_optimizer.load_state_dict(g_optimizer_state_dict)
    generator_optimizer.param_groups[0].update({'lr': 1e-4})

    running_scalars = defaultdict(float)
    validation_running_scalars = defaultdict(float)
    running_example_count = 0
    datetime_string = datetime.datetime.now().strftime("y%Ym%md%dh%Hm%Ms%S")
    trial_directory = os.path.join(settings.log_directory, settings.trial_name + ' ' + datetime_string)
    os.makedirs(trial_directory, exist_ok=True)
    summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))
    validation_summary_writer = SummaryWriter(os.path.join(trial_directory, 'validation'))
    print('Starting training...')
    step_time_start = datetime.datetime.now()
    while epoch < settings.number_of_epochs:
        for examples in train_dataset_loader:
            # Real image discriminator processing.
            discriminator_optimizer.zero_grad()
            images, labels, _ = examples
            images, labels = Variable(gpu(images)), Variable(gpu(labels))
            predicted_labels, predicted_counts = D(images)
            real_feature_layer = D.feature_layer
            density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()
            count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()
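            # The density-map loss is weighted 10x relative to the count loss.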
            loss = count_loss + (density_loss * 10)
            loss.backward()
            running_scalars['Labeled/Loss'] += loss.data[0]
            running_scalars['Labeled/Count Loss'] += count_loss.data[0]
            running_scalars['Labeled/Density Loss'] += density_loss.data[0]
            running_scalars['Labeled/Count ME'] += (predicted_counts - labels.sum(1).sum(1)).mean().data[0]
            # Predictor.
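            # P refines the predicted counts; it trains on detached counts so its gradients do not reach D.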
            predictor_optimizer.zero_grad()
            predictor_predicted_counts = P(predicted_counts.detach())
            predictor_count_loss = torch.abs(predictor_predicted_counts - labels.sum(1).sum(1)
                                             ).pow(settings.loss_order).mean()
            predictor_count_loss.backward()
            predictor_optimizer.step()
            running_scalars['Predictor/Count Loss'] += predictor_count_loss.data[0]
            running_scalars['Predictor/Count MAE'] += torch.abs(predictor_predicted_counts - labels.sum(1).sum(1)
                                                                ).mean().data[0]
            running_scalars['Predictor/Count ME'] += (predictor_predicted_counts - labels.sum(1).sum(1)).mean().data[0]
            running_scalars['Predictor/Exponent'] += P.exponent.data[0]
            # Discriminator update.
            discriminator_optimizer.step()

            running_example_count += images.size()[0]
            if step % settings.summary_step_period == 0 and step != 0:
                comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),
                                                                              cpu(predicted_labels))
                summary_writer.add_image('Comparison', comparison_image, global_step=step)
                print('\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')
                step_time_start = datetime.datetime.now()
                for name, running_scalar in running_scalars.items():
                    mean_scalar = running_scalar / running_example_count
                    summary_writer.add_scalar(name, mean_scalar, global_step=step)
                    running_scalars[name] = 0
                running_example_count = 0
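                # Run a full pass over the validation set at every summary step.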
                for validation_examples in validation_dataset_loader:
                    images, labels, _ = validation_examples
                    images, labels = Variable(gpu(images)), Variable(gpu(labels))
                    predicted_labels, predicted_counts = D(images)
                    density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()
                    count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()
                    count_mae = torch.abs(predicted_counts - labels.sum(1).sum(1)).mean()
                    count_me = (predicted_counts - labels.sum(1).sum(1)).mean()
                    validation_running_scalars['Labeled/Density Loss'] += density_loss.data[0]
                    validation_running_scalars['Labeled/Count Loss'] += count_loss.data[0]
                    validation_running_scalars['Labeled/Count MAE'] += count_mae.data[0]
                    validation_running_scalars['Labeled/Count ME'] += count_me.data[0]
                    predictor_predicted_counts = P(predicted_counts.detach())
                    validation_running_scalars['Predictor/Count MAE'] += torch.abs(predictor_predicted_counts -
                                                                                   labels.sum(1).sum(1)).mean().data[0]
                    validation_running_scalars['Predictor/Count ME'] += (predictor_predicted_counts -
                                                                         labels.sum(1).sum(1)).mean().data[0]
                comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),
                                                                              cpu(predicted_labels))
                validation_summary_writer.add_image('Comparison', comparison_image, global_step=step)
                for name, running_scalar in validation_running_scalars.items():
                    mean_scalar = running_scalar / len(validation_dataset)
                    validation_summary_writer.add_scalar(name, mean_scalar, global_step=step)
                    validation_running_scalars[name] = 0
            step += 1
        epoch += 1
        if epoch != 0 and epoch % settings.save_epoch_period == 0:
            save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')
            save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')
    save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')
    save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')
    print('Finished Training')
    return trial_directory
Example 5
def train(settings=None):
    """Main script for training the semi-supervised GAN."""
    if not settings:
        settings = settings_
    train_transform = torchvision.transforms.Compose([
        transforms.RandomlySelectPatchAndRescale(),
        transforms.RandomHorizontalFlip(),
        transforms.NegativeOneToOneNormalizeImage(),
        transforms.NumpyArraysToTorchTensors()
    ])
    validation_transform = torchvision.transforms.Compose([
        transforms.RandomlySelectPatchAndRescale(),
        transforms.NegativeOneToOneNormalizeImage(),
        transforms.NumpyArraysToTorchTensors()
    ])

    train_dataset = CrowdDatasetWithUnlabeled(settings.train_dataset_path,
                                              'train',
                                              transform=train_transform)
    train_dataset_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=settings.batch_size,
        shuffle=True,
        num_workers=settings.number_of_data_loader_workers)
    validation_dataset = CrowdDataset(settings.validation_dataset_path,
                                      'validation',
                                      transform=validation_transform)
    validation_dataset_loader = torch.utils.data.DataLoader(
        validation_dataset,
        batch_size=settings.batch_size,
        shuffle=False,
        num_workers=settings.number_of_data_loader_workers)

    gan = GAN()
    gpu(gan)
    D = gan.D
    G = gan.G
    P = gan.P
    gpu(P)
    discriminator_optimizer = Adam(D.parameters())
    generator_optimizer = Adam(G.parameters())
    predictor_optimizer = Adam(P.parameters())

    step = 0
    epoch = 0

    if settings.load_model_path:
        d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(
            prefix='discriminator')
        D.load_state_dict(d_model_state_dict)
        discriminator_optimizer.load_state_dict(d_optimizer_state_dict)
    discriminator_optimizer.param_groups[0].update({
        'lr': 1e-3,
        'weight_decay': settings.weight_decay
    })
    if settings.load_model_path:
        g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(
            prefix='generator')
        G.load_state_dict(g_model_state_dict)
        generator_optimizer.load_state_dict(g_optimizer_state_dict)
    generator_optimizer.param_groups[0].update({'lr': 1e-3})

    running_scalars = defaultdict(float)
    validation_running_scalars = defaultdict(float)
    running_example_count = 0
    datetime_string = datetime.datetime.now().strftime("y%Ym%md%dh%Hm%Ms%S")
    trial_directory = os.path.join(settings.log_directory,
                                   settings.trial_name + ' ' + datetime_string)
    os.makedirs(trial_directory, exist_ok=True)
    summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))
    validation_summary_writer = SummaryWriter(
        os.path.join(trial_directory, 'validation'))
    print('Starting training...')
    while epoch < settings.number_of_epochs:
        for examples, unlabeled_examples in train_dataset_loader:
            # Real image discriminator processing.
            discriminator_optimizer.zero_grad()
            images, labels, _ = examples
            images, labels = Variable(gpu(images)), Variable(gpu(labels))
            predicted_labels, predicted_counts = D(images)
            real_feature_layer = D.feature_layer
            density_loss = torch.abs(predicted_labels - labels).pow(
                settings.loss_order).sum(1).sum(1).mean()
            count_loss = torch.abs(predicted_counts -
                                   labels.sum(1).sum(1)).pow(
                                       settings.loss_order).mean()
            loss = count_loss + (density_loss * 10)
            loss.backward()
            running_scalars['Labeled/Loss'] += loss.data[0]
            running_scalars['Labeled/Count Loss'] += count_loss.data[0]
            running_scalars['Labeled/Density Loss'] += density_loss.data[0]
            running_scalars['Labeled/Count ME'] += (
                predicted_counts - labels.sum(1).sum(1)).mean().data[0]
            # Predictor.
            predictor_optimizer.zero_grad()
            predictor_predicted_counts = P(predicted_counts.detach())
            predictor_count_loss = torch.abs(predictor_predicted_counts -
                                             labels.sum(1).sum(1)).pow(
                                                 settings.loss_order).mean()
            predictor_count_loss.backward()
            predictor_optimizer.step()
            running_scalars[
                'Predictor/Count Loss'] += predictor_count_loss.data[0]
            running_scalars['Predictor/Count MAE'] += torch.abs(
                predictor_predicted_counts -
                labels.sum(1).sum(1)).mean().data[0]
            running_scalars['Predictor/Count ME'] += (
                predictor_predicted_counts -
                labels.sum(1).sum(1)).mean().data[0]
            running_scalars['Predictor/Exponent'] += P.exponent.data[0]
            # Unlabeled image discriminator processing.
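            # Hinge losses keep the mean unlabeled prediction within [labeled mean / beta, labeled mean * beta].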
            unlabeled_images, _, _ = unlabeled_examples
            unlabeled_images = Variable(gpu(unlabeled_images))
            unlabeled_predicted_labels, unlabeled_predicted_counts = D(
                unlabeled_images)
            label_count_mean = labels.sum(1).sum(1).mean()
            count_mean = labels.sum(1).sum(1).mean()
            unlabeled_predicted_count_mean = unlabeled_predicted_counts.mean()
            unlabeled_predicted_label_count_mean = unlabeled_predicted_labels.sum(
                1).sum(1).mean()
            beta = 2.0
            # noinspection PyArgumentList
            zero = Variable(gpu(torch.FloatTensor([0])))
            unlabeled_count_loss_min = torch.max(
                zero, count_mean / beta - unlabeled_predicted_count_mean)
            unlabeled_count_loss_max = torch.max(
                zero, unlabeled_predicted_count_mean - count_mean * beta)
            unlabeled_label_loss_min = torch.max(
                zero,
                label_count_mean / beta - unlabeled_predicted_label_count_mean)
            unlabeled_label_loss_max = torch.max(
                zero,
                unlabeled_predicted_label_count_mean - label_count_mean * beta)
            unlabeled_density_loss = unlabeled_label_loss_max + unlabeled_label_loss_min
            unlabeled_count_loss = unlabeled_count_loss_max + unlabeled_count_loss_min
            unlabeled_loss = unlabeled_count_loss + (unlabeled_density_loss *
                                                     10)
            running_scalars['Unlabeled/Count ME'] += (
                unlabeled_predicted_count_mean - count_mean).data[0]
            running_scalars[
                'Unlabeled/Count'] += unlabeled_predicted_count_mean.data[0]
            running_scalars['Unlabeled/Loss'] += unlabeled_loss.data[0]
            unlabeled_loss.backward()
            # Fake image discriminator processing.
            current_batch_size = images.data.shape[0]
            z = torch.randn(current_batch_size, 100)
            fake_images = G(Variable(gpu(z)))
            fake_predicted_labels, fake_predicted_counts = D(fake_images)
            fake_density_loss = torch.abs(fake_predicted_labels).pow(
                settings.loss_order).sum(1).sum(1).mean()
            fake_count_loss = torch.abs(fake_predicted_counts).pow(
                settings.loss_order).mean()
            fake_mean_count = fake_predicted_counts.mean()
            fake_discriminator_loss = fake_count_loss + (fake_density_loss *
                                                         10)
            running_scalars['Fake/Count'] += fake_mean_count.data[0]
            running_scalars['Fake/Loss'] += fake_discriminator_loss.data[0]
            fake_discriminator_loss.backward(retain_graph=True)
            # Gradient penalty.
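            # WGAN-GP style penalty on a random convex combination of labeled, unlabeled, and fake images.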
            alpha = Variable(gpu(torch.rand(3, current_batch_size, 1, 1, 1)))
            alpha = alpha / alpha.sum(0)
            interpolates = alpha[0] * images + alpha[
                1] * unlabeled_images + alpha[2] * fake_images
            interpolates_labels, interpolates_counts = D(interpolates)
            density_gradients = torch.autograd.grad(
                outputs=interpolates_labels,
                inputs=interpolates,
                grad_outputs=gpu(torch.ones(interpolates_labels.size())),
                create_graph=True,
                retain_graph=True,
                only_inputs=True)[0]
            density_gradients = density_gradients.view(current_batch_size, -1)
            density_gradient_penalty = (
                (density_gradients.norm(2, dim=1) - 1)**2).mean() * 10
            count_gradients = torch.autograd.grad(
                outputs=interpolates_counts,
                inputs=interpolates,
                grad_outputs=gpu(torch.ones(interpolates_counts.size())),
                create_graph=True,
                retain_graph=True,
                only_inputs=True)[0]
            count_gradients = count_gradients.view(current_batch_size, -1)
            count_gradients_penalty = (
                (count_gradients.norm(2, dim=1) - 1)**2).mean() * 10
            gradient_penalty = count_gradients_penalty + density_gradient_penalty * 10
            gradient_penalty.backward()
            # Discriminator update.
            discriminator_optimizer.step()
            # Generator image processing.
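            # Feature matching: weight the real feature layer by predicted counts and densities, then match the fake feature means.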
            generator_optimizer.zero_grad()
            z = torch.randn(current_batch_size, 100)
            fake_images = G(Variable(gpu(z)))
            _, _ = D(fake_images)  # Produces feature layer for next line.
            fake_feature_layer = D.feature_layer
            detached_predicted_counts = predicted_counts.detach()
            detached_predicted_labels = predicted_labels.detach()
            detached_real_feature_layer = real_feature_layer.detach()
            # noinspection PyArgumentList
            epsilon = Variable(gpu(torch.FloatTensor([1e-10])))
            count_weights = (detached_predicted_counts / torch.max(
                detached_predicted_counts.sum(), epsilon)).view(-1, 1, 1, 1)
            labels_weights = (
                detached_predicted_labels.sum(1).sum(1) /
                torch.max(detached_predicted_labels.sum(), epsilon)).view(
                    -1, 1, 1, 1)
            feature_weights = (count_weights + (labels_weights * 10)) / 11
            weighted_real_feature_layer = feature_weights * detached_real_feature_layer
            generator_loss = (weighted_real_feature_layer.mean(0) -
                              fake_feature_layer.mean(0)).abs().sum()
            running_scalars['Generator/Loss'] += generator_loss.data[0]
            # Generator update.
            if step % 5 == 0:
                generator_loss.backward()
                generator_optimizer.step()

            running_example_count += images.size()[0]
            if step % settings.summary_step_period == 0 and step != 0:
                comparison_image = viewer.create_crowd_images_comparison_grid(
                    cpu(images), cpu(labels), cpu(predicted_labels))
                summary_writer.add_image('Comparison',
                                         comparison_image,
                                         global_step=step)
                fake_images_image = torchvision.utils.make_grid(
                    fake_images.data[:9], nrow=3)
                summary_writer.add_image('Fake',
                                         fake_images_image,
                                         global_step=step)
                mean_loss = running_scalars[
                    'Labeled/Loss'] / running_example_count
                print('[Epoch: {}, Step: {}] Loss: {:g}'.format(
                    epoch, step, mean_loss))
                for name, running_scalar in running_scalars.items():
                    mean_scalar = running_scalar / running_example_count
                    summary_writer.add_scalar(name,
                                              mean_scalar,
                                              global_step=step)
                    running_scalars[name] = 0
                running_example_count = 0
                for validation_examples in validation_dataset_loader:
                    images, labels, _ = validation_examples
                    images, labels = Variable(gpu(images)), Variable(
                        gpu(labels))
                    predicted_labels, predicted_counts = D(images)
                    density_loss = torch.abs(predicted_labels - labels).pow(
                        settings.loss_order).sum(1).sum(1).mean()
                    count_loss = torch.abs(predicted_counts -
                                           labels.sum(1).sum(1)).pow(
                                               settings.loss_order).mean()
                    count_mae = torch.abs(predicted_counts -
                                          labels.sum(1).sum(1)).mean()
                    count_me = (predicted_counts - labels.sum(1).sum(1)).mean()
                    validation_running_scalars[
                        'Labeled/Density Loss'] += density_loss.data[0]
                    validation_running_scalars[
                        'Labeled/Count Loss'] += count_loss.data[0]
                    validation_running_scalars[
                        'Labeled/Count MAE'] += count_mae.data[0]
                    validation_running_scalars[
                        'Labeled/Count ME'] += count_me.data[0]
                    predictor_predicted_counts = P(predicted_counts.detach())
                    validation_running_scalars[
                        'Predictor/Count MAE'] += torch.abs(
                            predictor_predicted_counts -
                            labels.sum(1).sum(1)).mean().data[0]
                    validation_running_scalars['Predictor/Count ME'] += (
                        predictor_predicted_counts -
                        labels.sum(1).sum(1)).mean().data[0]
                comparison_image = viewer.create_crowd_images_comparison_grid(
                    cpu(images), cpu(labels), cpu(predicted_labels))
                validation_summary_writer.add_image('Comparison',
                                                    comparison_image,
                                                    global_step=step)
                for name, running_scalar in validation_running_scalars.items():
                    mean_scalar = running_scalar / len(validation_dataset)
                    validation_summary_writer.add_scalar(name,
                                                         mean_scalar,
                                                         global_step=step)
                    validation_running_scalars[name] = 0
            step += 1
        epoch += 1
        if epoch != 0 and epoch % settings.save_epoch_period == 0:
            save_trainer(trial_directory,
                         D,
                         discriminator_optimizer,
                         epoch,
                         step,
                         prefix='discriminator')
            save_trainer(trial_directory,
                         G,
                         generator_optimizer,
                         epoch,
                         step,
                         prefix='generator')
    save_trainer(trial_directory,
                 D,
                 discriminator_optimizer,
                 epoch,
                 step,
                 prefix='discriminator')
    save_trainer(trial_directory,
                 G,
                 generator_optimizer,
                 epoch,
                 step,
                 prefix='generator')
    print('Finished Training')
    return trial_directory
Example 6
def train(settings=None):
    """Main script for training the semi-supervised GAN."""
    if not settings:
        settings = Settings()
    train_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),
                                                      transforms.RandomHorizontalFlip(),
                                                      transforms.NegativeOneToOneNormalizeImage(),
                                                      transforms.NumpyArraysToTorchTensors()])
    validation_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),
                                                           transforms.NegativeOneToOneNormalizeImage(),
                                                           transforms.NumpyArraysToTorchTensors()])

    train_dataset = CrowdDatasetWithUnlabeled(settings.train_dataset_path, 'train', transform=train_transform)
    train_dataset_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings.batch_size, shuffle=True,
                                                       num_workers=settings.number_of_data_loader_workers)
    validation_dataset = CrowdDataset(settings.validation_dataset_path, 'validation', transform=validation_transform)
    validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=settings.batch_size,
                                                            shuffle=False,
                                                            num_workers=settings.number_of_data_loader_workers)

    gan = GAN()
    gpu(gan)
    D = gan.D
    G = gan.G
    discriminator_optimizer = Adam(D.parameters())
    generator_optimizer = Adam(G.parameters())

    step = 0
    epoch = 0

    if settings.load_model_path:
        d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(prefix='discriminator',
                                                                               settings=settings)
        D.load_state_dict(d_model_state_dict)
        discriminator_optimizer.load_state_dict(d_optimizer_state_dict)
    discriminator_optimizer.param_groups[0].update({'lr': settings.learning_rate, 'weight_decay': settings.weight_decay})
    if settings.load_model_path:
        g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(prefix='generator',
                                                                        settings=settings)
        G.load_state_dict(g_model_state_dict)
        generator_optimizer.load_state_dict(g_optimizer_state_dict)
    generator_optimizer.param_groups[0].update({'lr': settings.learning_rate})

    running_scalars = defaultdict(float)
    validation_running_scalars = defaultdict(float)
    running_example_count = 0
    datetime_string = datetime.datetime.now().strftime("y%Ym%md%dh%Hm%Ms%S")
    trial_directory = os.path.join(settings.log_directory, settings.trial_name + ' ' + datetime_string)
    os.makedirs(trial_directory, exist_ok=True)
    summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))
    validation_summary_writer = SummaryWriter(os.path.join(trial_directory, 'validation'))
    print('Starting training...')
    step_time_start = datetime.datetime.now()
    while epoch < settings.number_of_epochs:
        for examples, unlabeled_examples in train_dataset_loader:
            unlabeled_images = unlabeled_examples[0]
            # Real image discriminator processing.
            discriminator_optimizer.zero_grad()
            images, labels, _ = examples
            images, labels = Variable(gpu(images)), Variable(gpu(labels))
            current_batch_size = images.data.shape[0]
            predicted_labels, predicted_counts = D(images)
            real_feature_layer = D.feature_layer
            density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()
            count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()
            loss = count_loss + (density_loss * 10)
            loss.backward()
            running_scalars['Labeled/Loss'] += loss.data[0]
            running_scalars['Labeled/Count Loss'] += count_loss.data[0]
            running_scalars['Labeled/Density Loss'] += density_loss.data[0]
            running_scalars['Labeled/Count ME'] += (predicted_counts - labels.sum(1).sum(1)).mean().data[0]
            # Unlabeled.
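            # Pull features of unlabeled images toward features of labeled images.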
            _ = D(gpu(images))
            labeled_feature_layer = D.feature_layer
            _ = D(gpu(Variable(unlabeled_images)))
            unlabeled_feature_layer = D.feature_layer
            unlabeled_loss = feature_distance_loss(unlabeled_feature_layer, labeled_feature_layer,
                                                   scale=False) * settings.unlabeled_loss_multiplier
            unlabeled_loss.backward()
            # Fake.
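            # Push fake-image features away from unlabeled-image features (negated feature distance).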
            _ = D(gpu(Variable(unlabeled_images)))
            unlabeled_feature_layer = D.feature_layer
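            # Generator noise comes from a two-component Gaussian mixture centered at ±settings.mean_offset.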
            z = torch.from_numpy(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(
                size=[current_batch_size, 100]).astype(np.float32))
            # z = torch.randn(settings.batch_size, noise_size)
            fake_examples = G(gpu(Variable(z)))
            _ = D(fake_examples.detach())
            fake_feature_layer = D.feature_layer
            fake_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer,
                                              order=1).neg() * settings.fake_loss_multiplier
            fake_loss.backward()
            # Feature norm loss.
            _ = D(gpu(Variable(unlabeled_images)))
            unlabeled_feature_layer = D.feature_layer
            feature_norm_loss = (unlabeled_feature_layer.norm(dim=1).mean() - 1).pow(2)
            feature_norm_loss.backward()
            # Gradient penalty.
            if settings.gradient_penalty_on:
                alpha = gpu(Variable(torch.rand(2)))
                alpha = alpha / alpha.sum(0)
                interpolates = (alpha[0] * gpu(Variable(unlabeled_images, requires_grad=True)) +
                                alpha[1] * gpu(Variable(fake_examples.detach().data, requires_grad=True)))
                _ = D(interpolates)
                interpolates_predictions = D.feature_layer
                gradients = torch.autograd.grad(outputs=interpolates_predictions, inputs=interpolates,
                                                grad_outputs=gpu(torch.ones(interpolates_predictions.size())),
                                                create_graph=True, only_inputs=True)[0]
                gradient_penalty = ((gradients.norm(dim=1) - 1) ** 2).mean() * settings.gradient_penalty_multiplier
                gradient_penalty.backward()
            # Discriminator update.
            discriminator_optimizer.step()
            # Generator.
            if step % 1 == 0:
                generator_optimizer.zero_grad()
                _ = D(gpu(Variable(unlabeled_images)))
                unlabeled_feature_layer = D.feature_layer.detach()
                z = torch.randn(current_batch_size, 100)
                fake_examples = G(gpu(Variable(z)))
                _ = D(fake_examples)
                fake_feature_layer = D.feature_layer
                generator_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer)
                generator_loss.backward()
                generator_optimizer.step()

            running_example_count += images.size()[0]
            if step % settings.summary_step_period == 0 and step != 0:
                comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),
                                                                              cpu(predicted_labels))
                summary_writer.add_image('Comparison', comparison_image, global_step=step)
                fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9], nrow=3)
                summary_writer.add_image('Fake', fake_images_image, global_step=step)
                print('\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')
                step_time_start = datetime.datetime.now()
                for name, running_scalar in running_scalars.items():
                    mean_scalar = running_scalar / running_example_count
                    summary_writer.add_scalar(name, mean_scalar, global_step=step)
                    running_scalars[name] = 0
                running_example_count = 0
                for validation_examples in validation_dataset_loader:
                    images, labels, _ = validation_examples
                    images, labels = Variable(gpu(images)), Variable(gpu(labels))
                    predicted_labels, predicted_counts = D(images)
                    density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()
                    count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()
                    count_mae = torch.abs(predicted_counts - labels.sum(1).sum(1)).mean()
                    count_me = (predicted_counts - labels.sum(1).sum(1)).mean()
                    validation_running_scalars['Labeled/Density Loss'] += density_loss.data[0]
                    validation_running_scalars['Labeled/Count Loss'] += count_loss.data[0]
                    validation_running_scalars['Test/Count MAE'] += count_mae.data[0]
                    validation_running_scalars['Labeled/Count ME'] += count_me.data[0]
                comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),
                                                                              cpu(predicted_labels))
                validation_summary_writer.add_image('Comparison', comparison_image, global_step=step)
                for name, running_scalar in validation_running_scalars.items():
                    mean_scalar = running_scalar / len(validation_dataset)
                    validation_summary_writer.add_scalar(name, mean_scalar, global_step=step)
                    validation_running_scalars[name] = 0
            step += 1
        epoch += 1
        if epoch != 0 and epoch % settings.save_epoch_period == 0:
            save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')
            save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')
    save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')
    save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')
    print('Finished Training')
    return trial_directory
Example 7
    # -- Setting seeds for reproducibility
    np.random.seed(11)
    random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.cuda.manual_seed(11)
    torch.cuda.manual_seed_all(11)

    # -- Dataset paths
    if args.dataset == "VisDrone":
        validation_set = 30
        path = "./VisDrone2020-CC"
        output_downscale = 4
        dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set,
                               gt_downscale_factor=output_downscale, stage_1=True)
    elif args.dataset == "ucfqnrf":
        validation_set = 240
        output_downscale = 4
        path = "../../dataset/UCF-QNRF_ECCV18"
        dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set,
                               gt_downscale_factor=output_downscale, stage_1=True,
                               image_size_max=768)

    model_save_dir = './models'
    batch_size = args.batch_size

    print(dataset.data_files['test_valid'],
          len(dataset.data_files['test_valid']))
    print(dataset.data_files['train'], len(dataset.data_files['train']))

    # -- Train the model
Example 8
def test(settings=None):
    """Main script for testing a model."""
    if not settings:
        settings = settings_

    # test_dataset = CrowdDataset(settings.test_dataset_path, 'test')

    net = JointCNN()
    model_state_dict, _, _, _ = load_trainer(prefix='discriminator')
    net.load_state_dict(model_state_dict)
    gpu(net)
    net.eval()

    count_errors = []
    density_errors = []
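    # Per-scene errors are only filled by the commented-out test-set loop below, so these lists stay empty here.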
    # print('Starting test...')
    # scene_number = 1
    # running_count = 0
    # running_count_error = 0
    # running_density_error = 0
    # for full_example_index, full_example in enumerate(test_dataset):
    #     print('Processing example {}'.format(full_example_index), end='\r')
    #     sum_density_label = np.zeros_like(full_example.label, dtype=np.float32)
    #     sum_count_label = np.zeros_like(full_example.label, dtype=np.float32)
    #     hit_predicted_label = np.zeros_like(full_example.label, dtype=np.int32)
    #     for batch in batches_of_examples_with_meta(full_example):
    #         images = torch.stack([example_with_meta.example.image for example_with_meta in batch])
    #         rois = torch.stack([example_with_meta.example.roi for example_with_meta in batch])
    #         images = Variable(gpu(images))
    #         predicted_labels, predicted_counts = net(images)
    #         predicted_labels = predicted_labels * Variable(gpu(rois))
    #         predicted_labels = cpu(predicted_labels.data).numpy()
    #         predicted_counts = cpu(predicted_counts.data).numpy()
    #         for example_index, example_with_meta in enumerate(batch):
    #             predicted_label = predicted_labels[example_index]
    #             predicted_count = predicted_counts[example_index]
    #             x, y = example_with_meta.x, example_with_meta.y
    #             half_patch_size = example_with_meta.half_patch_size
    #             predicted_label_sum = np.sum(predicted_label)
    #             original_patch_dimensions = ((2 * half_patch_size) + 1, (2 * half_patch_size) + 1)
    #             predicted_label = scipy.misc.imresize(predicted_label, original_patch_dimensions, mode='F')
    #             unnormalized_predicted_label_sum = np.sum(predicted_label)
    #             if unnormalized_predicted_label_sum != 0:
    #                 density_label = predicted_label * predicted_label_sum / unnormalized_predicted_label_sum
    #                 count_label = predicted_label * predicted_count / unnormalized_predicted_label_sum
    #             else:
    #                 density_label = predicted_label
    #                 count_label = np.full(predicted_label.shape, predicted_count / predicted_label.size)
    #             y_start_offset = 0
    #             if y - half_patch_size < 0:
    #                 y_start_offset = half_patch_size - y
    #             y_end_offset = 0
    #             if y + half_patch_size >= full_example.label.shape[0]:
    #                 y_end_offset = y + half_patch_size + 1 - full_example.label.shape[0]
    #             x_start_offset = 0
    #             if x - half_patch_size < 0:
    #                 x_start_offset = half_patch_size - x
    #             x_end_offset = 0
    #             if x + half_patch_size >= full_example.label.shape[1]:
    #                 x_end_offset = x + half_patch_size + 1 - full_example.label.shape[1]
    #             sum_density_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
    #                               x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
    #                               ] += density_label[y_start_offset:density_label.shape[0] - y_end_offset,
    #                                                  x_start_offset:density_label.shape[1] - x_end_offset]
    #             sum_count_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
    #                             x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
    #                             ] += count_label[y_start_offset:count_label.shape[0] - y_end_offset,
    #                                              x_start_offset:count_label.shape[1] - x_end_offset]
    #             hit_predicted_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
    #                                 x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
    #                                 ] += 1
    #     sum_density_label *= full_example.roi
    #     sum_count_label *= full_example.roi
    #     full_predicted_label = sum_density_label / hit_predicted_label.astype(np.float32)
    #     full_predicted_count = np.sum(sum_count_label / hit_predicted_label.astype(np.float32))
    #     label_in_roi = full_example.label * full_example.roi
    #     density_loss = np.abs(full_predicted_label - label_in_roi).sum()
    #     count_loss = np.abs(full_predicted_count - label_in_roi.sum())
    #     running_count += full_example.label.sum()
    #     running_count_error += count_loss
    #     running_density_error += density_loss
    #     if ((full_example_index + 1) % 120) == 0:
    #         print('Scene {}'.format(scene_number))
    #         print('Total count: {}'.format(running_count))
    #         count_error = running_count_error / 120
    #         print('Mean count error: {}'.format(count_error))
    #         density_error = running_density_error / 120
    #         print('Mean density error: {}'.format(density_error))
    #         count_errors.append(count_error)
    #         density_errors.append(density_error)
    #         running_count = 0
    #         running_count_error = 0
    #         running_density_error = 0
    #         scene_number += 1

    validation_dataset = CrowdDataset(settings.validation_dataset_path,
                                      '200608 Time Lapse Demo')

    print('Starting test...')
    running_count = 0
    running_count_error = 0
    running_density_error = 0
    initial_label = validation_dataset[0].label
    full_predicted_labels = np.zeros(shape=(len(validation_dataset),
                                            initial_label.shape[0],
                                            initial_label.shape[1]),
                                     dtype=np.float32)
    for full_example_index, full_example in enumerate(validation_dataset):
        print('Processing example {}'.format(full_example_index), end='\r')
        sum_density_label = np.zeros_like(full_example.label, dtype=np.float32)
        sum_count_label = np.zeros_like(full_example.label, dtype=np.float32)
        hit_predicted_label = np.zeros_like(full_example.label, dtype=np.int32)
        for batch in batches_of_examples_with_meta(full_example):
            images = torch.stack([
                example_with_meta.example.image for example_with_meta in batch
            ])
            rois = torch.stack(
                [example_with_meta.example.roi for example_with_meta in batch])
            images = Variable(gpu(images))
            predicted_labels, predicted_counts = net(images)
            predicted_labels = predicted_labels * Variable(gpu(rois))
            predicted_labels = cpu(predicted_labels.data).numpy()
            predicted_counts = cpu(predicted_counts.data).numpy()
            for example_index, example_with_meta in enumerate(batch):
                predicted_label = predicted_labels[example_index]
                predicted_count = predicted_counts[example_index]
                x, y = example_with_meta.x, example_with_meta.y
                half_patch_size = example_with_meta.half_patch_size
                predicted_label_sum = np.sum(predicted_label)
                original_patch_dimensions = ((2 * half_patch_size) + 1,
                                             (2 * half_patch_size) + 1)
                predicted_label = scipy.misc.imresize(
                    predicted_label, original_patch_dimensions, mode='F')
                unnormalized_predicted_label_sum = np.sum(predicted_label)
                if unnormalized_predicted_label_sum != 0:
                    density_label = predicted_label * predicted_label_sum / unnormalized_predicted_label_sum
                    count_label = predicted_label * predicted_count / unnormalized_predicted_label_sum
                else:
                    density_label = predicted_label
                    count_label = np.full(
                        predicted_label.shape,
                        predicted_count / predicted_label.size)
                y_start_offset = 0
                if y - half_patch_size < 0:
                    y_start_offset = half_patch_size - y
                y_end_offset = 0
                if y + half_patch_size >= full_example.label.shape[0]:
                    y_end_offset = y + half_patch_size + 1 - full_example.label.shape[0]
                x_start_offset = 0
                if x - half_patch_size < 0:
                    x_start_offset = half_patch_size - x
                x_end_offset = 0
                if x + half_patch_size >= full_example.label.shape[1]:
                    x_end_offset = x + half_patch_size + 1 - full_example.label.shape[1]
                sum_density_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
                                  x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
                                  ] += density_label[y_start_offset:density_label.shape[0] - y_end_offset,
                                                     x_start_offset:density_label.shape[1] - x_end_offset]
                sum_count_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
                                x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
                                ] += count_label[y_start_offset:count_label.shape[0] - y_end_offset,
                                                 x_start_offset:count_label.shape[1] - x_end_offset]
                hit_predicted_label[y - half_patch_size + y_start_offset:y + half_patch_size + 1 - y_end_offset,
                                    x - half_patch_size + x_start_offset:x + half_patch_size + 1 - x_end_offset
                                    ] += 1
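        # Give pixels never covered by a patch a hit count of 1 so the division below is safe.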
        hit_predicted_label[hit_predicted_label == 0] = 1
        sum_density_label *= full_example.roi
        sum_count_label *= full_example.roi
        full_predicted_label = sum_density_label / hit_predicted_label.astype(
            np.float32)
        full_predicted_count = np.sum(sum_count_label /
                                      hit_predicted_label.astype(np.float32))
        label_in_roi = full_example.label * full_example.roi
        density_loss = np.abs(full_predicted_label - label_in_roi).sum()
        count_loss = np.abs(full_predicted_count - label_in_roi.sum())
        running_count += full_example.label.sum()
        running_count_error += count_loss
        running_density_error += density_loss
        full_predicted_labels[full_example_index] = full_predicted_label
    validation_count_error = running_count_error / len(validation_dataset)

    csv_file_path = os.path.join(settings.log_directory, 'Test Results.csv')
    if not os.path.isfile(csv_file_path):
        with open(csv_file_path, 'w') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow([
                'Run Name', 'Scene 1', 'Scene 2', 'Scene 3', 'Scene 4',
                'Scene 5', 'Mean', 'Scene 1 Density', 'Scene 2 Density',
                'Scene 3 Density', 'Scene 4 Density', 'Scene 5 Density',
                'Mean Density', 'Mean Validation'
            ])
    with open(csv_file_path, 'a') as csv_file:
        writer = csv.writer(csv_file)
        path_list = os.path.normpath(settings.load_model_path).split(os.sep)
        model_name = os.path.join(*path_list[-2:])
        test_results = [
            model_name, *count_errors,
            np.mean(count_errors), *density_errors,
            np.mean(density_errors), validation_count_error
        ]
        writer.writerow(test_results)

    np.save(
        os.path.join(settings.log_directory,
                     'predicted_labels_time_lapse.npy'), full_predicted_labels)

    print('Finished test.')
    settings.load_model_path = None
    return np.mean(count_errors)