Code Example #1
def make_step(model, input_image, control=None, step_size=0.1, end=28, jitter=None):
    if jitter:
        ox, oy = np.random.randint(-jitter, jitter+1, 2)
        input_image = np.roll(np.roll(input_image, ox, -1), oy, -2)
    tensor = torch.Tensor(input_image).unsqueeze(0)
    # torch.autograd.Variable is deprecated; a plain tensor with
    # requires_grad is equivalent.
    image_var = tensor.to(torch_utils.get_device()).requires_grad_(True)
    model.zero_grad()
    x = image_var
    for index, layer in enumerate(model.features.children()):
        x = layer(x)
        if index == end:
            break

    delta = objective(x, control)
    x.backward(delta)

    # Normalize the gradient by its root-mean-square magnitude so the ascent
    # step size is scale-invariant.
    mean_square = torch.mean(image_var.grad.data ** 2)
    image_var.grad.data /= torch.sqrt(mean_square)
    image_var.data.add_(image_var.grad.data * step_size)

    result = image_var.squeeze().data.cpu().numpy()
    if jitter:
        result = np.roll(np.roll(result, -ox, -1), -oy, -2)
    return torch.Tensor(result)
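A minimal driver loop for make_step could look like the sketch below. It assumes the snippet's module-level objective and torch_utils helpers are in scope; the image shape, step count, and step size are illustrative.

import numpy as np
from torchvision import models

model = models.vgg19(pretrained=True).to(torch_utils.get_device()).eval()
img = np.random.uniform(size=(3, 224, 224)).astype(np.float32)  # CHW image
for _ in range(20):  # repeated jittered gradient-ascent steps
    img = make_step(model, img, step_size=0.05, end=28, jitter=32).numpy()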
Code Example #2
def generate_deep_dream_video(contentImgPath,
                              featureImgPaths,
                              musicPath,
                              outfilePath,
                              duration=None,
                              resolution=(124, 124),
                              frameLength=1024
                              ):

    featureImgs = [dataloader.image_loader(path, resolution) for path in featureImgPaths]
    contentImg = dataloader.image_loader(contentImgPath, resolution)
    vgg = models.vgg19(pretrained=True).to(torch_utils.get_device()).eval()
    if duration:
        audio_video_utils.cut_song(musicPath, 0, duration)
    specm, gradm = audio_video_utils.generate_spec_vector(musicPath, 1024)
    chroma, chromasort = audio_video_utils.generate_chroma_vector(musicPath, 1024)
    classVec = audio_video_utils.generate_class_vector(chroma, chromasort)
    lr_vector = gradm / 300 + specm / 300
    lr_vector[lr_vector < 2e-4] = 0
    lr_vector = np.convolve(lr_vector, np.ones((10,))/10, mode='same')
    frames = []
    for i in range(len(gradm)):
        print(f'Rendering frame {i + 1}/{len(gradm)}')
        frame = deepdream.run_deep_dream(resolution, contentImg, featureImgs[classVec[i]], vgg, 34, 40,
                                         lr=lr_vector[i] + 0.0001)
        frames.append(frame)
    audio_video_utils.make_movie(frames, musicPath, outfilePath, frameLength=frameLength)
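A hypothetical invocation; every path below is a placeholder, and the number of feature images is assumed to cover the class indices produced by generate_class_vector (one per chroma pitch class).

generate_deep_dream_video(
    contentImgPath='inputs/content.jpg',
    featureImgPaths=[f'inputs/feature_{i}.jpg' for i in range(12)],  # one per chroma class (assumed)
    musicPath='inputs/track.mp3',
    outfilePath='outputs/dream.mp4',
    duration=30)  # optionally trim the audio to 30 seconds first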
Code Example #3
def get_nst_model_and_losses(model, content_img, style_img, content_layers,
                             style_layers):
    """Creates the Neural Style Transfer model and losses.

    We assume the model was pretrained on ImageNet and normalize all inputs using
    the ImageNet mean and stddev.

    Args:
    model: The model to use for Neural Style Transfer. ContentLoss and StyleLoss
      modules will be inserted after each layer in content_layers and
      style_layers respectively.
    content_img: The content image to use when creating the ContentLosses.
    style_img: The style image to use when creating the StyleLosses.
    content_layers: The name of the layers after which a ContentLoss module will
      be inserted.
    style_layers: The name of the layers after which a StyleLoss module will be
      inserted.
    Returns: A three item tuple of the NST model with ContentLoss and StyleLoss
    modules inserted, the ContentLosses modules, and the StyleLosses modules.
    """
    nst_model = nn.Sequential(ImageNetNormalize())
    content_losses, style_losses, last_layer = [], [], 0
    for i, (name, layer) in enumerate(copy.deepcopy(model).named_children()):
        nst_model.add_module(name, layer)
        if name in content_layers:
            content_loss = ContentLoss(nst_model(content_img))
            nst_model.add_module(f'{name}_ContentLoss', content_loss)
            content_losses.append(content_loss)
            last_layer = i
        if name in style_layers:
            style_loss = StyleLoss(nst_model(style_img))
            nst_model.add_module(f'{name}_StyleLoss', style_loss)
            style_losses.append(style_loss)
            last_layer = i
    # Sanity check that we have the desired number of style and content layers.
    assert len(content_losses) == len(
        content_layers), 'Not all content layers found.'
    assert len(style_losses) == len(
        style_layers), 'Not all style layers found.'
    # Remove the layers after the last StyleLoss and ContentLoss since they will
    # not be used for style transfer. To get the correct last_layer index, we
    # take into account the ImageNetNormalization layer at the front and the
    # ContentLoss and StyleLoss layers.
    last_layer += 1 + len(content_losses) + len(style_losses)
    nst_model = nst_model[:last_layer + 1].to(torch_utils.get_device())
    return nst_model, content_losses, style_losses
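A sketch of wiring a renamed VGG19 into this builder. The layer names follow Gatys et al. and assume the renaming from Code Example #4 below; content_img and style_img are assumed to be preloaded image tensors.

from torchvision import models

vgg = rename_vgg_layers(models.vgg19(pretrained=True).features.eval())
nst_model, content_losses, style_losses = get_nst_model_and_losses(
    vgg,
    content_img,
    style_img,
    content_layers=['conv4_2'],
    style_layers=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'])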
Code Example #4
def rename_vgg_layers(model):
    """Renames VGG model layers to match those in the paper."""
    block, number = 1, 1
    renamed = nn.Sequential()
    for layer in model.children():
        if isinstance(layer, nn.Conv2d):
            name = f'conv{block}_{number}'
        elif isinstance(layer, nn.ReLU):
            name = f'relu{block}_{number}'
            # The inplace ReLU version doesn't play nicely with NST.
            layer = nn.ReLU(inplace=False)
            number += 1
        elif isinstance(layer, nn.MaxPool2d):
            name = f'pool_{block}'
            # Average pooling was found to generate images of higher quality than
            # max pooling by Gatys et al.
            layer = nn.AvgPool2d(layer.kernel_size, layer.stride)
            block += 1
            number = 1
        else:
            raise RuntimeError(
                f'Unrecognized layer "{layer.__class__.__name__}"')
        renamed.add_module(name, layer)
    return renamed.to(torch_utils.get_device())
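A quick sanity check of the renaming; for VGG19 this should print conv1_1, relu1_1, ..., pool_5.

from torchvision import models

renamed = rename_vgg_layers(models.vgg19(pretrained=True).features)
print([name for name, _ in renamed.named_children()])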
Code Example #5
File: benchmarker.py  Project: ndeutschmann/zunis
    def run(self, dimensions, integrand, *,
            base_integrand_params, base_integrator_config=None,
            integrand_params_grid=None, integrator_config_grid=None,
            n_batch=100000, debug=True, cuda=0,
            sql_dtypes=None, dbname=None, experiment_name="benchmark", keep_history=False):
        """Run benchmarks over a grid of parameters for the integrator and the integrand."""

        if debug:
            set_benchmark_logger_debug(zunis_integration_level=logging.DEBUG,
                                       zunis_training_level=logging.DEBUG, zunis_level=logging.DEBUG)
        else:
            set_benchmark_logger(experiment_name)

        device = get_device(cuda_ID=cuda)

        if isinstance(dimensions, int):
            dimensions = [dimensions]
        assert isinstance(dimensions, Sequence) and all([isinstance(d, int) for d in dimensions]) and len(
            dimensions) > 0, \
            "argument dimensions must be an integer or a list of integers"

        if sql_dtypes is None:
            sql_dtypes = get_sql_types()

        if integrand_params_grid is None and integrator_config_grid is None and len(dimensions) == 1:
            result, integrator = self.benchmark_method(dimensions[0], integrand=integrand,
                                           integrand_params=base_integrand_params,
                                           integrator_config=base_integrator_config,
                                           n_batch=n_batch, device=device, keep_history=keep_history)
            result = result.as_dataframe()

            if dbname is not None:
                append_dataframe_to_sqlite(result, dbname=dbname, tablename=experiment_name, dtypes=sql_dtypes)
            return result, integrator

        else:
            if integrand_params_grid is None:
                integrand_params_grid = dict()
            if integrator_config_grid is None:
                integrator_config_grid = dict()
            if base_integrator_config is None:
                base_integrator_config = get_default_integrator_config()

            integrator_config = deepcopy(base_integrator_config)
            integrand_params = deepcopy(base_integrand_params)

            benchmarks = self.generate_config_samples(dimensions, integrator_config_grid, integrand_params_grid)

            for d, integrator_config_update, integrand_params_update in benchmarks:
                logger.info("Benchmarking with:")
                logger.info(f"d = {d}")
                logger.info(f"integrator update: {integrator_config_update}")
                logger.info(f"integrand update: {integrand_params_update}")
                integrator_config.update(integrator_config_update)
                integrand_params.update(integrand_params_update)

                try:
                    result, _ = self.benchmark_method(d, integrand=integrand,
                                                   integrand_params=integrand_params,
                                                   integrator_config=integrator_config,
                                                   n_batch=n_batch, device=device,
                                                   keep_history=keep_history)
                    
                except Exception as e:
                    logger.exception(e)
                    result = NestedMapping()
                    result["d"] = d
                    result.update(integrator_config)
                    result.update(integrand_params)
                    result["extra_data"] = e

                if dbname is not None:
                    # Both branches leave `result` with an .as_dataframe()
                    # method; converting only here avoids calling it twice on
                    # the exception path.
                    append_dataframe_to_sqlite(result.as_dataframe(), dbname=dbname, tablename=experiment_name,
                                               dtypes=sql_dtypes)
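A hypothetical single-configuration run; benchmarker stands for a concrete subclass instance that provides benchmark_method, and MyIntegrand is a placeholder integrand class. Only the keyword structure mirrors the signature above.

result, integrator = benchmarker.run(
    dimensions=2,
    integrand=MyIntegrand,
    base_integrand_params={'s': 0.3},
    n_batch=100000,
    debug=True,
    experiment_name='benchmark')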
Code Example #6
def train(flags):
    # General options
    num_workers = flags.num_workers

    model_out_path = flags.out

    data_cfg = flags.data_cfg
    batch_size = flags.batch_size
    seq_len = flags.seq_len
    num_pts = flags.num_pts

    augment_quad = flags.augment_quad
    augment_pairs = flags.augment_pairs

    pretrain_tnocs = flags.pretrain_tnocs

    model_in_path = flags.weights
    radii_list = flags.radii
    local_feat_size = flags.local_feat_size
    latent_feat_size = flags.latent_feat_size
    ode_hidden_size = flags.ode_hidden_size
    motion_feat_size = flags.motion_feat_size
    cnf_blocks = flags.cnf_blocks
    regress_tnocs = flags.regress_tnocs

    cnf_loss_weight = flags.cnf_loss
    tnocs_loss_weight = flags.tnocs_loss

    # Train-only options
    parallel_train = flags.use_parallel

    num_epochs = flags.epochs
    val_every = flags.val_every
    save_every = flags.save_every
    print_stats_every = flags.print_every

    lr = flags.lr
    betas = (flags.beta1, flags.beta2)
    eps = flags.eps
    weight_decay = flags.decay

    # prepare output
    if not os.path.exists(model_out_path):
        os.mkdir(model_out_path)
    log_out = os.path.join(model_out_path, 'train_log.txt')
    log(log_out, flags)

    # load train and validation sets
    train_dataset = DynamicPCLDataset(data_cfg,
                                      split='train',
                                      train_frac=0.8,
                                      val_frac=0.1,
                                      num_pts=num_pts,
                                      seq_len=seq_len,
                                      shift_time_to_zero=(not pretrain_tnocs),
                                      random_point_sample=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True,
                              drop_last=True,
                              worker_init_fn=lambda _: np.random.seed()
                              )  # get around numpy RNG seed bug

    val_dataset = DynamicPCLDataset(data_cfg,
                                    split='val',
                                    train_frac=0.8,
                                    val_frac=0.1,
                                    num_pts=num_pts,
                                    seq_len=seq_len,
                                    shift_time_to_zero=(not pretrain_tnocs),
                                    random_point_sample=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers,
                            pin_memory=True,
                            drop_last=True,
                            worker_init_fn=lambda _: np.random.seed())

    if parallel_train:
        log(log_out,
            'Attempting to use all available GPUs for parallel training...')
    # gets GPU 0 if available, else CPU
    device = get_device()

    # create caspr model
    model = CaSPR(radii_list=radii_list,
                  local_feat_size=local_feat_size,
                  latent_feat_size=latent_feat_size,
                  ode_hidden_size=ode_hidden_size,
                  pretrain_tnocs=pretrain_tnocs,
                  augment_quad=augment_quad,
                  augment_pairs=augment_pairs,
                  cnf_blocks=cnf_blocks,
                  motion_feat_size=motion_feat_size,
                  regress_tnocs=regress_tnocs)

    if pretrain_tnocs and model_in_path != '':
        # load in only pretrained tnocs weights
        print('Loading weights for pre-trained canonicalizer from %s...' %
              (model_in_path))
        loaded_state_dict = torch.load(model_in_path, map_location=device)
        load_encoder_weights_from_full(model, loaded_state_dict)
    elif model_in_path != '':
        print('Loading model weights from %s...' % (model_in_path))
        loaded_state_dict = torch.load(model_in_path, map_location=device)
        load_weights(model, loaded_state_dict)

    if parallel_train:
        model = nn.DataParallel(model)
    model.to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=lr,
                           betas=betas,
                           eps=eps,
                           weight_decay=weight_decay)

    params = count_params(model)
    log(log_out, 'Num model params: ' + str(params))

    loss_tracker = TrainLossTracker()

    for epoch in range(num_epochs):
        # train
        run_one_epoch(model,
                      train_loader,
                      device,
                      optimizer,
                      cnf_loss_weight,
                      tnocs_loss_weight,
                      epoch,
                      loss_tracker,
                      log_out,
                      mode='train',
                      print_stats_every=print_stats_every)

        # validate
        if epoch % val_every == 0:
            # Disable autograd during validation to avoid running out of memory.
            with torch.no_grad():
                val_stat_tracker = TestStatTracker()

                run_one_epoch(model,
                              val_loader,
                              device,
                              None,
                              cnf_loss_weight,
                              tnocs_loss_weight,
                              epoch,
                              val_stat_tracker,
                              log_out,
                              mode='val',
                              print_stats_every=print_stats_every)

                # get final aggregate stats
                mean_losses = val_stat_tracker.get_mean_stats()
                total_loss_out, mean_cnf_err, mean_tnocs_pos_err, mean_tnocs_time_err, mean_nfe = mean_losses

                # early stopping - save if it's the best so far
                if not math.isnan(total_loss_out):
                    if len(loss_tracker.val_losses) == 0:
                        min_loss_so_far = True
                    else:
                        min_loss_so_far = total_loss_out < min(
                            loss_tracker.val_losses)

                    # record loss curve and print stats
                    loss_tracker.record_val_step(total_loss_out,
                                                 epoch * len(train_loader))
                    print_stats(log_out, epoch, 0, 0, total_loss_out,
                                mean_cnf_err, mean_tnocs_pos_err,
                                mean_tnocs_time_err, 'VAL', mean_nfe)

                    if min_loss_so_far:
                        log(log_out,
                            'BEST Val loss so far! Saving checkpoint...')
                        save_name = 'BEST_time_model.pth'
                        save_file = os.path.join(model_out_path, save_name)
                        torch.save(model.state_dict(), save_file)

            # viz loss curve
            loss_tracker.plot_cur_loss_curves(model_out_path)

        if epoch % save_every == 0:
            # save model parameters
            save_name = 'time_model_%d.pth' % (epoch)
            save_file = os.path.join(model_out_path, save_name)
            torch.save(model.state_dict(), save_file)
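A minimal sketch of driving train() with an argparse-style namespace. The field values are illustrative assumptions, not the project's defaults; the field names simply mirror the flags read above.

from argparse import Namespace

flags = Namespace(
    num_workers=4, out='./out', data_cfg='data/cfg.json',
    batch_size=8, seq_len=10, num_pts=1024,
    augment_quad=False, augment_pairs=False, pretrain_tnocs=False,
    weights='', radii=[0.1, 0.2, 0.4], local_feat_size=128,
    latent_feat_size=1024, ode_hidden_size=512, motion_feat_size=64,
    cnf_blocks=1, regress_tnocs=True, cnf_loss=1.0, tnocs_loss=1.0,
    use_parallel=False, epochs=100, val_every=5, save_every=10,
    print_every=10, lr=1e-4, beta1=0.9, beta2=0.999, eps=1e-8, decay=0.0)
train(flags)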
Code Example #7
import torch
import vegas

from utils.flat_integrals import evaluate_integral_flat
from utils.integral_validation import compare_integral_result
from zunis.integration import Integrator
# Also needed (imported elsewhere in the original file): get_device,
# get_benchmark_logger, get_benchmark_logger_debug, and
# RegulatedDiagonalGaussianIntegrand from the project's benchmark utilities.

#############################################################
#       DEBUG FLAG: set to False to log and save to file
#############################################################
debug = True
#############################################################

if debug:
    logger = get_benchmark_logger_debug("benchmark_reg_gaussian")
else:
    logger = get_benchmark_logger("benchmark_reg_gaussian")

device = get_device(cuda_ID=0)


def benchmark_reg_gaussian(d, s=0.3, reg=1.e-6):
    logger.info(f"Benchmarking a regulated gaussian with d={d} and s={s:.1f}")
    gaussian = RegulatedDiagonalGaussianIntegrand(d=d,
                                                  device=device,
                                                  s=s,
                                                  reg=reg)

    @vegas.batchintegrand
    def vgaussian(x):
        return gaussian(torch.tensor(x).to(device)).cpu()

    integrator = Integrator(
        d=d,
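The snippet above is truncated mid-call. As a self-contained illustration of the @vegas.batchintegrand bridge it uses: vegas hands the integrand a whole batch of points of shape (n, d) as a numpy array, so the wrapper converts to a torch tensor, evaluates, and converts back. The toy gaussian below is an assumption standing in for the project's integrand.

import torch
import vegas

def gaussian(x):  # toy torch-side integrand
    return torch.exp(-((x - 0.5) / 0.3).square().sum(dim=-1))

@vegas.batchintegrand
def vgaussian(x):  # x: numpy array of shape (n_points, d)
    return gaussian(torch.tensor(x)).numpy()

integ = vegas.Integrator([[0., 1.]] * 2)
print(integ(vgaussian, nitn=10, neval=10000).summary())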
Code Example #8
File: viz.py  Project: ywcmaike/caspr
def viz(flags):
    # General options
    num_workers = flags.num_workers

    data_cfg = flags.data_cfg
    seq_len = flags.seq_len
    num_pts = flags.num_pts

    augment_quad = flags.augment_quad
    augment_pairs = flags.augment_pairs

    pretrain_tnocs = flags.pretrain_tnocs

    model_in_path = flags.weights
    radii_list = flags.radii
    local_feat_size = flags.local_feat_size
    latent_feat_size = flags.latent_feat_size
    ode_hidden_size = flags.ode_hidden_size
    motion_feat_size = flags.motion_feat_size
    cnf_blocks = flags.cnf_blocks
    regress_tnocs = flags.regress_tnocs

    # Viz-specific options
    shuffle_test = flags.shuffle_test

    viz_tnocs = flags.viz_tnocs
    viz_observed = flags.viz_observed
    viz_interpolated = flags.viz_interpolated

    device = get_device()

    print('Setting batch size to 1 for visualization...')
    batch_size = 1

    # create caspr model
    model = CaSPR(radii_list=radii_list,
                  local_feat_size=local_feat_size,
                  latent_feat_size=latent_feat_size,
                  ode_hidden_size=ode_hidden_size,
                  pretrain_tnocs=pretrain_tnocs,
                  augment_quad=augment_quad,
                  augment_pairs=augment_pairs,
                  cnf_blocks=cnf_blocks,
                  motion_feat_size=motion_feat_size,
                  regress_tnocs=regress_tnocs)

    if pretrain_tnocs and model_in_path != '':
        # load in only pretrained tnocs weights
        print('Loading weights for pre-trained canonicalizer from %s...' %
              (model_in_path))
        loaded_state_dict = torch.load(model_in_path, map_location=device)
        load_encoder_weights_from_full(model, loaded_state_dict)
    elif model_in_path != '':
        print('Loading model weights from %s...' % (model_in_path))
        loaded_state_dict = torch.load(model_in_path, map_location=device)
        load_weights(model, loaded_state_dict)

    model.to(device)

    # visualize results on test set
    test_dataset = DynamicPCLDataset(data_cfg,
                                     split='test',
                                     train_frac=0.8,
                                     val_frac=0.1,
                                     num_pts=num_pts,
                                     seq_len=seq_len,
                                     shift_time_to_zero=(not pretrain_tnocs),
                                     random_point_sample=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=shuffle_test,
                             num_workers=num_workers,
                             worker_init_fn=lambda _: np.random.seed()
                             )  # get around numpy RNG seed bug

    # visualize predictions
    viz_cfg = VizConfig(flags)
    with torch.no_grad():
        test_viz(viz_cfg, model, test_dataset, test_loader, device)
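A sketch of invoking viz() with an argparse-style namespace. The values are illustrative assumptions, and VizConfig may consume additional fields not shown here.

from argparse import Namespace

flags = Namespace(
    num_workers=0, data_cfg='data/cfg.json', seq_len=10, num_pts=1024,
    augment_quad=False, augment_pairs=False, pretrain_tnocs=False,
    weights='out/BEST_time_model.pth', radii=[0.1, 0.2, 0.4],
    local_feat_size=128, latent_feat_size=1024, ode_hidden_size=512,
    motion_feat_size=64, cnf_blocks=1, regress_tnocs=True,
    shuffle_test=False, viz_tnocs=True, viz_observed=True,
    viz_interpolated=False)
viz(flags)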
Code Example #9
def run_style_transfer(size,
                       content_img_path,
                       style_img_path,
                       model,
                       content_layers,
                       style_layers,
                       input_img=None,
                       num_steps=128,
                       content_weight=1.,
                       style_weight=1e9,
                       log_steps=50):
    """Runs Neural Style Transfer.

    Args:
      size: The size to which the content and style images are resized.
      content_img_path: Path to the image whose content to match during the
        optimization.
      style_img_path: Path to the image whose style to match during the
        optimization.
      model: The Neural Style Transfer model to use.
      content_layers: The names of the layers whose output will be used to
        compute the content losses.
      style_layers: The names of the layers whose output will be used to
        compute the style losses.
      input_img: The image which will be optimized to match the content and
        style of the content image and style image respectively. If None,
        defaults to random Gaussian noise.
      num_steps: The number of steps to run the optimization for.
      content_weight: A weight to multiply the content loss by.
      style_weight: A weight to multiply the style loss by.
      log_steps: The number of consecutive training steps to run before logging.
    Returns:
      The optimized image as a numpy array.
    """
    content_img = image_loader(content_img_path, size)
    style_img = image_loader(style_img_path, size)
    n, c, h, w = content_img.data.size()
    if input_img is None:
        input_img = torch.randn((n, c, h, w), device=torch_utils.get_device())
        input_img = input_img * .01  # Scale the noise variance down.
    model, content_losses, style_losses = get_nst_model_and_losses(
        model, content_img, style_img, content_layers, style_layers)
    optimizer = optim.Adam([input_img.requires_grad_()], lr=.05)
    # NOTE(eugenhotaj): Making the generated image robust to minor transformations
    # was shown in https://distill.pub/2017/feature-visualization to produce more
    # visually appealing results. We observe the same thing but note that our
    # transformations are a lot more mild as aggressive transformations produce
    # rotation and scaling artifacts in the generated image.
    transform = nn.Sequential(
        kornia.augmentation.RandomResizedCrop(size=(h, w),
                                              scale=(.97, 1.),
                                              ratio=(.97, 1.03)),
        kornia.augmentation.RandomRotation(degrees=1.))
    for step in range(num_steps):
        optimizer.zero_grad()
        input_img.data.clamp_(0, 1)
        model(transform(input_img))
        content_loss, style_loss = 0, 0
        for cl in content_losses:
            content_loss += content_weight * cl.loss
        for sl in style_losses:
            style_loss += style_weight * sl.loss
        loss = content_loss + style_loss
        loss.backward()
        optimizer.step()
        if (step > 0 and step % log_steps == 0) or (step + 1) == num_steps:
            print(f'[{step}]: content_loss={content_loss.item():.4f},'
                  f' style_loss={style_loss.item():.4f}')
            #colab_utils.imshow(input_img.data.clamp_(0, 1), figsize=(10, 10))

    return np.asarray(IMAGE_UNLOADER(input_img))
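A hypothetical end-to-end call. The paths are placeholders; the layer names follow Gatys et al. and assume the VGG renaming from Code Example #4.

from torchvision import models

vgg = rename_vgg_layers(models.vgg19(pretrained=True).features.eval())
output = run_style_transfer(
    size=256,
    content_img_path='inputs/content.jpg',
    style_img_path='inputs/style.jpg',
    model=vgg,
    content_layers=['conv4_2'],
    style_layers=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'],
    num_steps=128)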