Example #1
0
    def run(self, images):
        """Stylize uint8 NHWC image(s) and return uint8 NHWC output.

        Accepts either a single image (H, W, C) or a batch (N, H, W, C);
        the return value has the same rank as the input.
        """
        assert images.dtype == np.uint8
        assert 3 <= images.ndim <= 4

        # Remember whether the caller passed a single image so we can
        # strip the batch axis again before returning.
        single_image = images.ndim == 3
        if single_image:
            images = images[None, ...]

        # To torch NCHW float in [0, 1].
        batch = nhwc_to_nchw(torch.from_numpy(images))
        batch = batch.to(torch.float32) / 255

        # Inference only: no autograd graph needed.
        with self.device(), torch.no_grad():
            batch = self.to_device(batch)
            batch = preprocess_for_reconet(batch)
            styled = postprocess_reconet(self.model(batch))
            # Back to uint8 NHWC on the CPU for the caller.
            styled = styled.cpu()
            styled = torch.clamp(styled * 255, 0, 255).to(torch.uint8)
            result = nchw_to_nhwc(styled).numpy()
            return result[0] if single_image else result
Example #2
0
def stylize_image(image, model):
    """Run the style-transfer model on one image (PIL image or tensor).

    NOTE(review): hard-codes `.cuda()`, so this requires a CUDA device —
    confirm callers never run it CPU-only.
    """
    if isinstance(image, Image.Image):
        image = transforms.ToTensor()(image)
    # Move to GPU and add the batch dimension the model expects.
    batch = image.cuda().unsqueeze_(0)
    batch = preprocess_for_reconet(batch)
    # Drop the batch dimension again and map back to image space.
    styled = model(batch).squeeze()
    return postprocess_reconet(styled)
Example #3
0
                # Compute ReCoNet features and output

                # Stylize the current frame: encoder features are kept
                # separately (presumably reused by a feature-level temporal
                # loss further down — confirm against the loss terms).
                reconet_input = preprocess_for_reconet(sample["frame"])
                feature_maps = model.encoder(reconet_input)
                output_frame = model.decoder(feature_maps)

                # Same encoder/decoder pass for the previous frame.
                previous_reconet_input = preprocess_for_reconet(
                    sample["previous_frame"])
                previous_feature_maps = model.encoder(previous_reconet_input)
                previous_output_frame = model.decoder(previous_feature_maps)

                # Compute VGG features

                # VGG sees the raw input frame, and the ReCoNet output mapped
                # back to image space (postprocess_reconet) before VGG-specific
                # preprocessing — used for perceptual/content-style losses.
                vgg_input_frame = preprocess_for_vgg(sample["frame"])
                vgg_output_frame = preprocess_for_vgg(
                    postprocess_reconet(output_frame))
                input_vgg_features = vgg(vgg_input_frame)
                output_vgg_features = vgg(vgg_output_frame)

                # Identical VGG feature extraction for the previous frame pair.
                vgg_previous_input_frame = preprocess_for_vgg(
                    sample["previous_frame"])
                vgg_previous_output_frame = preprocess_for_vgg(
                    postprocess_reconet(previous_output_frame))
                previous_input_vgg_features = vgg(vgg_previous_input_frame)
                previous_output_vgg_features = vgg(vgg_previous_output_frame)

                # Compute losses

                # Loss-term weights come straight from the CLI arguments.
                alpha = args.alpha
                beta = args.beta
                gamma = args.gamma