Example #1
    def _load_viewer(config: Dict) -> Viewer:
        '''
        Loads a Viewer from the given Viewer configuration.

        Args:
            config: Viewer configuration.
        
        Returns:
            Viewer instance as specified by the Viewer configuration.
        '''
        assert 'Type' in config, 'Scope "Viewer" in configuration file is missing key "Type".'
        if config['Type'] == 'Orthographic':
            assert 'Direction' in config, 'Scope "Viewer" in configuration file is missing key "Direction".'
            return OrthographicViewer(
                direction=torch.tensor(config['Direction'],
                                       dtype=torch.float,
                                       device=utils.get_device_name()))
        elif config['Type'] == 'Perspective':
            assert 'Position' in config, 'Scope "Viewer" in configuration file is missing key "Position".'
            return PerspectiveViewer(
                position=torch.tensor(config['Position'],
                                      dtype=torch.float,
                                      device=utils.get_device_name()))
        else:
            raise ValueError('Viewer type "%s" is not supported.' %
                             config['Type'])
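For reference, a "Viewer" scope satisfying the assertions above might look like the following. The key names are taken from the assertions; the numeric values are invented for illustration, and the snippet assumes the loader above is in scope.

    # Hypothetical "Viewer" configurations; only the key names come from the assertions above.
    orthographic_config = {'Type': 'Orthographic', 'Direction': [0.0, 0.0, -1.0]}
    perspective_config = {'Type': 'Perspective', 'Position': [0.5, 0.5, 2.0]}
    viewer = _load_viewer(perspective_config)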
Example #2
    def _load_camera(config: Dict) -> Camera:
        '''
        Loads a Camera from the given Camera configuration.

        Args:
            config: Camera configuration.
        
        Returns:
            Camera instance as specified by the Camera configuration.
        '''
        assert 'Type' in config, 'Scope "Camera" in configuration file is missing key "Type".'
        if config['Type'] == 'Identity':
            return IdentityCamera()
        if config['Type'] == 'Perspective':
            for key in ('Position', 'Direction', 'Field of View', 'Resolution',
                        'Exposure'):
                assert key in config, f'Scope "Camera" in configuration file is missing key "{key}".'
            return PerspectiveCamera(
                position=torch.tensor(config['Position'],
                                      dtype=torch.float,
                                      device=utils.get_device_name()),
                direction=torch.tensor(config['Direction'],
                                       dtype=torch.float,
                                       device=utils.get_device_name()),
                field_of_view=torch.tensor(config['Field of View'],
                                           dtype=torch.float,
                                           device=utils.get_device_name()),
                resolution=torch.tensor(config['Resolution'],
                                        device=utils.get_device_name()),
                exposure=config['Exposure'])
        else:
            raise ValueError('Camera type "%s" is not supported.' %
                             config['Type'])
Example #3
    def _load_network(path: str) -> SVBRDFAutoencoder:
        '''
        Loads an SVBRDFAutoencoder from the given SVBRDFAutoencoder configuration.

        Args:
            path: Path to the SVBRDFAutoencoder YAML configuration.
        
        Returns:
            SVBRDFAutoencoder instance as specified by the SVBRDFAutoencoder configuration.
        '''
        with open(path, 'r') as file:
            config = yaml.safe_load(file)
        for key in ('Dimensions', 'Parameters', 'Encoders', 'Decoder'):
            assert key in config, f'Scope "root" in configuration file is missing key "{key}".'
        for key in ('Path', 'Load'):
            assert key in config[
                'Parameters'], f'Scope "Parameters" in configuration file is missing key "{key}".'
        path = config['Parameters']['Path']
        load = config['Parameters']['Load']
        dims = config['Dimensions']
        encoders = {}
        for key in ('Local', 'Global', 'Periodic'):
            encoders[key] = Configuration._load_subnetwork(
                config=config['Encoders'][key])
        decoder = Configuration._load_subnetwork(config=config['Decoder'])
        device = utils.get_device_name()
        autoencoder = SVBRDFAutoencoder(dims=dims,
                                        path=path,
                                        encoders=encoders,
                                        decoder=decoder).to(device)
        if load:
            autoencoder.load()
        return autoencoder
Example #4
def index():
    """Video streaming"""
    # now = datetime.now()
    # timeString = now.strftime("%Y-%m-%d %H:%M")

    register_url = get_device_registration_url()
    with requests.Session() as s:
        data = {
            'DeviceName': get_device_name(),
            'DateCreated': get_current_time()
        }
        headers = {"Content-Type": "application/json"}
        r = s.post(register_url, headers=headers, json=data)

        json_response = r.json()
        print("json response == " + json_response["data"]["ipAddress"])

    session['ip_address'] = json_response["data"]["ipAddress"]

    templateData = {
        'title': 'HELLO!',
        # 'time': timeString,
        'time': "2020-03-06",
        'response': json_response,
        'ipaddress': json_response["data"]["ipAddress"]
    }

    print(session)
    return render_template('index.html', **templateData)
Example #5
 def load(self) -> None:
     '''
     Loads the parameters of this SVBRDFAutoencoder.
     '''
     device = utils.get_device_name()
     self.load_state_dict(
         torch.load(self._path, map_location=torch.device(device)))
     logging.debug('Loaded network parameters from "%s"', self._path)
Example #6
    def set_from_file(self, file):
        self.file = file
        self.connected = True

        devfile = open(file, "rb")
        self.name = U.get_device_name(devfile)
        self.axis_map, self.axis_states = U.get_joystick_axis_map(devfile)
        self.button_map, self.button_states = U.get_joystick_button_map(devfile)

        self.watch_id = GObject.io_add_watch(devfile, GObject.IO_IN, self._watch_cb)
Example #7
    def __init__(self, path: str, num_samples: int, intensity: float) -> None:
        '''
        Constructs a new ImageLight from the given image path, number of samples, and intensity.

        Args:
            path: Path to an environment map image.
            num_samples: Number of samples to take from the environment map.
            intensity: Scalar applied to each environment map radiance sample.
        '''
        assert path, "Path cannot be empty or set to None."
        assert num_samples > 0, "Number of samples must be greater than zero."
        self._image = image.load(path, 'sRGB')
        self._intensity = intensity
        # Construct a grid of azimuth (ϕ) and zenith (θ) angles uniformly distributed over the upper unit hemisphere.
        self._samples = torch.stack([
            2 * math.pi *
            torch.rand(num_samples, device=utils.get_device_name()),
            torch.acos(torch.rand(num_samples, device=utils.get_device_name()))
        ],
                                    dim=1)
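The stacked (ϕ, θ) samples are only angles; the following minimal, self-contained sketch shows how such samples could be converted into Cartesian directions on the upper hemisphere. The conversion is an assumption for illustration and is not part of the class above.

    import math
    import torch

    num_samples = 4
    # Azimuth ϕ ∈ [0, 2π) and zenith θ ∈ [0, π/2], drawn as in __init__ above.
    samples = torch.stack([
        2 * math.pi * torch.rand(num_samples),
        torch.acos(torch.rand(num_samples))
    ], dim=1)
    phi, theta = samples[:, 0], samples[:, 1]
    # Spherical-to-Cartesian conversion with +z as the zenith axis.
    directions = torch.stack([
        torch.sin(theta) * torch.cos(phi),
        torch.sin(theta) * torch.sin(phi),
        torch.cos(theta)
    ], dim=1)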
Example #8
    def _load_light(config: Dict) -> Light:
        '''
        Loads a Light from the given Light configuration.

        Args:
            config: Light configuration.
        
        Returns:
            Light instance as specified by the Light configuration.
        '''
        assert 'Type' in config, 'Scope "Light" in configuration file is missing key "Type".'
        if config['Type'] == 'Directional':
            for key in ('Direction', 'Lumens'):
                assert key in config, f'Scope "Light" in configuration file is missing key "{key}".'
            return DirectionalLight(
                direction=torch.tensor(config['Direction'],
                                       dtype=torch.float,
                                       device=utils.get_device_name()),
                lumens=torch.tensor(config['Lumens'],
                                    dtype=torch.float,
                                    device=utils.get_device_name()))
        elif config['Type'] == 'Punctual':
            for key in ('Position', 'Lumens'):
                assert key in config, f'Scope "Light" in configuration file is missing key "{key}".'
            return PunctualLight(
                position=torch.tensor(config['Position'],
                                      dtype=torch.float,
                                      device=utils.get_device_name()),
                lumens=torch.tensor(config['Lumens'],
                                    dtype=torch.float,
                                    device=utils.get_device_name()))
        elif config['Type'] == 'Image':
            for key in ('Path', 'Samples', 'Intensity'):
                assert key in config, f'Scope "Light" in configuration file is missing key "{key}".'
            return ImageLight(path=config['Path'],
                              num_samples=config['Samples'],
                              intensity=config['Intensity'])
        else:
            raise ValueError('Light type "%s" is not supported.' %
                             config['Type'])
Example #9
 def apply(self, normals: Tensor, parameters: Tensor, lights: List[Light],
           viewer: Viewer) -> Tuple[Tensor, Tensor, List[Light], Viewer]:
     '''See Transform.apply().'''
     replaced_parameters = parameters.clone().detach()
     for i, target in enumerate(self._targets):
         index = target['Index']
         min_value = target['Min Value']
         max_value = target['Max Value']
         uniform_random_field = torch.rand_like(
             parameters[:, :, :, index], device=utils.get_device_name())
         replaced_parameters[:, :, :, index] = uniform_random_field * (
             max_value - min_value) + min_value
     return normals, replaced_parameters, lights, viewer
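A hypothetical `_targets` entry consistent with the keys read above; the index and bounds are invented for illustration.

    # Hypothetical target: overwrite parameter channel 3 with uniform noise in [0.2, 0.8].
    targets = [{'Index': 3, 'Min Value': 0.2, 'Max Value': 0.8}]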
Example #10
 def radiance(self, points: Tensor) -> Tensor:
     '''See Light.radiance().'''
     num_samples = self.samples.size(0)
     # The bilinear sampling function operates over [N, C, H, W] Tensors.
     sample_input = self._image.permute(2, 0, 1).unsqueeze(0)
     # Convert the sampled angles into environment map coordinates.
     sample_grid = torch.zeros(size=(1, 1, num_samples, 2),
                               device=utils.get_device_name())
     sample_grid[0, 0, :, 0] = self.samples[:, 0] / math.pi - 1
     sample_grid[0, 0, :, 1] = self.samples[:, 1] / math.pi * 2 - 1
     # Return the incident radiance from the environment map.
     incident_radiance = torch.nn.functional.grid_sample(
         sample_input, sample_grid, mode='bilinear',
         align_corners=False).permute(0, 2, 3, 1)
     return self.intensity / num_samples * incident_radiance.view(
         -1, 3).expand(points.size(0), points.size(1), -1, 3)
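A small self-contained check of the coordinate remapping used above: grid_sample expects normalized coordinates in [-1, 1], and the two expressions map ϕ ∈ [0, 2π] to [-1, 1] and θ ∈ [0, π/2] to [-1, 0], the upper half of the environment map.

    import math
    import torch

    phi = torch.tensor([0.0, math.pi, 2 * math.pi])
    theta = torch.tensor([0.0, math.pi / 4, math.pi / 2])
    print(phi / math.pi - 1)        # tensor([-1.,  0.,  1.])
    print(theta / math.pi * 2 - 1)  # tensor([-1.0000, -0.5000,  0.0000])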
Example #11
def compute_radiance(network_normals: Tensor, network_svbrdf: SVBRDF,
                     dataset_normals: Tensor,
                     dataset_svbrdf: SVBRDF) -> Tuple[Tensor, Tensor]:
    '''
    Computes the radiance from the given normals and SVBRDFs with respect to a random point Light and Viewer.

    Args:
        network_normals: Tensor [B, R, C, 3] of SVBRDF autoencoder normals.
        network_svbrdf: SVBRDF with embedded SVBRDF autoencoder parameters.
        dataset_normals: Tensor [B, R, C, 3] of ground-truth normals.
        dataset_svbrdf: SVBRDF with embedded ground-truth parameters.

    Returns:
        Tuple containing the SVBRDF autoencoder and Dataset radiance Tensors.
    '''
    # There is no harm in sharing the same drawing canvas for both the network and dataset renderings.
    texture_rows = dataset_normals.size(1)
    texture_cols = dataset_normals.size(2)
    surface = utils.create_grid(num_rows=texture_rows, num_cols=texture_cols)

    # The Light and Viewer are sampled from a cosine-weighted distribution following the Single-Image SVBRDF Capture
    # with a Rendering-Aware Deep Network paper.
    origin = torch.tensor([0.5, 0.5, 0.0], device=utils.get_device_name())
    lights = [
        PunctualLight(position=utils.sample_cosine_hemisphere(origin),
                      lumens=torch.rand(1).expand(3) * 2 + 0.5)
    ]
    viewer = PerspectiveViewer(position=utils.sample_cosine_hemisphere(origin))

    network_radiance = shader.shade(surface=surface,
                                    normals=network_normals,
                                    lights=lights,
                                    viewer=viewer,
                                    svbrdf=network_svbrdf)
    dataset_radiance = shader.shade(surface=surface,
                                    normals=dataset_normals,
                                    lights=lights,
                                    viewer=viewer,
                                    svbrdf=dataset_svbrdf)
    return network_radiance, dataset_radiance
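utils.sample_cosine_hemisphere is not shown in this example; the sketch below is a common construction for cosine-weighted hemisphere sampling (Malley's method), offered only as an assumption about what such a helper might do, with the sampled point offset from the given origin.

    import math
    import torch

    def sample_cosine_hemisphere_sketch(origin: torch.Tensor) -> torch.Tensor:
        '''Hypothetical stand-in for utils.sample_cosine_hemisphere (Malley's method).'''
        u1, u2 = torch.rand(()), torch.rand(())
        radius = torch.sqrt(u1)
        phi = 2 * math.pi * u2
        # A point on the unit disk lifted onto the hemisphere yields a cosine-weighted direction.
        offset = torch.stack([radius * torch.cos(phi),
                              radius * torch.sin(phi),
                              torch.sqrt(1 - u1)])
        return origin + offset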
Example #12
 def apply(self, normals: Tensor, parameters: Tensor, lights: List[Light],
           viewer: Viewer) -> Tuple[Tensor, Tensor, List[Light], Viewer]:
     '''See Transform.apply().'''
     device = utils.get_device_name()
     scalar = torch.rand(1) * (self._max_scalar -
                               self._min_scalar) + self._min_scalar
     # Take care not to modify the original list of Lights.
     elevated_lights = [light for light in lights]
     for i, light in enumerate(lights):
         # Only punctual Lights can be raised or lowered.
         if isinstance(light, PunctualLight):
             elevated_position = light.position * torch.tensor(
                 [1.0, 1.0, scalar], device=device)
             elevated_lights[i] = PunctualLight(position=elevated_position,
                                                lumens=light.lumens)
     elevated_viewer = viewer
     # Similarly, only perspective Viewers can be raised or lowered.
     if isinstance(viewer, PerspectiveViewer):
         elevated_position = viewer.position * torch.tensor(
             [1.0, 1.0, scalar], device=device)
         elevated_viewer = PerspectiveViewer(position=elevated_position)
     return normals, parameters, elevated_lights, elevated_viewer
Example #13
def load(path: str, encoding: str = 'RGB') -> Tensor:
    '''
    Loads the image at the given path using the supplied encoding.

    Args:
        path: Path to the image.
        encoding: Encoding of the image.

    Returns:
        Tensor [R, C, X] representing the normalized pixel values in the image.
    '''
    assert path, "Path cannot be empty or set to None."
    array = imageio.imread(path)
    device = utils.get_device_name()
    image = torchvision.transforms.ToTensor()(array).to(device).permute(
        1, 2, 0)[:, :, :3]
    if encoding == 'sRGB':
        image = convert_sRGB_to_RGB(image)
    elif encoding == 'Greyscale':
        image = convert_RGB_to_greyscale(image)
    elif encoding != 'RGB':
        raise Exception(f'Image encoding "{encoding}" is not supported.')
    logging.debug('Loaded image from "%s"', path)
    return image
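convert_sRGB_to_RGB is not shown in this example; the minimal sketch below implements the standard sRGB-to-linear decode (IEC 61966-2-1) and is offered only as an assumption about what that helper likely does.

    import torch

    def convert_sRGB_to_RGB_sketch(image: torch.Tensor) -> torch.Tensor:
        '''Hypothetical sRGB-to-linear decode using the piecewise transfer curve.'''
        linear_low = image / 12.92
        linear_high = ((image + 0.055) / 1.055) ** 2.4
        return torch.where(image <= 0.04045, linear_low, linear_high)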
Example #14
def _merge_flow(config: Configuration) -> None:
    '''
    The "merge" flow melds two overlapping textures by smoothly blending their latent fields.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, overlap, input_paths, output_path = config.load_merge_flow(
        )
        autoencoder.eval()

        # It is assumed that the dimensions of the input images will be accepted by the network.
        input_images = torch.stack([
            image.load(path=input_path, encoding='sRGB')
            for input_path in input_paths
        ],
                                   dim=0)
        # The radial distance field should be the same for both input images.
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols)
        # By convention, PyTorch expects Tensors to be in [B, D, R, C] format.
        input_batch = torch.cat(
            [input_images, input_distance.expand(2, -1, -1, -1)],
            dim=3).permute(0, 3, 1, 2)

        # The width and height of the SVBRDF autoencoder latent are shared between all latent components.
        num_latent_rows = autoencoder.dimensions['Latent']['Local'][0]
        num_latent_cols = autoencoder.dimensions['Latent']['Local'][1]

        # The latent field corresponding to each texture must be padded in the region where it has no influence.
        device = utils.get_device_name()
        channels = {
            key: autoencoder.dimensions['Latent'][key][2]
            for key in ('Local', 'Global', 'Periodic')
        }
        padding = torch.zeros(
            (num_latent_rows, num_latent_cols - overlap, sum(
                channels.values())),
            device=device)

        # The latent field is blended smoothly across the overlapping region as follows:
        #     +------------+---------------------+------------+
        #     |  α = 0.00  |  α = 0.00 ... 1.00  |  α = 1.00  |
        #     +------------+---------------------+------------+
        #                   <----- Overlap ----->
        texture_latents = autoencoder.encode(input_batch).permute(0, 2, 3, 1)
        widened_latents = torch.stack([
            torch.cat([texture_latents[0], padding], dim=1),
            torch.cat([padding, texture_latents[1]], dim=1)
        ],
                                      dim=0)
        alphas = torch.cat([
            torch.zeros(num_latent_cols - overlap, device=device),
            torch.linspace(0, 1, overlap, device=device),
            torch.ones(num_latent_cols - overlap, device=device)
        ]).expand(num_latent_rows, -1).unsqueeze(-1)
        blended_latents = torch.lerp(widened_latents[0], widened_latents[1],
                                     alphas).permute(2, 0, 1)

        # The periodic component should be replaced to be consistent with the blended global field.
        global_field = blended_latents[channels['Local']:channels['Local'] +
                                       channels['Global'], :, :]
        blended_latents[
            -channels['Periodic']:, :, :] = autoencoder.derive_periodic_field(
                global_field.unsqueeze(0)).squeeze(0)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(blended_latents.unsqueeze(0)))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
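As a toy check of the blend weights built above (with hypothetical sizes num_latent_cols = 4 and overlap = 2), the per-column alpha ramp is zero over the left-only region, ramps across the overlap, and is one over the right-only region.

    import torch

    num_latent_cols, overlap = 4, 2  # Hypothetical sizes for illustration.
    alphas = torch.cat([
        torch.zeros(num_latent_cols - overlap),
        torch.linspace(0, 1, overlap),
        torch.ones(num_latent_cols - overlap)
    ])
    print(alphas)  # tensor([0., 0., 0., 1., 1., 1.])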
Example #15
def _shuffle_flow(config: Configuration) -> None:
    '''
    The "shuffle" flow expands the SVBRDF parameters of an image to fill an arbitrary plane by shuffling latent tiles.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, tile_size, output_size, input_path, output_path = config.load_shuffle_flow(
        )
        autoencoder.eval()

        # Continuing to index sizes with 0 and 1 is simultaneously confusing and a potential debugging nightmare.
        num_tile_rows, num_tile_cols = tile_size
        num_output_rows, num_output_cols = output_size

        # Similarly, it is worthwhile to give names to the otherwise-generic SVBRDF autoencoder dimensions.
        num_latent_rows = autoencoder.dimensions['Latent']['Local'][0]
        num_latent_cols = autoencoder.dimensions['Latent']['Local'][1]
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        row_expansion_ratio = autoencoder.dimensions['Texture']['Output'][
            0] // num_latent_rows
        col_expansion_ratio = autoencoder.dimensions['Texture']['Output'][
            1] // num_latent_cols

        # These sanity checks may seem obvious but you never know...
        assert num_tile_rows <= num_latent_rows, 'Tile height cannot exceed the height of the latent field.'
        assert num_tile_cols <= num_latent_cols, 'Tile width cannot exceed the width of the latent field.'
        assert num_output_rows % (
            row_expansion_ratio * num_tile_rows
        ) == 0, 'Latent height inferred from the output height must be a multiple of the tile height.'
        assert num_output_cols % (
            col_expansion_ratio * num_tile_cols
        ) == 0, 'Latent width inferred from the output width must be a multiple of the tile width.'

        # It is assumed that the dimensions of the input images will be accepted by the network.
        input_images = image.load(path=input_path,
                                  encoding='sRGB').unsqueeze(0)
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols).unsqueeze(0)
        input_batch = torch.cat([input_images, input_distance],
                                dim=3).permute(0, 3, 1, 2)
        input_latent = autoencoder.encode(input_batch)

        # As mentioned in the assertions, the size of the shuffled latent field can be inferred from the desired output texture size.
        num_shuffled_rows = num_output_rows // row_expansion_ratio
        num_shuffled_cols = num_output_cols // col_expansion_ratio
        shuffled_latent = torch.zeros(
            (1, input_latent.size(1), num_shuffled_rows, num_shuffled_cols),
            device=utils.get_device_name())

        # The shuffled latent is populated with random tiles from the input image latent.
        for row in range(0, shuffled_latent.size(2), num_tile_rows):
            for col in range(0, shuffled_latent.size(3), num_tile_cols):
                original_row_crop, original_col_crop = utils.sample_embedded_rectangle(
                    num_outer_rows=input_latent.size(2),
                    num_inner_rows=num_tile_rows,
                    num_outer_cols=input_latent.size(3),
                    num_inner_cols=num_tile_cols)
                shuffled_row_crop, shuffled_col_crop = slice(
                    row, row + num_tile_rows), slice(col, col + num_tile_cols)
                shuffled_latent[:, :, shuffled_row_crop,
                                shuffled_col_crop] = input_latent[:, :,
                                                                  original_row_crop,
                                                                  original_col_crop]

        # The periodic latent component needs to be aligned with its relative position in the field.
        channels = {
            key: autoencoder.dimensions['Latent'][key][2]
            for key in ('Local', 'Global', 'Periodic')
        }
        global_field = shuffled_latent[:, channels['Local']:channels['Local'] +
                                       channels['Global'], :, :]
        shuffled_latent[:, -channels[
            'Periodic']:, :, :] = autoencoder.derive_periodic_field(
                global_field)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(shuffled_latent))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
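utils.sample_embedded_rectangle is not shown in this example; the sketch below is a plausible implementation (an assumption, not the project's actual helper) that returns row and column slices of an inner rectangle placed at a random valid offset inside an outer rectangle.

    from typing import Tuple

    import torch

    def sample_embedded_rectangle_sketch(num_outer_rows: int, num_inner_rows: int,
                                         num_outer_cols: int,
                                         num_inner_cols: int) -> Tuple[slice, slice]:
        '''Hypothetical stand-in for utils.sample_embedded_rectangle.'''
        row = int(torch.randint(0, num_outer_rows - num_inner_rows + 1, (1,)))
        col = int(torch.randint(0, num_outer_cols - num_inner_cols + 1, (1,)))
        return slice(row, row + num_inner_rows), slice(col, col + num_inner_cols)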
Example #16
 def load(self, file_name=None):
     if file_name is None:
         file_name = self.get_file_name()
     path = os.path.join(self.model_dir, file_name)
     self.load_state_dict(torch.load(path, map_location=utils.get_device_name()))
     print(f"Loaded model state dict from {path}")
Example #17
def optimize(
        autoencoder: SVBRDFAutoencoder,
        svbrdf: SVBRDF,
        datasets: Dict[str, Dataset],
        optimizer: torch.optim.Optimizer,  # type: ignore
        epochs: int,
        cycles: int,
        samples: int,
        frequencies: Dict[str, int],
        loss_weights: Weights,
        early_stopping: Dict,
        experiment: str) -> None:
    '''
    Optimizes the given SVBRDF autoencoder using the provided SVBRDF, Datasets, Optimizer, and hyperparameters.

    Args:
        autoencoder: SVBRDFAutoencoder to be optimized.
        svbrdf: SVBRDF intended for the output of the SVBRDF autoencoder.
        datasets: Mapping between Dataset names (e.g., "Training") and, well, Datasets.
        optimizer: Optimizer that updates the parameters in the SVBRDF autoencoder.
        epochs: Number of training epochs.
        cycles: Number of training steps to execute during each epoch.
        samples: Size of a training batch.
        frequencies: Mapping between event names (e.g., "Parameter Checkpoint") and the number of training steps between
                     executions of these events.
        loss_weights: Mapping between loss types (e.g., "Reconstruction") and dictionaries that associate loss components
                      (e.g., "Style") with their corresponding weights.
        early_stopping: Mapping between early stopping settings (e.g., "Epsilon" and "Patience") and their values.
        experiment: Name of the current experiment.
    '''
    # The structure of the given dictionaries is checked here rather than in the Configuration to avoid bloating distant
    # code and to keep the relevant implementation in one place.
    for key in ('Training', 'Testing'):
        assert key in datasets, f'Dataset dictionary is missing key {key}.'
    for key in ('Reconstruction', ):
        assert key in loss_weights, f'Loss weights dictionary is missing key {key}.'
    # for key in ('Content', 'Style'):
    #     assert key in loss_weights['Diversity'], f'Loss weights dictionary is missing key {key} under scope "Diversity".'
    for key in ('Content', 'Style', 'Texel'):
        assert key in loss_weights[
            'Reconstruction'], f'Loss weights dictionary is missing key {key} under scope "Reconstruction".'
    for key in ('Tests Publication', 'Image Publication',
                'Parameter Checkpoint', 'Early Stopping'):
        assert key in frequencies, f'Frequencies dictionary is missing key {key}.'
    for key in ('Epsilon', 'Patience'):
        assert key in early_stopping, f'Early stopping dictionary is missing key {key}.'

    def zero_backward_step(loss: Tensor) -> None:
        '''Updates the parameters of the SVBRDF autoencoder using the given loss Tensor, taking care to clear gradients beforehand.'''
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Sharing the same VGG-19 network instance across all invocations is absolutely critical to performance.
    # Furthermore, the VGG-19 network must be initialized after the primary computation device has been set.
    vgg19 = VGG19().to(utils.get_device_name())
    vgg19.eval()

    # A SummaryWriter can be used to publish data to TensorBoard.
    run_name = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    dashboard = SummaryWriter(f'results/runs/{run_name} - {experiment}')

    # Tracking the number of training steps is helpful for TensorBoard tracking and implementing "every-X-step" behaviour.
    steps = 0

    # The best testing loss record tracks the minimum testing loss achieved so far.
    best_testing_loss = torch.tensor(float('inf'))
    # The early stopping counter tracks the number of early stopping checks that have transpired since the best testing loss was updated.
    early_stopping_counter = 0

    # Each epoch introduces a new material to the training loop.
    for epoch in range(epochs):
        for cycle in tqdm.tqdm(range(cycles),
                               desc=f'Epoch {epoch} Progress',
                               total=cycles):
            # Materials from the Dataset are selected in a round-robin fashion following the training technique described
            # in the Diversified Texture Synthesis with Feed-forward Networks paper.
            material = cycle % min(epoch + 1, len(datasets['Training']))

            # The reconstruction loss encourages the network to accurately infer the SVBRDF parameters of a given texture.
            reconstruction_losses = compute_reconstruction_losses(
                autoencoder=autoencoder,
                network_svbrdf=svbrdf,
                samples=samples,
                dataset=datasets['Training'],
                material=material,
                vgg19=vgg19)
            reconstruction_losses['Total'] = loss_weights['Reconstruction']['Content'] * reconstruction_losses['Content'] + \
                                             loss_weights['Reconstruction']['Style'] * reconstruction_losses['Style'] + \
                                             loss_weights['Reconstruction']['Texel'] * reconstruction_losses['Texel']
            zero_backward_step(loss=reconstruction_losses['Total'])

            # The diversity loss encourages the network to encode the style of a texture in the global latent vector.
            # diversity_losses = compute_diversity_losses(autoencoder=autoencoder, network_svbrdf=svbrdf, samples=samples,
            #                                             dataset=datasets['Training'], material=material, vgg19=vgg19)
            # diversity_losses['Total'] = loss_weights['Diversity']['Content'] * diversity_losses['Content'] + \
            #                             loss_weights['Diversity']['Style'] * diversity_losses['Style']
            # zero_backward_step(loss=diversity_losses['Total'])

            # The training losses are published to the dashboard after each training iteration.
            texture = datasets['Training'].textures[material]
            losses = {'Reconstruction': reconstruction_losses}
            publish_scalar_results(dashboard=dashboard,
                                   mode='Training',
                                   steps=steps,
                                   texture=texture,
                                   losses=losses)

            # Adding one to the number of steps avoids triggering an event on the first training iteration.
            progress = steps + 1
            if progress % frequencies['Parameter Checkpoint'] == 0:
                autoencoder.save()
            if progress % frequencies['Image Publication'] == 0:
                materials = range(min(epoch + 1, len(datasets['Training'])))
                publish_image_results(dashboard=dashboard,
                                      mode='Training',
                                      steps=steps,
                                      autoencoder=autoencoder,
                                      network_svbrdf=svbrdf,
                                      dataset=datasets['Training'],
                                      materials=materials)
            if progress % frequencies['Tests Publication'] == 0:
                publish_testing_results(dashboard=dashboard,
                                        steps=steps,
                                        autoencoder=autoencoder,
                                        network_svbrdf=svbrdf,
                                        dataset=datasets['Testing'],
                                        samples=samples,
                                        loss_weights=loss_weights,
                                        vgg19=vgg19)
            if progress % frequencies['Early Stopping'] == 0 and epoch >= len(
                    datasets['Training']):
                # The testing loss is the mean loss of each material in the testing dataset.
                testing_loss = compute_testing_loss(
                    autoencoder=autoencoder,
                    network_svbrdf=svbrdf,
                    dataset=datasets['Testing'],
                    samples=samples,
                    loss_weights=loss_weights,
                    vgg19=vgg19)
                # The epsilon factor avoids delaying the early stopping due to noise.
                if testing_loss < best_testing_loss - early_stopping['Epsilon']:
                    best_testing_loss = testing_loss
                    early_stopping_counter = 0
                else:
                    early_stopping_counter += 1
                    if early_stopping_counter >= early_stopping['Patience']:
                        logging.info(
                            'Early stopping triggered at epoch %d cycle %d',
                            epoch, cycle)
                        # The parameter weights may no longer be "optimal" but they are probably close enough.
                        autoencoder.save()
                        dashboard.close()
                        return
            steps += 1
    dashboard.close()
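A toy trace of the round-robin material selection above, assuming (hypothetically) three training materials: epoch 0 uses only material 0, epoch 1 alternates between materials 0 and 1, and later epochs cycle through all three.

    num_materials = 3  # Hypothetical dataset size for illustration.
    for epoch in range(3):
        print([cycle % min(epoch + 1, num_materials) for cycle in range(6)])
    # [0, 0, 0, 0, 0, 0]
    # [0, 1, 0, 1, 0, 1]
    # [0, 1, 2, 0, 1, 2]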
Example #18
def _morph_flow(config: Configuration) -> None:
    '''
    The "morph" flow morphs one texture into another over a series of discrete tiles.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, between, input_paths, output_path = config.load_morph_flow(
        )
        autoencoder.eval()

        # The total number of tiles includes the two textures on either end as well as the tiles between the textures.
        tiles = 2 + between
        device = utils.get_device_name()

        # It is assumed that the dimensions of the input images will be accepted by the network.
        input_images = torch.stack([
            image.load(path=input_path, encoding='sRGB')
            for input_path in input_paths
        ],
                                   dim=0)
        # The radial distance field should be the same for both input images.
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols)
        # By convention, PyTorch expects Tensors to be in [B, D, R, C] format.
        input_batch = torch.cat(
            [input_images, input_distance.expand(2, -1, -1, -1)],
            dim=3).permute(0, 3, 1, 2)

        # The width and height of the SVBRDF autoencoder latent are shared between all latent components.
        num_latent_rows = autoencoder.dimensions['Latent']['Local'][0]
        num_latent_cols = autoencoder.dimensions['Latent']['Local'][1]

        # The local field latent is blended such that each texel within a tile has the same alpha component.
        #     +------------+------------+------------+------------+------------+
        #     |  α = 0.00  |  α = 0.25  |  α = 0.50  |  α = 0.75  |  α = 1.00  |
        #     +------------+------------+------------+------------+------------+
        local_encoder_output = autoencoder.encoders['Local'].forward(
            input_batch)
        local_field_output = local_encoder_output.repeat(1, 1, 1,
                                                         tiles).permute(
                                                             0, 2, 3, 1)
        local_field_alphas = torch.linspace(
            0, 1, tiles,
            device=device).repeat_interleave(num_latent_cols).expand(
                num_latent_rows, -1).unsqueeze(-1)
        local_field = torch.lerp(local_field_output[0], local_field_output[1],
                                 local_field_alphas).permute(2, 0, 1)

        # The global field latent is blended continuously between the left and right textures.
        #     +------------+------------+------------+------------+------------+
        #     |  α = 0.00  |  α = 0.00 ... ... 0.50 ... ... 1.00  |  α = 1.00  |
        #     +------------+------------+------------+------------+------------+
        global_encoder_output = autoencoder.encoders['Global'].forward(
            input_batch)
        global_field_output = global_encoder_output.expand(
            num_latent_rows, num_latent_cols * tiles, -1,
            -1).permute(2, 0, 1, 3)
        global_field_alphas = torch.cat([
            torch.zeros(num_latent_cols, device=device),
            torch.linspace(0, 1, num_latent_cols * between, device=device),
            torch.ones(num_latent_cols, device=device)
        ]).expand(num_latent_rows, -1).unsqueeze(-1)
        global_field = torch.lerp(global_field_output[0],
                                  global_field_output[1],
                                  global_field_alphas).permute(2, 0, 1)

        # Fortunately, the periodic field latent does not demand any special treatment.
        periodic_field = autoencoder.derive_periodic_field(
            global_field.unsqueeze(0)).squeeze(0)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        latents = torch.cat([local_field, global_field, periodic_field],
                            dim=0).unsqueeze(0)
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(latents))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
Example #19
    def evaluate(self, normals: Tensor, incident_directions: Tensor,
                 outbound_directions: Tensor) -> Tensor:
        '''See SVBRDF.evaluate().'''
        base_colours, subsurface, metallic, specular_amounts, specular_tints, roughness, anisotropy_levels, anisotropy_angles, \
            sheen_amounts, sheen_tints, clearcoat_amounts, clearcoat_gloss = torch.split(self.parameters, [3] + [1] * 11, dim=-1)

        # The incident and outbound halfway zeniths are shared across several SVBRDF lobes.
        halfways = vector.normalize(incident_directions + outbound_directions)
        halfway_zeniths = vector.dot(incident_directions, halfways)

        # Isolate the hue and saturation from the base colour luminosity: https://www.w3.org/Graphics/Color/sRGB.
        dtype = self.parameters.dtype
        device = utils.get_device_name()
        luminosity = torch.sum(
            1E-5 + base_colours *
            torch.tensor([0.2126, 0.7152, 0.0722], dtype=dtype, device=device),
            dim=-1,
            keepdim=True)
        tint_colours = base_colours / luminosity

        # Compute the Lambertian SVBRDF contribution with two Fresnel factors.
        diffuse_lambertian_colours = 0.5 + 2 * torch.unsqueeze(
            roughness, dim=3) * halfway_zeniths**2
        diffuse_lambertian_fresnel_factors = MicrofacetSVBRDF.F_Disney(diffuse_lambertian_colours, normals, incident_directions) * \
                                             MicrofacetSVBRDF.F_Disney(diffuse_lambertian_colours, normals, outbound_directions)
        diffuse_lambertian_terms = LambertianSVBRDF(base_colours).evaluate(normals, incident_directions, outbound_directions) \
                                   * diffuse_lambertian_fresnel_factors

        incident_zeniths = vector.dot(incident_directions,
                                      normals).clamp(1E-5, 1)
        outbound_zeniths = vector.dot(outbound_directions,
                                      normals).clamp(1E-5, 1)

        # Compute the Hanrahan-Krueger-inspired SVBRDF contribution (to model subsurface scattering).
        diffuse_subsurface_colours = torch.unsqueeze(
            roughness, dim=3) * halfway_zeniths**2
        diffuse_subsurface_fresnel_factors = MicrofacetSVBRDF.F_Disney(diffuse_subsurface_colours, normals, incident_directions) * \
                                             MicrofacetSVBRDF.F_Disney(diffuse_subsurface_colours, normals, outbound_directions)
        diffuse_subsurface_terms = LambertianSVBRDF(base_colours).evaluate(normals, incident_directions, outbound_directions) \
                                   * 1.25 * (diffuse_subsurface_fresnel_factors * (1 / (incident_zeniths + outbound_zeniths) - 0.5) + 0.5)

        # Compute the sheen SVBRDF contribution (for cloth materials).
        diffuse_sheen_colours = torch.lerp(
            torch.tensor(1.0, dtype=dtype, device=device), tint_colours,
            sheen_tints)
        diffuse_sheen_terms = torch.unsqueeze(
            sheen_amounts * diffuse_sheen_colours,
            dim=3) * (1 - halfway_zeniths)**5

        # Compute the primary specular SVBRDF contribution using an anisotropic GGX microfacet model.
        specular_aspects = torch.sqrt(1.0 - 0.9 * anisotropy_levels)
        specular_alphas_x = torch.max(
            roughness**2 / specular_aspects,
            torch.tensor([1E-3], dtype=dtype, device=device))
        specular_alphas_y = torch.max(
            roughness**2 * specular_aspects,
            torch.tensor([1E-3], dtype=dtype, device=device))
        specular_colours = torch.lerp(
            specular_amounts * 0.08 *
            torch.lerp(torch.tensor(1.0, dtype=dtype, device=device),
                       tint_colours, specular_tints), base_colours, metallic)
        specular_parameters = torch.cat([
            specular_alphas_x, specular_alphas_y,
            2 * math.pi * anisotropy_angles, specular_colours
        ],
                                        dim=3)
        specular_terms = MicrofacetSVBRDF(
            D=(MicrofacetSVBRDF.D_GGX_Anisotropic, [0, 1, 2]),
            F=(MicrofacetSVBRDF.F_Schlick, [3, 4, 5]),
            G=(MicrofacetSVBRDF.G_GGX_Anisotropic, [0, 1, 2]),
            parameters=specular_parameters).evaluate(normals,
                                                     incident_directions,
                                                     outbound_directions)

        # Compute the clearcoat SVBRDF contribution using a GTR model where γ=1.
        clearcoat_D_alphas = torch.lerp(
            torch.full(clearcoat_gloss.shape, 0.1, dtype=dtype, device=device),
            torch.full(clearcoat_gloss.shape,
                       0.001,
                       dtype=dtype,
                       device=device), clearcoat_gloss)
        clearcoat_G_alphas = torch.full(clearcoat_D_alphas.shape,
                                        0.25,
                                        dtype=dtype,
                                        device=device)
        clearcoat_colours = torch.full(base_colours.shape,
                                       0.04,
                                       dtype=dtype,
                                       device=device)
        clearcoat_parameters = torch.cat(
            [clearcoat_D_alphas, clearcoat_G_alphas, clearcoat_colours], dim=3)
        clearcoat_terms = MicrofacetSVBRDF(
            D=(MicrofacetSVBRDF.D_Berry, [0]),
            F=(MicrofacetSVBRDF.F_Schlick, [2, 3, 4]),
            G=(MicrofacetSVBRDF.G_GGX_Isotropic, [1]),
            parameters=clearcoat_parameters).evaluate(normals,
                                                      incident_directions,
                                                      outbound_directions)

        # Cull any reflections that pass through the surface.
        visible = (incident_zeniths >= 0) & (outbound_zeniths >= 0)
        # Combine the SVBRDF contributions in a principled way.
        diffuse_terms = diffuse_sheen_terms + torch.lerp(
            diffuse_lambertian_terms, diffuse_subsurface_terms,
            torch.unsqueeze(subsurface, dim=3))
        combined_terms = (1 - torch.unsqueeze(metallic, dim=3)
                          ) * diffuse_terms + specular_terms + torch.unsqueeze(
                              clearcoat_amounts,
                              dim=3) * 0.25 * clearcoat_terms
        return visible * combined_terms
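MicrofacetSVBRDF.F_Schlick is referenced above but not shown in this example; the minimal sketch below implements the Schlick Fresnel approximation F = F0 + (1 - F0)(1 - cos θ)^5 and is offered only as an assumption about what that term computes.

    import torch

    def F_Schlick_sketch(f0: torch.Tensor, cosines: torch.Tensor) -> torch.Tensor:
        '''Hypothetical Schlick Fresnel approximation: F0 + (1 - F0) * (1 - cos θ)^5.'''
        return f0 + (1 - f0) * (1 - cosines.clamp(0, 1)) ** 5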