def _relight_flow(config: Configuration) -> None:
    '''
    The "relight" flow renders a picture of a texture using a Light, Viewer, and Camera from a (trained) SVBRDF autoencoder.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, input_path, output_path = config.load_relight_flow()
        autoencoder.eval()
        # It is assumed that the dimensions of the input image will be accepted by the network.
        input_image = image.load(path=input_path, encoding='sRGB')
        num_texture_rows = input_image.size(0)
        num_texture_cols = input_image.size(1)
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols)
        # By convention, PyTorch expects Tensors to be in [B, D, R, C] format.
        input_batch = torch.cat([input_image, input_distance],
                                dim=2).unsqueeze(0).permute(0, 3, 1, 2)
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.forward(input_batch))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
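The unsqueeze/permute pair above converts an image-style tensor into PyTorch's channels-first batch layout. A minimal sketch of the same layout change using only torch (the sizes here are illustrative, not taken from the autoencoder):

import torch

# An [R, C, D] image: 4 rows, 6 columns, 3 color channels plus 1 distance channel.
image_rcd = torch.rand(4, 6, 4)
# Add a batch axis ([1, R, C, D]) and move channels first ([1, D, R, C]).
batch_bdrc = image_rcd.unsqueeze(0).permute(0, 3, 1, 2)
assert batch_bdrc.shape == (1, 4, 4, 6)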
Example n. 2
 def result(self, image_path, results, expected, compare=True):
     fullpath = path = os.path.abspath(os.path.join('data', image_path))
     if self.resized_dir is not None:
         im = image.max_size(image.load(path), (320, 240))
         path = os.path.join(self.resized_dir, os.path.basename(image_path))
         print(os.path.dirname(image_path))
         image.save(im, path)
         del im
     self.write('<tr>')
     self.write('<td rowspan="%(len)d"><a href="%(fullpath)s"><img src="%(path)s" width="320" /><br />%(imgpath)s</a></td>',
         fullpath=fullpath, path=path, imgpath=image_path, len=len(results))
     for i, (name, result) in enumerate(results.items()):
         if i != 0:
             self.write("</tr><tr>")
         self.write('<td>%(name)s</td><td>%(score)r</td>',
             name=name, score=result[1])
         self.write('<td class="%(cls)s">%(confirmed)s</td><td>%(expected)s</td>',
             cls="bad" if compare and result[2] != (name in expected) else "good",
             confirmed="Yes" if result[2] else "",
             expected="Yes" if name in expected else "")
         self.write('<td class="%(cls)s">%(timing).2f second(s)</td>',
             cls="bad" if compare and result[3] > 10 else "",
             timing=result[3])
     self.write("</tr>")
Example n. 3
    def _load_SVBRDF(config: Dict) -> SVBRDF:
        '''
        Loads an SVBRDF from the given SVBRDF configuration.

        Args:
            config: SVBRDF configuration.
        
        Returns:
            SVBRDF instance as specified by the SVBRDF configuration.
        '''
        parameters = Tensor()
        if 'Parameters' in config:
            for (i, parameter_config) in enumerate(config['Parameters']):
                for key in ('Type', 'Path'):
                    assert key in parameter_config, f'SVBRDF parameter {i} in configuration file is missing key "{key}".'
                parameter = image.load(parameter_config['Path'],
                                       parameter_config['Type']).unsqueeze(0)
                parameters = parameter if parameters.size(0) == 0 \
                    else torch.cat([parameters, parameter], dim=3)

        assert 'Type' in config, 'Scope "SVBRDF" in configuration file is missing key "Type".'
        if config['Type'] == 'Lambertian':
            return LambertianSVBRDF(parameters)
        elif config['Type'] == 'Blinn-Phong':
            return BlinnPhongSVBRDF(parameters)
        elif config['Type'] == 'Substance':
            return SubstanceSVBRDF(parameters)
        elif config['Type'] == 'Disney':
            return DisneySVBRDF(parameters)
        else:
            raise ValueError('SVBRDF type "%s" is not supported.' % config['Type'])
 def load(self, max_size=None):
     try:
         im = image.load(self.imgpath)
     except IOError:
         return None
     if max_size is not None:
         im = image.max_size(im, max_size)
     return im
Example n. 6
def compute_path_image(infilename, outfilename, start, end, threshold=10):
    '''Alters the image in the file infilename by
    coloring the path from start to end and
    saving it to the file outfilename.
    Uses an implicit graph.'''
    img = load(infilename)
    tree = visit_tree_image(img, start, end, threshold)
    path = visit_path(tree, end)
    draw_path(img, path)
    save(outfilename, img)
def _blend_flow(config: Configuration) -> None:
    '''
    The "blend" flow blends two textures using a (trained) SVBRDF autoencoder and renders the result.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, alphas, input_paths, output_path = config.load_blend_flow()
        for key in ('Local', 'Global', 'Periodic'):
            assert key in alphas, f'Alphas dictionary is missing key "{key}".'
            assert 0 <= alphas[key] <= 1, \
                f'Alpha value for key "{key}" falls outside the closed interval [0, 1].'
        autoencoder.eval()

        # It is assumed that the dimensions of the input images will be accepted by the network.
        input_images = torch.stack([
            image.load(path=input_path, encoding='sRGB')
            for input_path in input_paths
        ],
                                   dim=0)

        # The radial distance field should be the same for both input images.
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols)
        # By convention, PyTorch expects Tensors to be in [B, D, R, C] format.
        input_batch = torch.cat(
            [input_images, input_distance.expand(2, -1, -1, -1)],
            dim=3).permute(0, 3, 1, 2)

        # The blended latent tensor must have a batch dimension to proceed through the SVBRDF decoder.
        texture_latents = autoencoder.encode(input_batch)
        blended_latents = torch.zeros_like(texture_latents[:1])
        start_channel = 0
        for key in ('Local', 'Global', 'Periodic'):
            # Crucially, the latent components must be traversed in smallest-to-greatest-depth order.
            step_channel = autoencoder.dimensions['Latent'][key][2]
            stop_channel = start_channel + step_channel
            channels = slice(start_channel, stop_channel)
            # An alpha value of 0 represents the first texture while an alpha value of 1 represents the second texture.
            blended_latents[0, channels, :, :] = texture_latents[0, channels, :, :] * (1 - alphas[key]) + \
                                                 texture_latents[1, channels, :, :] * alphas[key]
            start_channel = stop_channel
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(blended_latents))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
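The channel-wise blend above is a plain linear interpolation between the two textures' latent components. A standalone sketch of that step with hypothetical channel depths (the real depths come from autoencoder.dimensions['Latent']):

import torch

def lerp(a: torch.Tensor, b: torch.Tensor, alpha: float) -> torch.Tensor:
    # alpha = 0 reproduces a; alpha = 1 reproduces b.
    return a * (1 - alpha) + b * alpha

latents = torch.rand(2, 12, 8, 8)                        # two encoded textures
alphas = {'Local': 0.25, 'Global': 1.0, 'Periodic': 0.0}
depths = {'Local': 4, 'Global': 4, 'Periodic': 4}        # hypothetical depths
blended, start = torch.zeros_like(latents[:1]), 0
for key in ('Local', 'Global', 'Periodic'):
    span = slice(start, start + depths[key])
    blended[0, span] = lerp(latents[0, span], latents[1, span], alphas[key])
    start += depths[key]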
Example n. 8
 def __init__(self, w=None, h=None, filename=None):
     """Constructs a w x h image of black pixels."""
     print(w, h, filename)
     assert (w is not None and
             h is not None) or filename is not None, 'Arguments must be specified'
     if filename is None:
         self._pixels = [[Color(0, 0, 0) for _ in range(w)]
                         for _ in range(h)]
     else:
         _image = image.load(filename)
         self._pixels = [[Color(r, g, b) for r, g, b in line]
                         for line in _image]
Example n. 9
def compute_path(infilename, outfilename, start, end, threshold=10):
    '''Alters the image in the file infilename by
    coloring the path from start to end and
    saving it to the file outfilename. Uses the
    load() and save() functions from the chapter
    on images.'''
    img = load(infilename)
    g = image_to_graph(img, threshold)
    tree = visit_tree(g, start)
    path = visit_path(tree, end)
    draw_path(img, path)
    save(outfilename, img)
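visit_tree() and visit_path() are not shown in this snippet; a plausible visit_path(), assuming the visit tree maps each node to its parent (with None at the root), simply walks parent pointers back from end:

def visit_path(tree, end):
    # Follow parent pointers from the destination back to the start,
    # then reverse to obtain the path in start-to-end order.
    path = [end]
    while tree[path[-1]] is not None:
        path.append(tree[path[-1]])
    path.reverse()
    return path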
Example n. 10
def __main__():
    args = argparser()
    out = args['output']

    orig_img = image.load(args['image']).convert('RGB')
    if args['contrast'] is not None:
        orig_img = ImageEnhance.Contrast(orig_img).enhance(args['contrast'])
    orig_img = resizer.resize(orig_img, 1, ANTI_FONT_DISTORTION)
    fit_ratio = resizer.fit_in_ratio(orig_img.size, term.size())
    if args['ratio']:
        fit_ratio *= args['ratio']

    img = resizer.resize(orig_img, fit_ratio, (1, 1))
    lows, mids, highs = posterizer.thresholds(img)
    if args['color'] is not None:
        # Use lists rather than lazy map objects: lows and highs are reused for
        # every pixel in the rendering loop below.
        lows = [bound_addition(val, args['color']) for val in lows]
        mids = [bound_addition(val, args['color']) for val in mids]
        highs = [bound_addition(val, args['color']) for val in highs]
    colors = posterizer.posterize(img, mids, highs)

    #    img.show()
    shapes = resizer.resize(orig_img, fit_ratio, (2, 4))
    #    shapes.show()
    shapes = shapes.convert('L')
    threshold = otsu.threshold(shapes.histogram())
    #    mask = shapes.point(lambda val: 255 if val <= threshold else 0).convert('1')
    #    threshold = otsu.threshold(shapes.histogram(mask))
    if args['shape'] is not None:
        threshold = bound_addition(threshold, args['shape'])
    shapes = shapes.point(lambda val: 255 if val > threshold else 0).convert('1')
    #    shapes.show()
    dots = Canvas()
    w, h = shapes.size
    for index, pixel in enumerate(list(shapes.getdata()), 0):
        if pixel:
            dots.set(index % w, index // w)


    #    out.write(dots.frame(0,0,w,h))
    #    out.write('\n')

    w, h = colors.size
    for index, pixel in enumerate(list(colors.getdata()), 0):
        x = (index % w) * 2
        y = (index // w) * 4
        # In Python 3, frame() returns text directly; no unicode() decoding needed.
        miniframe = dots.frame(x, y, x + 2, y + 4)
        miniframe = miniframe if len(miniframe) else '\u2800'
        out.write(color_print.rgb2esc(pixel, [0, 0, 0], lows, highs,
                                      miniframe))
        if not (1 + index) % w:
            out.write('\n')
Example n. 11
    def _load_normals(config: Dict) -> Tensor:
        '''
        Loads a normal map from the given flow configuration.

        Args:
            config: Flow configuration.
        
        Returns:
            Tensor [1, R, C, 3] of surface normals as specified by the normal map image.
        '''
        assert 'Normals' in config, 'Scope "Flow" in configuration file is missing key "Normals".'
        return vector.normalize(2 * image.load(config['Normals']).unsqueeze(0) - 1)
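The expression 2 * pixels - 1 maps image values from [0, 1] onto the [-1, 1] range of unit normal components before renormalizing. The same decoding written with plain torch, assuming an [R, C, 3] normal map:

import torch
import torch.nn.functional as F

def decode_normal_map(pixels: torch.Tensor) -> torch.Tensor:
    # Pixels in [0, 1] -> components in [-1, 1], renormalized per pixel.
    return F.normalize(2 * pixels - 1, dim=-1)

normals = decode_normal_map(torch.rand(16, 16, 3))
assert torch.allclose(normals.norm(dim=-1), torch.ones(16, 16))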
Example n. 12
def _warp_flow(config: Configuration) -> None:
    '''
    The "warp" flow renders a plane from a source texture by sampling a local latent field uniformly at random.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, output_size, input_path, output_path = config.load_warp_flow()
        autoencoder.eval()

        # It is assumed that the dimensions of the input image will be accepted by the SVBRDF autoencoder network.
        input_images = image.load(path=input_path,
                                  encoding='sRGB').unsqueeze(0)
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols).unsqueeze(0)
        input_batch = torch.cat([input_images, input_distance],
                                dim=3).permute(0, 3, 1, 2)

        # The expansion ratios represent the multiplicative scaling in size from the latent field to the output texture.
        row_expansion_ratio = (autoencoder.dimensions['Texture']['Output'][0]
                               // autoencoder.dimensions['Latent']['Local'][0])
        col_expansion_ratio = (autoencoder.dimensions['Texture']['Output'][1]
                               // autoencoder.dimensions['Latent']['Local'][1])

        # The value at each position in the local field is sampled uniformly at random to simulate structural noise.
        num_warped_rows = output_size[0] // row_expansion_ratio
        num_warped_cols = output_size[1] // col_expansion_ratio
        local_field = torch.rand(
            (1, autoencoder.dimensions['Latent']['Local'][2], num_warped_rows,
             num_warped_cols))
        # The global field is the same everywhere to preserve the look and feel of the input texture.
        global_field = autoencoder.encoders['Global'].forward(
            input_batch).expand(1, num_warped_rows, num_warped_cols,
                                -1).permute(0, 3, 1, 2)
        # The periodic field is derived directly from the global field.
        periodic_field = autoencoder.derive_periodic_field(global_field)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        latents = torch.cat([local_field, global_field, periodic_field], dim=1)
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(latents))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
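A stripped-down version of the field assembly above, showing how a random local field and a spatially-constant global field are concatenated along the channel axis (the sizes are hypothetical and the periodic field is omitted):

import torch

local = torch.rand(1, 8, 16, 16)                    # structural noise
global_vec = torch.rand(1, 4, 1, 1)                 # one code for the whole plane
global_field = global_vec.expand(-1, -1, 16, 16)    # the same code everywhere
latents = torch.cat([local, global_field], dim=1)   # [1, 12, 16, 16]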
Example n. 13
def mosaic_average(fname_in, fname_out, s):
    '''Returns a new image obtained by dividing
    the image img into squares of side s and filling
    each square with the average of its colors.'''
    img = image.load(fname_in)
    w, h = len(img[0]), len(img)
    ret = image.create(w, h, (0,0,0))
    # iterate over the possible squares
    for jj in range(h//s):
        for ii in range(w//s):
            # average color of the square
            c = average(img, ii*s, jj*s, s, s)
            draw_quad(ret, ii*s, jj*s, s, s, c)
    image.save(fname_out, ret)
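The average() helper is assumed rather than shown; a plausible version, taking img as a list of rows of (r, g, b) tuples, averages each channel over the rectangle:

def average(img, x0, y0, w, h):
    # Mean color of the w x h rectangle whose top-left corner is (x0, y0).
    pixels = [img[y][x] for y in range(y0, y0 + h) for x in range(x0, x0 + w)]
    return tuple(sum(p[i] for p in pixels) // len(pixels) for i in range(3))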
Example n. 14
def _tile_flow(config: Configuration) -> None:
    '''
    The "tile" flow attempts to synthesize a tileable output texture from a given input texture.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, overlap, input_path, output_path = config.load_tile_flow()
        autoencoder.eval()

        # It is assumed that the dimensions of the input image will be accepted by the network.
        input_image = image.load(path=input_path, encoding='sRGB')
        input_distance = utils.create_radial_distance_field(
            num_rows=autoencoder.dimensions['Texture']['Input'][0],
            num_cols=autoencoder.dimensions['Texture']['Input'][1])
        input_batch = torch.cat([input_image, input_distance],
                                dim=2).unsqueeze(0).permute(0, 3, 1, 2)

        # As long as the receptive field of an output pixel is less than the size of the latent field, a tileable
        # output texture can be obtained by decoding a tiling of the latent field (interpolated for good measure).
        latent_tiles_row = autoencoder.encode(input_batch).expand(
            3, -1, -1, -1).permute(0, 2, 3, 1)
        latent_field_row = utils.interpolate(latent_tiles_row,
                                             overlap=overlap).expand(
                                                 3, -1, -1, -1)
        latent_field = utils.interpolate(
            latent_field_row.transpose(1, 2),
            overlap=overlap).transpose(0, 1).unsqueeze(0).permute(0, 3, 1, 2)

        # The center crop of the output image will be tileable as long as the latent field was smoothly convolved.
        output = autoencoder.decode(latent_field)
        output_row_padding = output.size(2) // 2 - autoencoder.dimensions['Texture']['Output'][0] // 2
        output_col_padding = output.size(3) // 2 - autoencoder.dimensions['Texture']['Output'][1] // 2
        cropped_output = output[:, :, output_row_padding:-output_row_padding,
                                output_col_padding:-output_col_padding]

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            cropped_output)
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
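The center crop at the end can be isolated into a small helper; a sketch for [B, D, R, C] tensors, assuming the requested size is no larger than the decoded output:

import torch

def center_crop(batch: torch.Tensor, rows: int, cols: int) -> torch.Tensor:
    # Crop a [B, D, R, C] tensor to rows x cols around its center.
    pad_r = (batch.size(2) - rows) // 2
    pad_c = (batch.size(3) - cols) // 2
    return batch[:, :, pad_r:pad_r + rows, pad_c:pad_c + cols]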
Example n. 15
    def _load_normals(self, texture: Texture) -> Tensor:
        '''
        Loads the normal map associated with the given texture.

        Args:
            texture: Texture of interest.

        Returns:
            Tensor [1, R, C, 3] of texture normals.
        '''
        path_to_texture = self._derive_path_to_texture_directory(texture)
        path_to_normals = os.path.join(path_to_texture,
                                       self._layout['Normals'])
        return vector.normalize(2 * image.load(path_to_normals).unsqueeze(0) - 1)
Example n. 16
def mosaic_nearest(fname_in, fname_out, s):
    """Returns a new image obtained by dividing
    the image img into squares of side s and filling
    each square with the color of its top-left
    corner"""
    img = image.load(fname_in)
    w, h = len(img[0]), len(img)
    ret = image.create(w, h, (0,0,0))
    # iterate over the possible squares
    for jj in range(h//s):
        for ii in range(w//s):
            # color of the top-left corner
            c = img[jj*s][ii*s]
            draw_quad(ret, ii*s, jj*s, s, s, c)
    image.save(fname_out, ret)
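Likewise, draw_quad() is assumed; a plausible implementation paints a rectangle of the destination image with a single color:

def draw_quad(img, x0, y0, w, h, color):
    # Fill the w x h rectangle with top-left corner (x0, y0) with one color.
    for y in range(y0, y0 + h):
        for x in range(x0, x0 + w):
            img[y][x] = color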
Example n. 17
def _feedback_flow(config: Configuration) -> None:
    '''
    The "feedback" flow iteratively infers the SVBRDF parameters of a texture, renders it, and feeds the output of the
    rendering back into the network.  The purpose of this flow is to test the robustness of an SVBRDF autoencoder.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, camera, (feedback_lights, feedback_viewer), (
            rendering_lights, rendering_viewer
        ), input_path, output_path, loops = config.load_feedback_flow()
        autoencoder.eval()

        # It is assumed that the dimensions of the input image will be accepted by the network.
        input_image = image.load(path=input_path, encoding='sRGB')
        num_texture_rows = input_image.size(0)
        num_texture_cols = input_image.size(1)
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols)

        # By convention, PyTorch expects Tensors to be in [B, D, R, C] format.
        input_batch = torch.cat([input_image, input_distance],
                                dim=2).unsqueeze(0).permute(0, 3, 1, 2)

        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.forward(input_batch))
        surface = utils.create_grid(num_rows=num_texture_rows,
                                    num_cols=num_texture_cols)

        for i in tqdm.tqdm(range(loops), desc='Feedback Looping'):
            # The slightly-awkward ordering of statements before and inside the loops ensures that |loops| can be set to zero.
            input_image = shader.shade(surface=surface,
                                       normals=normals,
                                       lights=feedback_lights,
                                       viewer=feedback_viewer,
                                       svbrdf=svbrdf)[0]
            input_batch = torch.cat([input_image, input_distance],
                                    dim=2).unsqueeze(0).permute(0, 3, 1, 2)
            normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
                autoencoder.forward(input_batch))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=rendering_lights,
                           viewer=rendering_viewer,
                           camera=camera,
                           path=output_path)
Example n. 18
def polarize(fname_in, colors, fname_out):
    inputImage = im.load(fname_in)
    outputImage = []
    for r in range(len(inputImage)):
        outputImageRow = []
        for c in range(len(inputImage[0])):
            outputImageRow.append(ApproximateColor(inputImage[r][c], colors))
        outputImage.append(outputImageRow)
    im.save(fname_out, outputImage)
Example n. 19
    def __init__(self, path: str, num_samples: int, intensity: float) -> None:
        '''
        Constructs a new ImageLight from the given image path, number of samples, and intensity.

        Args:
            path: Path to an environment map image.
            num_samples: Number of samples to take from the environment map.
            intensity: Scalar applied to each environment map radiance sample.
        '''
        assert path, "Path cannot be empty or set to None."
        assert num_samples > 0, "Number of samples must be greater than zero."
        self._image = image.load(path, 'sRGB')
        self._intensity = intensity
        # Construct a grid of azimuth (ϕ) and zenith (θ) angles uniformly distributed over the upper unit hemisphere.
        self._samples = torch.stack([
            2 * math.pi * torch.rand(num_samples, device=utils.get_device_name()),
            torch.acos(torch.rand(num_samples, device=utils.get_device_name()))
        ], dim=1)
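Drawing the zenith angle as acos(u) with u ~ U(0, 1) distributes the samples uniformly over the hemisphere's area instead of bunching them at the pole. To use the (phi, theta) samples for shading, they would typically be converted to unit direction vectors; a sketch assuming a z-up convention (not part of the original class):

import torch

def to_cartesian(samples: torch.Tensor) -> torch.Tensor:
    # samples: [N, 2] with columns (phi, theta); returns [N, 3] unit vectors.
    phi, theta = samples[:, 0], samples[:, 1]
    sin_theta = torch.sin(theta)
    return torch.stack([sin_theta * torch.cos(phi),
                        sin_theta * torch.sin(phi),
                        torch.cos(theta)], dim=1)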
Example n. 20
    def _load_parameters(self, texture: Texture) -> Tensor:
        '''
        Loads the parameter maps associated with the given texture.

        Args:
            texture: Texture of interest.

        Returns:
            Tensor [R, C, D] of parameter maps.
        '''
        path_to_texture = self._derive_path_to_texture_directory(texture)
        parameters = Tensor()
        for (i, parameter_config) in enumerate(self._layout['Parameters']):
            path_to_parameter = os.path.join(path_to_texture,
                                             parameter_config['Name'])
            parameter = image.load(path_to_parameter,
                                   parameter_config['Type']).unsqueeze(0)
            parameters = parameter if parameters.size(0) == 0 else torch.cat(
                [parameters, parameter], dim=3)
        return parameters
Example n. 21
def mosaic_size(fname_in, fname_out, s):
    '''Returns a new image obtained by dividing
    the image img into small squares of side s and
    drawing inside each of them, on a black
    background, a central white square whose side
    is proportional to the average brightness of
    the corresponding square'''
    img = image.load(fname_in)
    w, h = len(img[0]), len(img)
    ret = image.create(w, h, (0,0,0))
    # iterate over the possible squares
    for jj in range(h//s):
        for ii in range(w//s):
            # average color of the square
            c = average(img, ii*s, jj*s, s, s)
            # side of the white square
            r = round(s*(c[0]+c[1]+c[2])/(3*255))
            draw_quad(ret, ii*s+(s-r)//2,
                jj*s+(s-r)//2, r, r, (255,255,255))
    image.save(fname_out, ret)
Example n. 22
def fill(img_in, boundaries, pp_cc, img_out):
    '''Flood-fills the region around each seed point with its paired color,
    without crossing boundary pixels.'''
    img = image.load(img_in)
    w, h = len(img[0]), len(img)
    for pc in pp_cc:
        visited = set([pc[0]])
        active = set([pc[0]])
        while len(active) > 0:
            newactive = set()
            while len(active) > 0:
                x, y = active.pop()
                for dx, dy in ADJACENTS:
                    px, py = x + dx, y + dy
                    if 0 <= px < w and 0 <= py < h:
                        if (px, py) not in boundaries:
                            img[py][px] = pc[1]
                            if (px, py) not in visited:
                                visited.add((px, py))
                                newactive.add((px, py))
                active = newactive
    image.save(img_out, img)
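The nested while loops above emulate a breadth-first frontier with two sets. The same fill written against a conventional collections.deque queue (a sketch; ADJACENTS is assumed to hold the neighbour offsets):

from collections import deque

def flood_fill(img, start, color, boundaries, adjacents):
    # Color every pixel reachable from start without crossing a boundary pixel.
    w, h = len(img[0]), len(img)
    queue, visited = deque([start]), {start}
    while queue:
        x, y = queue.popleft()
        img[y][x] = color
        for dx, dy in adjacents:
            p = (x + dx, y + dy)
            if 0 <= p[0] < w and 0 <= p[1] < h and p not in boundaries and p not in visited:
                visited.add(p)
                queue.append(p)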
Example n. 23
def _load_MERL_100_BRDF_slices(path: str) -> List[Tensor]:
    '''
    Loads the BRDF slices from the MERL 100 dataset image located at the given path.

    Args:
        path: Path to the MERL 100 BRDF slice image.

    Returns:
        List of Tensors [60, 60, 3] representing the BRDF slice of each material in the MERL 100 dataset.
    '''
    atlas = image.load(path=path, encoding='sRGB')
    brdfs = []
    # The row and column coordinates were manually extracted from the BRDF slice image.
    for row in (23, 107, 190, 274, 358, 441, 525, 609):
        for col in (25, 109, 192, 276, 360, 443, 527, 610, 694, 777, 861, 945,
                    1029):
            brdf = atlas[row:row + 60, col:col + 60]
            brdfs.append(brdf)
            if len(brdfs) == 100:
                return brdfs
    raise Exception(
        'Number of BRDF slices in the MERL 100 dataset is less than 100.')
Example n. 24
def edging(fname_in, fname_out, n_iter):
    img = image.load(fname_in)
    width, height = len(img[0]), len(img)
    img_out = image.create(width, height, (0, 0, 0))
    pix_hood = ((-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0))
    for _ in range(n_iter):
        for row in range(height):
            for col in range(width):
                color = img[row][col]
                max_distance = 0
                max_color = color
                for x, y in pix_hood:
                    row_hood = y + row
                    col_hood = x + col
                    if not (0 <= col_hood < width and 0 <= row_hood < height):
                        continue
                    color_hood = img[row_hood][col_hood]
                    distance = calc_dist(color, color_hood)
                    if distance > max_distance:
                        max_distance = distance
                        max_color = color_hood
                img_out[row][col] = max_color
        img, img_out = img_out, img
    image.save(fname_out, img)
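calc_dist() is not included in the snippet; a plausible definition is the Euclidean distance between RGB triples:

import math

def calc_dist(c1, c2):
    # Euclidean distance between two RGB colors.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(c1, c2)))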
Example n. 25
def _mosaic_flow(config: Configuration) -> None:
    '''
    The "mosaic" flow reconstructs an image of arbitrary scale by partitioning the given image into smaller images,
    encoding the smaller images as latent fields, and then blending the resulting latent fields in a bilinear fashion.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, overlap, input_path, output_path = config.load_mosaic_flow()
        autoencoder.eval()

        input_image = image.load(path=input_path, encoding='sRGB')

        # If the input size of the SVBRDF autoencoder does not evenly divide the input image, no valid partitioning exists.
        num_image_rows = input_image.size(0)
        num_image_cols = input_image.size(1)
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        assert (num_image_rows % num_texture_rows == 0) and (
            num_image_cols % num_texture_cols == 0
        ), 'SVBRDF autoencoder input size must divide input image size.'

        # The input batch is constructed by splitting the image and distance fragments by row and then by column, and
        # then concatenating the result in such a way as to form a single column which can be stacked to form a batch.
        input_distance = utils.create_radial_distance_field(
            num_rows=num_image_rows, num_cols=num_image_cols)
        input_tensor = torch.cat([input_image, input_distance], dim=2)
        input_batch = torch.cat([
            torch.stack([
                input_batch_tile
                for input_batch_tile in input_batch_row.split(num_texture_cols,
                                                              dim=1)
            ],
                        dim=0)
            for input_batch_row in input_tensor.split(num_texture_rows, dim=0)
        ],
                                dim=0).permute(0, 3, 1, 2)

        # The latent dimensions defining the size of a sample latent field can be visualized as follows:
        #   +------------+------------+------------+------------+        +----+----+----+----+
        #   |            |            |            |            |        |  1 |  2 |  3 |  4 |
        #   |      1     |      2     |      3     |      4     |        +----+----+----+----+
        #   |            |            |            |            |        |  5 |  6 |  7 |  8 |
        #   +------------+------------+------------+------------+        +----+----+----+----+
        #   |            |            |            |            |        |  9 | 10 | 11 | 12 |
        #   |      5     |      6     |      7     |      8     |        +----+----+----+----+
        #   |            |            |            |            |         <----- Grid ------>
        #   +------------+------------+------------+------------+
        #   |            |            |            |            |
        #   |      9     |     10     |     11     |     12     |
        #   |            |            |            |            |
        #   +------------+------------+------------+------------+
        #    <-- Tile -->
        #    <----------------- Latent Field ------------------>
        num_grid_cols = num_image_cols // num_texture_cols

        # The latent field is assembled by splitting the batch of latent tiles according to their latent grid rows and
        # then interpolating the latent field between each tile in a bilinear fashion.
        texture_latents = torch.stack(
            autoencoder.encode(input_batch).permute(0, 2, 3, 1).split(num_grid_cols, dim=0),
            dim=0)
        blended_latents = utils.interpolate(
            torch.stack([
                utils.interpolate(texture_latent_row,
                                  overlap=overlap).transpose(0, 1)
                for texture_latent_row in texture_latents
            ],
                        dim=0),
            overlap=overlap).transpose(0, 1).unsqueeze(0).permute(0, 3, 1, 2)

        # The previous blending procedure leaves the periodic latent component out of alignment with the field indices.
        channels = {
            key: autoencoder.dimensions['Latent'][key][2]
            for key in ('Local', 'Global', 'Periodic')
        }
        global_field = blended_latents[:, channels['Local']:channels['Local'] + channels['Global'], :, :]
        blended_latents[:, -channels['Periodic']:, :, :] = \
            autoencoder.derive_periodic_field(global_field)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(blended_latents))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
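The nested split/stack expression that builds input_batch is dense; the same partitioning on a toy [R, C, D] tensor makes the intermediate shapes visible (the sizes are illustrative):

import torch

image_tensor = torch.arange(24.).reshape(4, 6, 1)      # [R=4, C=6, D=1]
tiles = torch.cat([
    torch.stack(list(band.split(3, dim=1)), dim=0)     # each band -> two 2x3 tiles
    for band in image_tensor.split(2, dim=0)           # two bands of 2 rows each
], dim=0)
assert tiles.shape == (4, 2, 3, 1)                     # a batch of four tiles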
Example n. 26
 def load(self, filename):
     img = image.load(filename)
     self._pixels = [[Color(*c) for c in line] for line in img]
Example n. 27
def _shuffle_flow(config: Configuration) -> None:
    '''
    The "shuffle" flow expands the SVBRDF parameters of an image to fill an arbitrary plane by shuffling latent tiles.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, tile_size, output_size, input_path, output_path = config.load_shuffle_flow()
        autoencoder.eval()

        # Continuing to index sizes with 0 and 1 is simultaneously confusing and a potential debugging nightmare.
        num_tile_rows, num_tile_cols = tile_size
        num_output_rows, num_output_cols = output_size

        # Similarly, it is worthwhile to give names to the otherwise-generic SVBRDF autoencoder dimensions.
        num_latent_rows = autoencoder.dimensions['Latent']['Local'][0]
        num_latent_cols = autoencoder.dimensions['Latent']['Local'][1]
        num_texture_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_cols = autoencoder.dimensions['Texture']['Input'][1]
        row_expansion_ratio = autoencoder.dimensions['Texture']['Output'][0] // num_latent_rows
        col_expansion_ratio = autoencoder.dimensions['Texture']['Output'][1] // num_latent_cols

        # These sanity checks may seem obvious but you never know...
        assert num_tile_rows <= num_latent_rows, 'Tile height cannot exceed the height of the latent field.'
        assert num_tile_cols <= num_latent_cols, 'Tile width cannot exceed the width of the latent field.'
        assert num_output_rows % (
            row_expansion_ratio * num_tile_rows
        ) == 0, 'Latent height inferred from the output height must be a multiple of the tile height.'
        assert num_output_cols % (
            col_expansion_ratio * num_tile_cols
        ) == 0, 'Latent width inferred from the output width must be a multiple of the tile width.'

        # It is assumed that the dimensions of the input images will be accepted by the network.
        input_images = image.load(path=input_path,
                                  encoding='sRGB').unsqueeze(0)
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_rows, num_cols=num_texture_cols).unsqueeze(0)
        input_batch = torch.cat([input_images, input_distance],
                                dim=3).permute(0, 3, 1, 2)
        input_latent = autoencoder.encode(input_batch)

        # As mentioned in the assertions, the size of the shuffled latent field can be inferred from the desired output texture size.
        num_shuffled_rows = num_output_rows // row_expansion_ratio
        num_shuffled_cols = num_output_cols // col_expansion_ratio
        shuffled_latent = torch.zeros(
            (1, input_latent.size(1), num_shuffled_rows, num_shuffled_cols),
            device=utils.get_device_name())

        # The shuffled latent is populated with random tiles from the input image latent.
        for row in range(0, shuffled_latent.size(2), num_tile_rows):
            for col in range(0, shuffled_latent.size(3), num_tile_cols):
                original_row_crop, original_col_crop = utils.sample_embedded_rectangle(
                    num_outer_rows=input_latent.size(2),
                    num_inner_rows=num_tile_rows,
                    num_outer_cols=input_latent.size(3),
                    num_inner_cols=num_tile_cols)
                shuffled_row_crop = slice(row, row + num_tile_rows)
                shuffled_col_crop = slice(col, col + num_tile_cols)
                shuffled_latent[:, :, shuffled_row_crop, shuffled_col_crop] = \
                    input_latent[:, :, original_row_crop, original_col_crop]

        # The periodic latent component needs to be aligned with its relative position in the field.
        channels = {
            key: autoencoder.dimensions['Latent'][key][2]
            for key in ('Local', 'Global', 'Periodic')
        }
        global_field = shuffled_latent[:, channels['Local']:channels['Local'] + channels['Global'], :, :]
        shuffled_latent[:, -channels['Periodic']:, :, :] = \
            autoencoder.derive_periodic_field(global_field)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(shuffled_latent))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
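utils.sample_embedded_rectangle is not shown here; a plausible implementation picks a random top-left corner such that the inner rectangle fits entirely inside the outer one and returns the matching slices:

import random

def sample_embedded_rectangle(num_outer_rows, num_inner_rows,
                              num_outer_cols, num_inner_cols):
    # Random placement of an inner rectangle fully contained in the outer one.
    row = random.randint(0, num_outer_rows - num_inner_rows)
    col = random.randint(0, num_outer_cols - num_inner_cols)
    return (slice(row, row + num_inner_rows),
            slice(col, col + num_inner_cols))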
Example n. 28
	ifilename="italy-smoothed-edges.pgm"
	#process command line arguments
	for opts in sys.argv:
		if opts == "--help" or opts== "-h":
			print "syntax: invert [inputfile]"
			quit()
	if len(sys.argv)>1: #first argument is filename
		ifilename=sys.argv[1]
	if len(sys.argv)>2: #second argument is threshold
		threshold=int(sys.argv[2])
	strippedfilename=ifilename
	#take the extension off of the filename
	if ifilename[-4:]==".ppm" or ifilename[-4:]==".pgm":
		strippedfilename=ifilename[:-4]
	import image
	inlines=image.load(ifilename)
	#add suffix (so we make it clear what the file is)
	if inlines[0]=="P3":
		ofilename=strippedfilename+"-inverted.ppm"
	else:
		ofilename=strippedfilename+"-inverted.pgm"

	#create new image with areas filled
	outarray=invert(inlines)

	#write file
	print "DEBUG: writing to file "+ofilename
	outfile=open(ofilename,'w')
	for i in outarray:
		outfile.write(str(i)+"\n")
	outfile.close()
Example n. 29
	#second line=comment
	width=int(image[1])
	height=int(image[2])
	count=0
	for x in range(1,width-1):
		for y in range(1,height-1):
			file2[width*y+x+4] = int((1*getpixel(x-1,y-1,image) + 2*getpixel(x,y-1,image) + 1*getpixel(x+1,y-1,image)
			                          + 2*getpixel(x-1,y,image) + 4*getpixel(x,y,image) + 2*getpixel(x+1,y,image)
			                          + 1*getpixel(x-1,y+1,image) + 2*getpixel(x,y+1,image) + 1*getpixel(x+1,y+1,image)) / 16)
	return file2
if __name__ == "__main__":
	import sys
	ifilename="italy.pgm"
	for opts in sys.argv:
		if opts == "--help" or opts== "-h":
			print "syntax: smooth [inputfile]"
			quit()
	if len(sys.argv)>1:
		ifilename=sys.argv[1]
	strippedfilename=ifilename
	if ifilename[-4:]==".ppm" or ifilename[-4:]==".pgm":
		strippedfilename=ifilename[:-4]
	ofilename=strippedfilename+"-smoothed.pgm"
	inlines=load(ifilename)
	outarray=smooth(inlines)

	print "DEBUG: writing to file "+ofilename
	outfile=open(ofilename,'w')
	for i in outarray:
		outfile.write(str(i)+"\n")
	outfile.close()
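The weighted sum in smooth() is a convolution with the 3x3 binomial kernel, written out here for reference:

# Weights of the smoothing convolution in smooth(), normalized by 1/16:
KERNEL = [[1, 2, 1],
          [2, 4, 2],
          [1, 2, 1]]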

Example n. 30
File: item.py Project: ext/scfa
 def __init__(self, properties, **kwargs):
     Item.__init__(self, **kwargs)
     self.hp = 25
     self.texture = image.load(properties['texture'])
Example n. 31
# -*- coding: utf-8 -*-
'''
Write the following functions.
1. rotL90(img) returns a new image that is the image img rotated left by 90 degrees.
   Example: the file es1.png shows how the image img_in_01.png is rotated
'''

import image as im
import IPython.display as ipd
import random

img = im.load('img_in_01.png')
im.visd(img)

def rot90(img):
    w, h = len(img[0]), len(img)
    imgR = im.create(h, w, (0,0,0))
    for y in range(h):
        for x in range(w):
            imgR[w - 1 - x][y] = img[y][x]  # column x becomes row w-1-x of the rotated image
    return imgR

#imgR = rot90(img)
#im.visd(imgR)

'''
2. red(img, s) returns a new image that is the image img reduced by a factor s. Assume
   that s is a positive integer that exactly divides both the width and the height of img.
   To compute the color of a pixel of the reduced image, use the technique of mosaic_average().
   For example, red(img, 2) and red(img, 4), where img is the image img_in_01.png,
   produce the images es2_1.png and es2_2.png
'''
Example n. 32
File: item.py Project: ext/scfa
 def __init__(self, **kwargs):
     Item.__init__(self, **kwargs)
     self.hp = 50
     self.texture = image.load('texture/kebab.png')
Example n. 33
File: item.py Project: ext/scfa
 def __init__(self, **kwargs):
     Item.__init__(self, **kwargs)
     self.hp = 35
     self.texture = image.load('texture/apple.png')
Example n. 34
import cv2

import image
import paint
import seismic
from data import *

if __name__ == '__main__':
    _seismic = image.load("SeismicScaled.jpg")
    _transformation = seismic.parse_transformation(_seismic, 15 * 1000, int(2.5 * 1000), 10)

    _geo = Geo(Well.generate_left(), Well.generate_right(), _transformation.width, _transformation.left_well_intent, _transformation.right_well_intent)
    _match = Match.generate()
    _image = image.create(_geo.height, _geo.width)

    # image.show(_seismic)

    paint.wells(_image, _geo)
    paint.lines(_image, _geo, _match)
    paint.fill(_image, _geo, _match)

    # _image1 = _image.copy()
    # paint.fill(_image1, _geo, _match)
    # paint.wells(_image1, _geo)
    #
    # _, _image2 = cv2.threshold(_image1, 127, 255, cv2.THRESH_BINARY)
    #
    # print("Showing ...")
    # image.show(_image, _image1, _image2)

    _result = image.create(_transformation.height, _transformation.width)
Example n. 35
def _album_flow(config: Configuration) -> None:
    '''
    The "album" flow generates an image by blending the latent fields of a random sample of input images.

    Args:
        config: Configuration specifying the parameters of the flow.
    '''
    with torch.no_grad():
        autoencoder, svbrdf, lights, viewer, camera, output_size, overlap, input_paths, output_path = config.load_album_flow()
        autoencoder.eval()

        # Interpreting the indexing of dimensions exactly once saves more minutes of debugging than keystrokes.
        num_output_rows = output_size[0]
        num_output_cols = output_size[1]
        num_texture_input_rows = autoencoder.dimensions['Texture']['Input'][0]
        num_texture_input_cols = autoencoder.dimensions['Texture']['Input'][1]
        num_texture_output_rows = autoencoder.dimensions['Texture']['Output'][0]
        num_texture_output_cols = autoencoder.dimensions['Texture']['Output'][1]

        # The number of rows and columns that constitute the latent grid (to be decoded into the output image) can be
        # inferred from the relative size of the output image and the SVBRDF autoencoder input.
        num_grid_rows = num_output_rows // num_texture_output_rows
        num_grid_cols = num_output_cols // num_texture_output_cols
        assert (num_output_rows % num_texture_output_rows == 0) and (num_output_cols % num_texture_output_cols == 0), \
               'SVBRDF autoencoder output size must divide output image size.'

        # The images to be included in the latent grid are chosen uniformly at random with replacement from the specified input images.
        input_images = torch.stack([
            image.load(path=input_path, encoding='sRGB')
            for input_path in input_paths
        ],
                                   dim=0)
        # torch.randint treats |high| as exclusive, so len(input_images) keeps the
        # last image eligible for sampling.
        album_images = input_images[torch.randint(low=0,
                                                  high=len(input_images),
                                                  size=(num_grid_rows *
                                                        num_grid_cols, ))]

        # Before feeding the images through the SVBRDF autoencoder, they must be augmented with a radial distance field.
        input_distance = utils.create_radial_distance_field(
            num_rows=num_texture_input_rows, num_cols=num_texture_input_cols)
        input_batch = torch.cat([
            album_images,
            input_distance.expand(album_images.size(0), -1, -1, -1)
        ],
                                dim=3).permute(0, 3, 1, 2)

        # The latent field is assembled by splitting the batch of latent tiles according to their latent grid rows and
        # then interpolating the latent field between each tile in a bilinear fashion.
        texture_latents = torch.stack(
            autoencoder.encode(input_batch).permute(0, 2, 3, 1).split(num_grid_cols, dim=0),
            dim=0)
        blended_latents = utils.interpolate(
            torch.stack([
                utils.interpolate(texture_latent_row,
                                  overlap=overlap).transpose(0, 1)
                for texture_latent_row in texture_latents
            ],
                        dim=0),
            overlap=overlap).transpose(0, 1).unsqueeze(0).permute(0, 3, 1, 2)

        # The previous blending procedure leaves the periodic latent component out of alignment with the field indices.
        channels = {
            key: autoencoder.dimensions['Latent'][key][2]
            for key in ('Local', 'Global', 'Periodic')
        }
        global_field = blended_latents[:, channels['Local']:channels['Local'] + channels['Global'], :, :]
        blended_latents[:, -channels['Periodic']:, :, :] = \
            autoencoder.derive_periodic_field(global_field)

        # The fully-convolutional nature of the SVBRDF decoder trivializes the creation of textures with arbitrary sizes.
        normals, svbrdf.parameters = SVBRDFAutoencoder.interpret(
            autoencoder.decode(blended_latents))
        _shade_render_save(normals=normals,
                           svbrdf=svbrdf,
                           lights=lights,
                           viewer=viewer,
                           camera=camera,
                           path=output_path)
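torch.randint treats its high argument as exclusive, which is why the sampling above uses high=len(input_images) to keep the last image eligible. A toy demonstration of the same draw-with-replacement:

import torch

images = torch.rand(5, 8, 8, 3)                       # five 8x8 RGB images
idx = torch.randint(low=0, high=images.size(0), size=(12,))
album = images[idx]                                   # 12 draws with replacement
assert album.shape == (12, 8, 8, 3)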
Example n. 36
import image
import paint
import seismic
from data import *

import os, sys, inspect
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from find_trapeziums import test_trapeziums

if __name__ == '__main__':
    # _geo = Geo(Well.generate_left(), Well.generate_right(), 1000, 250, 750)
    # _match = Match.generate()

    _seismic = image.load("painter/SeismicScaled.jpg")
    _transformation = seismic.parse_transformation(_seismic, 15 * 1000,
                                                   int(2.5 * 1000), 10)  # 2.5

    # test data begin
    test_data, test_match = test_trapeziums()
    _geo = Geo(Well(test_data[0], test_data[2]),
               Well(test_data[1], test_data[3]), _transformation.width,
               _transformation.left_well_intent,
               _transformation.right_well_intent)
    _match = Match(
        [match[1][0] for match in reversed(test_match)],
        [match[0][0] + 1 for match in reversed(test_match)],
        [match[1][1] for match in reversed(test_match)],
        [match[0][1] + 1 for match in reversed(test_match)],
    )