Example #1
def apply_kernel(image, kernel):
    """ Performs convolution between the given image and kernel """
    if utils.is_color(image):
        result_b = convolve2d(image[:, :, 0],
                              kernel,
                              mode='same',
                              fillvalue=np.median(image[:, :, 0]))
        result_g = convolve2d(image[:, :, 1],
                              kernel,
                              mode='same',
                              fillvalue=np.median(image[:, :, 1]))
        result_r = convolve2d(image[:, :, 2],
                              kernel,
                              mode='same',
                              fillvalue=np.median(image[:, :, 2]))
        channels_list = []

        # Trim values lower than 0 or higher than 255 and convert to uint8
        # for OpenCV compatibility
        for result in (result_b, result_g, result_r):
            channels_list.append(np.clip(result, 0, 255).astype(np.uint8))

        filtered_image = utils.merge_channels(channels_list)
    else:
        # Trim values lower than 0 or higher than 255 and convert to uint8
        # for OpenCV compatibility
        filtered_image = convolve2d(image, kernel, mode='same')
        filtered_image = np.clip(filtered_image, 0, 255).astype(np.uint8)

    return filtered_image
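A minimal usage sketch, assuming convolve2d comes from scipy.signal, utils is the project's helper module and images are loaded as BGR uint8 arrays with OpenCV; the file names are illustrative:

import cv2
import numpy as np

image = cv2.imread('input.jpg')            # illustrative input file, BGR uint8
sharpen_kernel = np.array([[ 0, -1,  0],   # classic 3x3 sharpening kernel
                           [-1,  5, -1],
                           [ 0, -1,  0]])
sharpened = apply_kernel(image, sharpen_kernel)
cv2.imwrite('sharpened.jpg', sharpened)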
Example #2
def sepia(image, extra_inputs={}, parameters={}):
    """ Applies a **Sepia Filter** onto an image. \n

    Arguments:
        *image* (NumPy array) -- the image to be filtered

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary holding parameter values (empty)

    Returns:
        list of NumPy array uint8 -- list containing the filtered image
    """
    # Apply the Sepia formulas
    if utils.is_color(image):
        result_red = (image[:, :, 2] * 0.393 + image[:, :, 1] * 0.769 +
                      image[:, :, 0] * 0.189)
        result_green = (image[:, :, 2] * 0.349 + image[:, :, 1] * 0.686 +
                        image[:, :, 0] * 0.168)
        result_blue = (image[:, :, 2] * 0.272 + image[:, :, 1] * 0.534 +
                       image[:, :, 0] * 0.131)
    else:
        # A grayscale image acts as all three source channels at once
        result_red = image * 0.393 + image * 0.769 + image * 0.189
        result_green = image * 0.349 + image * 0.686 + image * 0.168
        result_blue = image * 0.272 + image * 0.534 + image * 0.131

    # Trim values greater than 255
    result_red = np.where(result_red > 255, 255, result_red)
    result_green = np.where(result_green > 255, 255, result_green)
    result_blue = np.where(result_blue > 255, 255, result_blue)

    # Round the values and convert to uint8
    result_red = np.uint8(np.rint(result_red))
    result_green = np.uint8(np.rint(result_green))
    result_blue = np.uint8(np.rint(result_blue))

    sepia_image = utils.merge_channels([result_blue, result_green, result_red])

    return [sepia_image]
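A short usage sketch; the image is assumed to be a BGR uint8 array loaded with OpenCV and the file names are illustrative:

import cv2

image = cv2.imread('portrait.jpg')    # illustrative input file
sepia_image = sepia(image)[0]         # the result comes back in a one-element list
cv2.imwrite('portrait_sepia.jpg', sepia_image)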
Example #3
def sketch(image, extra_inputs, parameters):
    """ Converts an image to a pencil sketch. \n

    Arguments:
        *image* (NumPy array) -- the image to be sketchified

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *pencil_Stroke_Size* (str, optional) -- the strength of the applied
            blur; possible values are *small*, *medium* and *large*; default
            value is *large*

    Returns:
        list of NumPy array uint8 -- list containing the filtered image
    """
    # Parameter extraction
    if 'pencil_Stroke_Size' in parameters:
        blur_strength = parameters['pencil_Stroke_Size']
    else:
        blur_strength = 'strong'

    if blur_strength == 'small':
        blur_strength = 'weak'
    elif blur_strength == 'large':
        blur_strength = 'strong'

    # The input image is converted (if necessary) to grayscale and blurred;
    # Colour Dodge divides the image by an inverted mask, and since the mask
    # here would be the inverted blur of the inverted image, the two inversions
    # cancel out and the blurred image can be used directly as the divisor
    if utils.is_color(image):
        grayed_image = grayscale(image, {}, {})[0]
    else:
        grayed_image = image.copy()

    blurred_image = blur(grayed_image, {}, {'strength': blur_strength})[0]

    # The blurred and grayscale images are blended using Colour Dodge; cast to
    # float before scaling so that the uint8 values cannot overflow
    sketched_image = np.where(blurred_image == 0, 0,
                              grayed_image.astype(np.float64) * 256 / blurred_image)
    sketched_image[sketched_image > 255] = 255

    return [sketched_image.astype(np.uint8)]
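A usage sketch with one of the documented stroke sizes; it assumes the companion grayscale and blur operations from the same module are importable, and the file names are illustrative:

import cv2

image = cv2.imread('landscape.jpg')    # illustrative input file
sketched = sketch(image, {}, {'pencil_Stroke_Size': 'medium'})[0]
cv2.imwrite('landscape_sketch.jpg', sketched)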
Example #4
def _k_means(image, colours):
    """ K-Means clustering applied to an image """
    if utils.is_color(image):
        # Remove the alpha channel, if present
        if image.shape[2] == 4:
            image = image[:, :, :3]

        pixels = np.reshape(image, (-1, image.shape[2]))
    else:
        pixels = np.reshape(image, (image.shape[0] * image.shape[1]))

    # Fit on a 1% sample of the pixels for speed, but keep at least as many
    # samples as there are clusters
    sample_count = max(int(0.01 * len(pixels)), colours)
    pixels_sample = shuffle(pixels, n_samples=sample_count)
    kmeans_estimator = KMeans(n_clusters=colours).fit(pixels_sample)
    labels = kmeans_estimator.predict(pixels)
    centers = kmeans_estimator.cluster_centers_
    pixels = centers[labels].astype(np.uint8)
    quantized_image = np.reshape(pixels, image.shape)

    return quantized_image
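A usage sketch for this private helper, assuming KMeans comes from sklearn.cluster and shuffle from sklearn.utils, as the calls above suggest; the file names are illustrative:

import cv2

image = cv2.imread('photo.jpg')           # illustrative input file
quantized = _k_means(image, colours=8)    # reduce the palette to 8 colours
cv2.imwrite('photo_8_colours.jpg', quantized)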
Example #5
def remove_channels(image, extra_inputs, parameters):
    """ Zeroes out channels from an image.

    Arguments:
        *image* (NumPy array) -- the image from which to remove channels

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *channel(s)* (str) -- the channel(s) to be removed from the image;
            possible values are *red*, *green*, *blue*, *red & green*, *red &
            blue*, *green & blue*

    Returns:
        list of NumPy array uint8 -- list containing the image having the
        requested channels removed
    """
    channels_information = parameters['channel(s)']
    image_copy = copy.deepcopy(image)

    if utils.is_color(image):
        # Zero out each channel named in the parameter value; the same checks
        # handle both single channels and '&'-separated pairs
        if 'red' in channels_information:
            image_copy[:, :, 2] = 0
        if 'green' in channels_information:
            image_copy[:, :, 1] = 0
        if 'blue' in channels_information:
            image_copy[:, :, 0] = 0

    return [image_copy]
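A usage sketch with one of the documented parameter values; the file names are illustrative:

import cv2

image = cv2.imread('photo.jpg')    # illustrative input file
blue_only = remove_channels(image, {}, {'channel(s)': 'red & green'})[0]
cv2.imwrite('photo_blue_only.jpg', blue_only)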
Example #6
def split_channels(image, extra_inputs, parameters):
    """ Splits an image into its channels and returns them.

    Arguments:
        *image* (NumPy array) -- the image to be split

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *spectrum* (str, optional) -- the spectrum in which the channels
            will be represented; possible values are *grayscale* and *color*;
            default value is *color*

    Returns:
        list of NumPy array uint8 -- list containing the channels of the image
    """
    if utils.is_color(image):
        b = image[:, :, 0]
        g = image[:, :, 1]
        r = image[:, :, 2]

        if 'spectrum' in parameters:
            spectrum = parameters['spectrum']
        else:
            spectrum = 'color'

        if spectrum == 'color':
            zeros = np.zeros((image.shape[:2]), dtype=np.uint8)
            b = utils.merge_channels([b, zeros, zeros])
            g = utils.merge_channels([zeros, g, zeros])
            r = utils.merge_channels([zeros, zeros, r])

        return [b, g, r]

    return [image]
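A usage sketch; the channels are returned in blue, green, red order, so the unpacking below follows that (file names are illustrative):

import cv2

image = cv2.imread('photo.jpg')    # illustrative input file
b, g, r = split_channels(image, {}, {'spectrum': 'grayscale'})
cv2.imwrite('photo_blue_channel.jpg', b)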
Example #7
def binarize(image, extra_inputs={}, parameters={}):
    """ Binarizes an image. \n

    Arguments:
        *image* (NumPy array) -- the image to be binarized

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *thresholding_Method* (str, optional) -- the type of thresholding;
            possible values are *simple* and *adaptive*; default value is
            *adaptive*. In the case of *simple* thresholding, the binarization
            threshold is chosen by the user and it is the same for all pixels;
            this can cause unsatisfactory results if the source image has
            different lighting conditions in different areas. In *adaptive*
            thresholding, the threshold is automatically computed and is
            different for each source pixel

            *threshold* (str, optional) -- the value which separates the two
            pixel values, in the case of simple thresholding; possible values
            are *median* and *127*; default value is *median*

            *maximum_Value* (int, optional) -- the value with which to replace
            pixel values greater than the threshold; must be between 1 and 255;
            default value is 255

            *adaptive_Method* (str, optional) -- the type of adaptive threshold
            computation; possible values are *mean* and *gaussian*; default value
            is *gaussian*. When *mean*, the threshold is computed as the mean
            of the values in the neighbourhood; when *gaussian*, the threshold
            is computed as a gaussian-weighted sum of the neighbourhood values

            *neighbourhood_Size* (int, optional) -- the square size of the
            neighbourhood of values to consider when computing adaptive thresholds;
            possible values are *5*, *9* and *15*; default value is 15

    Returns:
        list of NumPy array uint8 -- list containing the binarized image
    """
    # Parameters extraction
    if utils.is_color(image):
        image = grayscale(image, {}, {})[0]

    if 'thresholding_Method' in parameters:
        thresholding = parameters['thresholding_Method']
    else:
        thresholding = 'adaptive'

    if 'maximum_Value' in parameters:
        max_value = parameters['maximum_Value']
    else:
        max_value = 255

    if thresholding == 'simple':
        # 'threshold' is documented as optional, so fall back to 'median'
        threshold = parameters.get('threshold', 'median')

        if threshold == 'median':
            threshold = np.median(image)
        else:
            threshold = 127

        # If the pixel value is greater than the threshold, set the pixel value
        # to 'max_value'; otherwise, set it to 0
        binary_image = np.where(image > threshold, max_value,
                                0).astype(np.uint8)
    else:
        if 'adaptive_Method' in parameters:
            method = parameters['adaptive_Method']
        else:
            method = 'gaussian'

        if 'neighbourhood_Size' in parameters:
            neighbourhood_size = parameters['neighbourhood_Size']
        else:
            neighbourhood_size = 15

        # Compute the thresholds and set the new values accordingly
        thresholds = helpers.get_thresholds(image, method,
                                            neighbourhood_size) - 2
        binary_image = np.where(image > thresholds, max_value,
                                0).astype(np.uint8)

    return [binary_image]
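A usage sketch exercising simple thresholding; the grayscale conversion happens inside the function, so a colour image can be passed directly (file names are illustrative):

import cv2

image = cv2.imread('document.jpg')    # illustrative input file
binary = binarize(image, {}, {'thresholding_Method': 'simple',
                              'threshold': 'median'})[0]
cv2.imwrite('document_binary.jpg', binary)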
Example #8
def pixelate_ral(image, extra_inputs, parameters):
    """ A modified version of the pixelate operation, used for converting images
    into a version suitable for mosaicing. The colour representation used is a
    subset of RGB called RAL, a standard used for paint colours. In addition, a
    text file containing extra information is created.

    Arguments:
        *image* (NumPy array) -- the image to be pixelated

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary holding parameter values (empty)

    Returns:
        list of NumPy array uint8 -- list containing the pixelated image
    """
    # Initialisations; constants are measured in centimetres
    tile_size = 3
    mosaic_width = 190
    mosaic_height = 250
    mosaic_lines_count = mosaic_height // tile_size
    mosaic_columns_count = mosaic_width // tile_size

    # Compute how many pixels each block will contain
    image_height, image_width = image.shape[:2]
    block_width = image_width // mosaic_columns_count
    block_height = image_height // mosaic_lines_count

    # Configure the text to be used for writing the template grid
    font_face = cv2.FONT_HERSHEY_DUPLEX
    font_scale = 0.6
    thickness = 1
    upscaling_factor = 6

    if utils.is_color(image):
        channels_count = image.shape[2]
        pixel_tile = np.zeros((block_height * upscaling_factor,
                               block_width * upscaling_factor, channels_count))
        grid_tile = np.zeros((block_height * upscaling_factor,
                              block_width * upscaling_factor, channels_count))
        pixelated_image = np.zeros(
            (image_height * upscaling_factor, image_width * upscaling_factor,
             channels_count),
            dtype=np.uint8)
        grid_image = np.zeros((image_height * upscaling_factor,
                               image_width * upscaling_factor, channels_count),
                              dtype=np.uint8)
    else:
        pixel_tile = np.zeros(
            (block_height * upscaling_factor, block_width * upscaling_factor))
        grid_tile = np.zeros(
            (block_height * upscaling_factor, block_width * upscaling_factor))
        pixelated_image = np.zeros(
            (image_height * upscaling_factor, image_width * upscaling_factor),
            dtype=np.uint8)
        grid_image = np.zeros(
            (image_height * upscaling_factor, image_width * upscaling_factor),
            dtype=np.uint8)

    # Initialised for both branches, since the colour usage report below reads it
    colours_frequencies = {}

    # For each block:
    #    Compute the average r, g, b values of the block and put them in a vector
    #    Replace the current block with a tile coloured as the closest RAL colour
    #    in relation to the average vector
    for line in range(mosaic_lines_count):
        for column in range(mosaic_columns_count):
            block = image[line * block_height:(line + 1) * block_height,
                          column * block_width:(column + 1) * block_width]
            if utils.is_color(image):
                colour_used = []
                for i in range(channels_count):
                    colour_component = int(round(np.mean(block[:, :, i])))
                    colour_used.append(colour_component)

                # Convert RGB colour to closest RAL colour and record its use
                ral_code, ral_colour = get_closest_ral_colour(colour_used)

                if ral_code in colours_frequencies:
                    colours_frequencies[ral_code] += 1
                else:
                    colours_frequencies[ral_code] = 1

                # Set the pixel tile and the grid tile
                for i in range(channels_count):
                    pixel_tile[:, :, i] = ral_colour[i]

                grid_tile[:, :, :] = 255
                grid_tile[0, :, :] = 0
                grid_tile[-1, :, :] = 0
                grid_tile[:, 0, :] = 0
                grid_tile[:, -1, :] = 0

                # Write the colour code in the center of the tile
                text_size = cv2.getTextSize(ral_code, font_face, font_scale,
                                            thickness)[0]
                text_x = (block_width * upscaling_factor - text_size[0]) // 2
                text_y = (block_height * upscaling_factor + text_size[1]) // 2
                cv2.putText(grid_tile, ral_code, (text_x, text_y), font_face,
                            font_scale, (0, 0, 0), thickness)
            else:
                pixel_tile = int(round(np.mean(block)))

            top = line * block_height * upscaling_factor
            bottom = (line + 1) * block_height * upscaling_factor
            left = column * block_width * upscaling_factor
            right = (column + 1) * block_width * upscaling_factor
            pixelated_image[top:bottom, left:right] = pixel_tile
            grid_image[top:bottom, left:right] = grid_tile

    # Write colour usage information into a file
    tempdata_path = os.path.join(project_path, 'webui', 'static', 'tempdata')
    with open(os.path.join(tempdata_path, 'ral_info.txt'), 'w') as f:
        f.write('INFORMATII MOZAIC:\n')
        f.write('==============================\n')
        f.write('NUMAR CULORI: ' + str(len(colours_frequencies)) + '\n')
        f.write('NUMAR BUCATI PE ORIZONTALA: ' + str(mosaic_columns_count) +
                '\n')
        f.write('NUMAR BUCATI PE VERTICALA: ' + str(mosaic_lines_count) + '\n')
        f.write('NUMAR TOTAL BUCATI: ' +
                str(mosaic_lines_count * mosaic_columns_count) + '\n')
        f.write('FRECVENTE CULORI: [ID_CULOARE_RAL: NUMAR DE PIESE]\n')
        for code in colours_frequencies:
            pieces_suffix = 'PIESE'
            if colours_frequencies[code] == 1:
                pieces_suffix = 'PIESA'
            f.write('>> ' + code + ': ' + str(colours_frequencies[code]) +
                    ' ' + pieces_suffix + '\n')

    # Crop grid image into A4-sized images (size 29.7 cm x 21.0 cm) and save them to tempdata
    a4_sheets_lines_count = mosaic_lines_count // 9
    a4_sheets_columns_count = mosaic_columns_count // 7
    sheet_counter = 1

    for line in range(a4_sheets_lines_count):
        for column in range(a4_sheets_columns_count):
            a4_image = grid_image[line * block_height * upscaling_factor *
                                  9:(line + 1) * block_height *
                                  upscaling_factor * 9, column * block_width *
                                  upscaling_factor * 7:(column + 1) *
                                  block_width * upscaling_factor * 7]
            cv2.imwrite(
                os.path.join(tempdata_path,
                             'sheet_' + str(sheet_counter) + '.jpg'), a4_image)
            sheet_counter += 1

    # Separately save the remaining tiles, if any, on the horizontal and vertical
    end_of_horizontal_sheets = a4_sheets_columns_count * block_width * upscaling_factor * 7
    end_of_vertical_sheets = a4_sheets_lines_count * block_height * upscaling_factor * 9
    horizontal_rest_counter = 1
    vertical_rest_counter = 1

    if end_of_horizontal_sheets != grid_image.shape[1]:
        for line in range(a4_sheets_lines_count):
            rest = grid_image[line * block_height * upscaling_factor *
                              9:(line + 1) * block_height * upscaling_factor *
                              9, end_of_horizontal_sheets:]
            a4_image = np.zeros(
                (block_height * upscaling_factor * 9,
                 block_width * upscaling_factor * 7) + grid_image.shape[2:],
                dtype=np.uint8)
            a4_image[:, :rest.shape[1]] = rest

            horizontal_rest_path = os.path.join(
                tempdata_path,
                'rest_horizontal_' + str(horizontal_rest_counter) + '.jpg')
            cv2.imwrite(horizontal_rest_path, a4_image)
            horizontal_rest_counter += 1

    if end_of_vertical_sheets != grid_image.shape[0]:
        for column in range(a4_sheets_columns_count):
            rest = grid_image[end_of_vertical_sheets:, column * block_width *
                              upscaling_factor * 7:(column + 1) * block_width *
                              upscaling_factor * 7]
            a4_image = np.zeros(
                (block_height * upscaling_factor * 9,
                 block_width * upscaling_factor * 7) + grid_image.shape[2:],
                dtype=np.uint8)
            a4_image[:rest.shape[0], :] = rest

            cv2.imwrite(
                os.path.join(
                    tempdata_path,
                    'rest_vertical_' + str(vertical_rest_counter) + '.jpg'),
                a4_image)
            vertical_rest_counter += 1

    # TODO - If image has rests on both axes, then the intersection of the rests will be left out

    return [pixelated_image, grid_image]
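A usage sketch; besides returning the two images, the function writes ral_info.txt and the A4 sheet crops under the project's webui/static/tempdata directory, so project_path must point at a writable project tree (file names are illustrative):

import cv2

image = cv2.imread('mural.jpg')    # illustrative input file
pixelated, grid = pixelate_ral(image, {}, {})
cv2.imwrite('mural_pixelated.jpg', pixelated)
cv2.imwrite('mural_grid.jpg', grid)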
Example #9
def pixelate(image, extra_inputs, parameters):
    """ Uses a form of downscaling in order to achieve an 8-bit-like filter
    appearance of an image.

    Arguments:
        *image* (NumPy array) -- the image to be pixelated

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *fidelity* (str, optional) -- how close the resulting image will
            look to the original (inversely proportional to the size of
            the composing pixels); possible values are *very low*, *low*,
            *standard*, *high*, *very high* and *ultra high*; default value is
            *standard*

    Returns:
        list of NumPy array uint8 -- list containing the pixelated image
    """
    # Parameters extraction
    if 'fidelity' in parameters:
        resolution = parameters['fidelity']
    else:
        resolution = 'standard'

    # Determine the resolution of the pixel-blocks used (the side length of the square)
    block_sizes = {'very low': 25, 'low': 20, 'standard': 15,
                   'high': 10, 'very high': 5, 'ultra high': 3}
    resolution = block_sizes[resolution]

    # Determine the number of pixel-blocks to be used for both dimensions
    image_height, image_width = image.shape[:2]
    lines_count = image_height // resolution
    columns_count = image_width // resolution

    if utils.is_color(image):
        channels_count = image.shape[2]
        pixel_tile = np.zeros((resolution, resolution, channels_count))
        pixelated_image = np.zeros(
            (lines_count * resolution, columns_count * resolution,
             channels_count),
            dtype=np.uint8)
    else:
        pixel_tile = np.zeros((resolution, resolution))
        pixelated_image = np.zeros(
            (lines_count * resolution, columns_count * resolution),
            dtype=np.uint8)

    # For each block:
    #    Compute the average r, g, b values of the block and put them in a vector
    #    Replace the current block with a tile coloured the same as the average vector
    for line in range(lines_count):
        for column in range(columns_count):
            block = image[line * resolution:(line + 1) * resolution,
                          column * resolution:(column + 1) * resolution]
            if utils.is_color(image):
                for i in range(channels_count):
                    colour_component = int(round(np.mean(block[:, :, i])))
                    pixel_tile[:, :, i] = colour_component
            else:
                pixel_tile = int(round(np.mean(block)))

            pixelated_image[line * resolution:(line + 1) * resolution, column *
                            resolution:(column + 1) * resolution] = pixel_tile

    return [pixelated_image]
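A usage sketch with one of the documented fidelity values (file names are illustrative):

import cv2

image = cv2.imread('photo.jpg')    # illustrative input file
pixelated = pixelate(image, {}, {'fidelity': 'low'})[0]
cv2.imwrite('photo_pixelated.jpg', pixelated)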
Example #10
def ascii_art(image, extra_inputs, parameters):
    """ Applies an **ASCII Art Filter** onto an image. \n

    Arguments:
        *image* (NumPy array) -- the image to be filtered

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *charset* (str, optional) -- the character set to use when
            rendering ASCII art image; possible values are *standard*,
            *alternate* and *full*; default value is *alternate*

    Returns:
        list of NumPy array uint8 -- list containing the filtered image
    """
    # Small, 11 character ramps
    STANDARD_CHARSET = [' ', '.', ',', ':', '-', '=', '+', '*', '#', '%', '@']
    ALTERNATE_CHARSET = [' ', '.', ',', ':', '-', '=', '+', '*', '%', '@', '#']

    # Full, 70 character ramp
    FULL_CHARSET = [
        ' ', '.', '\'', '`', '^', '"', ',', ':', ';', 'I', 'l', '!', 'i', '>',
        '<', '~', '+', '_', '-', '?', ']', '[', '}', '{', '1', ')', '(', '|',
        '\\', '/', 't', 'f', 'j', 'r', 'x', 'n', 'u', 'v', 'c', 'z', 'X', 'Y',
        'U', 'J', 'C', 'L', 'Q', '0', 'O', 'Z', 'm', 'w', 'q', 'p', 'd', 'b',
        'k', 'h', 'a', 'o', '*', '#', 'M', 'W', '&', '8', '%', 'B', '$', '@'
    ]

    if 'charset' in parameters:
        if parameters['charset'] == 'standard':
            CHARS = STANDARD_CHARSET
        elif parameters['charset'] == 'alternate':
            CHARS = ALTERNATE_CHARSET
        else:
            CHARS = FULL_CHARSET
    else:
        CHARS = ALTERNATE_CHARSET

    buckets = 256 / len(CHARS)
    # Reverse the ramp so that bright pixels map to sparse characters
    CHARS = CHARS[::-1]

    def number_to_char(number):
        return CHARS[int(number // buckets)]

    # Vectorizing this function allows it to be applied on arrays
    number_to_char = np.vectorize(number_to_char)

    # Resize and convert the image to grayscale
    h, w = image.shape[:2]
    original_size = (w, h)
    image = utils.resize_dimension(image, new_width=80)
    if utils.is_color(image):
        image = grayscale(image)[0]

    # Build results as list of lines of text and entire text
    lines = [''.join(number_to_char(row)) for row in list(image)]
    text_spaceless = ''.join(lines)

    # Determine the widest letter, to account for the rectangular aspect ratio of the characters
    font_face = cv2.FONT_HERSHEY_PLAIN
    font_scale = 1
    thickness = 1
    size, base_line = cv2.getTextSize('.', font_face, font_scale, thickness)
    maximum_letter_width = size[0]

    for letter in set(text_spaceless):
        letter_width = cv2.getTextSize(letter, font_face, font_scale,
                                       thickness)[0][0]
        if letter_width > maximum_letter_width:
            maximum_letter_width = letter_width

    # Create resulting image as white and write text on it
    number_of_lines = len(lines)
    number_of_cols = len(lines[0]) * maximum_letter_width
    dy = 14  # Vertical offset to account for the characters' height
    ascii_image = np.full((number_of_lines * dy, number_of_cols), 255, np.uint8)

    for i, line in enumerate(lines):
        # putText takes the text baseline, so offset by one line height to keep
        # the first row of characters inside the image
        y = (i + 1) * dy
        for j, char in enumerate(line):
            cv2.putText(ascii_image,
                        char, (j * maximum_letter_width, y),
                        font_face,
                        1, (0, 0, 0),
                        1,
                        lineType=cv2.FILLED)

    # Resize resulting image to original size of input image
    ascii_image = cv2.resize(ascii_image,
                             original_size,
                             interpolation=cv2.INTER_AREA)

    return [ascii_image]
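A usage sketch with the full character ramp; it assumes utils.resize_dimension and the grayscale operation from the same module are importable (file names are illustrative):

import cv2

image = cv2.imread('portrait.jpg')    # illustrative input file
art = ascii_art(image, {}, {'charset': 'full'})[0]
cv2.imwrite('portrait_ascii.jpg', art)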
Example #11
def _median_cut(image, colours):
    """ An improved version of the Median Cut quantization algorithm """
    if utils.is_color(image):
        # Remove the alpha channel, if present
        if image.shape[2] == 4:
            image = image[:, :, :3]

        # Determine the channel having the greatest range
        range_b = np.amax(image[:, :, 0]) - np.amin(image[:, :, 0])
        range_g = np.amax(image[:, :, 1]) - np.amin(image[:, :, 1])
        range_r = np.amax(image[:, :, 2]) - np.amin(image[:, :, 2])
        greatest_range_index = np.argmax([range_b, range_g, range_r])

        # Sort the image pixels according to that channel's values
        pixels = np.reshape(image, (-1, 3))
        pixels = unstructured_to_structured(
            pixels, np.dtype([('b', int), ('g', int), ('r', int)]))
        sorting_indices = np.argsort(pixels, order='bgr'[greatest_range_index])
        pixels_sorted = pixels[sorting_indices]

        # Split the pixels list into <colours> buckets and compute the average of each bucket
        buckets = np.array_split(pixels_sorted, colours)
        bucket_averages = [(int(np.average(bucket['b'])),
                            int(np.average(bucket['g'])),
                            int(np.average(bucket['r'])))
                           for bucket in buckets]

        # Assign the averages to the pixels contained in the buckets
        left_index = 0
        right_index = len(buckets[0])
        for i in range(len(buckets)):
            pixels_sorted[left_index:right_index] = bucket_averages[i]
            left_index = right_index
            if i + 1 < len(buckets):
                right_index += len(buckets[i + 1])

        # Return the quantized image
        pixels_sorted = structured_to_unstructured(pixels_sorted)
        pixels = pixels_sorted[np.argsort(sorting_indices)]
        quantized_image = np.reshape(pixels, image.shape)
    else:
        # Sort the image pixels
        pixels = np.ravel(image)
        sorting_indices = np.argsort(pixels)
        pixels_sorted = pixels[sorting_indices]

        # Split the pixels list into <colours> buckets and compute the average of each bucket
        buckets = np.array_split(pixels_sorted, colours)
        bucket_averages = [int(np.average(bucket)) for bucket in buckets]

        # Assign the averages to the pixels contained in the buckets
        left_index = 0
        right_index = len(buckets[0])
        for i in range(len(buckets)):
            pixels_sorted[left_index:right_index] = bucket_averages[i]
            left_index = right_index
            if i + 1 < len(buckets):
                right_index += len(buckets[i + 1])

        # Return the quantized image
        pixels = pixels_sorted[np.argsort(sorting_indices)]
        quantized_image = np.reshape(pixels, image.shape)

    return quantized_image
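A usage sketch for this private helper, assuming unstructured_to_structured and structured_to_unstructured come from numpy.lib.recfunctions, as the calls above suggest (file names are illustrative):

import cv2

image = cv2.imread('photo.jpg')               # illustrative input file
quantized = _median_cut(image, colours=16)    # quantize into 16 buckets
cv2.imwrite('photo_median_cut.jpg', quantized)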
Example #12
def high_pass(image, extra_inputs, parameters):
    """Applies a **High Pass Filter** on an image. \n
    The image is converted into the frequency domain (using the *Fast Fourier
    Transform*) and only the frequencies higher than the cutoff frequency are
    let through. *High-frequency emphasis* can be achieved by providing an *offset*
    greater than 0 (0 is default) and a *multiplier* greater than 1 (1 is default).
    The filter is then transformed by the equation:
        emphasisFilter = offset + multiplier * highpassFilter

    Arguments:
        *image* (NumPy array) -- the image on which the filter is to be applied

        *extra_inputs* (dictionary) -- a dictionary holding any extra inputs
        for the call (empty)

        *parameters* (dictionary) -- a dictionary containing the following keys:

            *cutoff* (int) -- the minimum frequency to be let through by the filter

            *offset* (int, optional) -- number used for avoiding the reduction
            of the DC term to 0; default value is 0, which does not prevent
            reduction of the DC term

            *multiplier* (int, optional) -- number used for emphasizing
            frequencies; default value is 1, which does not have any effect

            *type* (str, optional) -- the type of high-pass filter to be applied;
            possible values are: *ideal*, *butterworth*, *gaussian*; default
            value is *gaussian*

            *order* (int, optional) -- the order used for Butterworth filtering;
            default value is 2

            *filename* (str, optional) -- the name of the image file to be
            filtered, used for checking whether the corresponding FFT(s) are
            serialized on the server or not

    Returns:
        list of NumPy array uint8 -- list containing the filtered image
    """
    # Parameter validation and assignment
    offset = parameters.get('offset', 0)
    multiplier = parameters.get('multiplier', 1)
    filter_type = parameters.get('type', 'gaussian')
    order = parameters.get('order', 2)
    filename = parameters.get('filename', '')

    image_h, image_w = image.shape[:2]  # Take image dimensions

    # Compute the cutoff frequency as a percentage from the smaller dimension of the image
    cutoff_dimension = image_h if image_h < image_w else image_w
    cutoff = parameters['cutoff'] / 100 * cutoff_dimension

    padded_h, padded_w = 2 * image_h, 2 * image_w  # Obtain the padding parameters

    # Check whether the FFTs of the image have been serialized or not
    deserializing, file_not_found = False, False
    pickles_path = os.path.join(project_path, 'webui', 'static', 'tempdata')

    if filename != '':
        # rsplit guards against file names that contain extra dots
        filename, extension = filename.rsplit('.', 1)
        if utils.is_color(image):
            files_to_check = [filename + '_' + c + '_fft.pickle' for c in 'bgr']
        else:
            files_to_check = [filename + '_fft.pickle']
        for file in files_to_check:
            if not os.path.isfile(os.path.join(pickles_path, file)):
                file_not_found = True
        if not file_not_found:
            deserializing = True

    # Deserialize the FFTs if possible
    if deserializing:
        padded_image_FFTs = []
        for file in files_to_check:
            with open(os.path.join(pickles_path, file), 'rb') as f:
                padded_image_FFTs.append(pickle.load(f))
            print('Deserialized', file)
    else:
        # Create padded image
        if utils.is_color(image):
            padded_image = np.zeros((padded_h, padded_w, len(utils.get_channels(image))), np.uint8)
            padded_image[0:image_h, 0:image_w, :] = image
        else:
            padded_image = np.zeros((padded_h, padded_w), np.uint8)
            padded_image[0:image_h, 0:image_w] = image

        # Take the FFTs of the padded image channels
        padded_image_FFTs = utils.get_FFTs(padded_image)

    # Compute the filter image
    if filter_type == 'ideal':
        filter_image = ideal_filter('high', (padded_h, padded_w), cutoff)
    elif filter_type == 'butterworth':
        filter_image = butterworth_filter('high', (padded_h, padded_w), cutoff, order)
    else:
        filter_image = gaussian_filter('high', (padded_h, padded_w), cutoff)

    # Perform high-frequency emphasis; the defaults (offset 0, multiplier 1)
    # leave the filter unchanged
    filter_image = offset + multiplier * filter_image

    # Apply the filter to the FFTs
    filtered_FFTs = [np.multiply(channelFFT, filter_image) for channelFFT in padded_image_FFTs]

    # Take the inverse FFT of the filtered padded image FFT components
    result_components = [np.real(np.fft.ifft2(np.fft.ifftshift(filteredComponent)))
                         for filteredComponent in filtered_FFTs]

    # Obtain the result image
    if len(result_components) == 1:
        result_image = result_components[0]
    else:
        result_image = utils.merge_channels(result_components)

    # Trim values lower than 0 or higher than 255
    result_image = np.where(result_image > 255, 255, result_image)
    result_image = np.where(result_image < 0, 0, result_image)

    # Round the values and unpad the image
    result_image = np.uint8(np.rint(result_image))
    if len(result_components) == 1:
        result_image = result_image[0:image_h, 0:image_w]
    else:
        result_image = result_image[0:image_h, 0:image_w, :]

    return [result_image]
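A usage sketch applying Gaussian high-pass filtering with mild high-frequency emphasis; per the code above, the cutoff is given as a percentage of the smaller image dimension (file names are illustrative):

import cv2

image = cv2.imread('photo.jpg')    # illustrative input file
filtered = high_pass(image, {}, {'cutoff': 10, 'offset': 1,
                                 'multiplier': 2, 'type': 'gaussian'})[0]
cv2.imwrite('photo_high_pass.jpg', filtered)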