Example #1
# load_color_label_dict, extract_segmentation_masks, generate_gram_matrices
# and filter_content_masks are helpers from the surrounding project.
def merge_anps(content_segmentation, style_text, adj_thres, noun_thres,
               result_dir):
    print('Finding Gram matrices for each resolution according to the ANPs '
          'matching each noun in the image')
    # Load the color -> label mapping and invert it to map each label to its color
    color_label_dict = load_color_label_dict()
    label_color_dict = {
        label: color
        for color, labels in color_label_dict.items() for label in labels
    }
    colors = color_label_dict.keys()

    # Extract the boolean mask for every color
    content_masks = extract_segmentation_masks(content_segmentation, colors)
    content_colors = content_masks.keys()

    # Generate Gram matrices for each label according to the matching ANPs
    style_gram_matrices, anp_results = generate_gram_matrices(
        content_colors, style_text, color_label_dict, label_color_dict,
        adj_thres, noun_thres, result_dir)

    # Discard nouns in the image that were not matched to the ANP list, even after semantic matching
    style_colors = style_gram_matrices.keys()
    content_masks = filter_content_masks(content_masks, style_colors)

    assert frozenset(style_gram_matrices.keys()) == frozenset(
        content_masks.keys())

    return content_masks, style_gram_matrices, anp_results
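
A minimal usage sketch for merge_anps, assuming the segmentation is loaded as an RGB NumPy array and the style text comes from a caption file; the file names and threshold values below are illustrative assumptions, not taken from the original project:

import numpy as np
from PIL import Image

# Hypothetical inputs: file names and thresholds are assumptions.
content_segmentation = np.array(Image.open('content_seg.png'))  # RGB label image
style_text = open('style_caption.txt').read()                   # text to mine ANPs from

content_masks, style_gram_matrices, anp_results = merge_anps(
    content_segmentation, style_text,
    adj_thres=0.2, noun_thres=0.2,  # assumed similarity thresholds
    result_dir='results/')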
Example #2
import tensorflow as tf

# CROP_SIZE, preprocess and load_color_label_dict come from the surrounding project.


def segmentation_pred(net, image):
    num_classes = 150  # number of ADE20K scene-parsing classes predicted by the net

    image_shape = tf.shape(image)
    # Pad the target size up to the training crop size; the output is cropped back below
    height = tf.maximum(CROP_SIZE[0], image_shape[0])
    width = tf.maximum(CROP_SIZE[1], image_shape[1])

    raw_output = net.layers['conv6']

    # Upsample the logits to the padded size, crop to the original image
    # size, and take the per-pixel argmax to get class indices
    raw_output_up = tf.image.resize_bilinear(raw_output,
                                             size=[height, width],
                                             align_corners=True)
    raw_output_up = tf.image.crop_to_bounding_box(raw_output_up, 0, 0,
                                                  image_shape[0],
                                                  image_shape[1])
    raw_output_up = tf.argmax(raw_output_up, axis=3)

    # Map each predicted class index to its RGB color: one-hot encode the
    # indices and multiply with the (num_classes, 3) color table
    color_table = list(load_color_label_dict().keys())
    color_mat = tf.constant(color_table, dtype=tf.float32)
    onehot_output = tf.one_hot(raw_output_up, depth=num_classes)
    onehot_output = tf.reshape(onehot_output, (-1, num_classes))
    pred = tf.matmul(onehot_output, color_mat)
    pred = tf.reshape(pred, (1, image_shape[0], image_shape[1], 3))

    # Preprocess the raw image to the padded size for use as network input
    pre = preprocess(image, height, width)

    return pred, pre
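
A hedged TF1-style sketch of how the returned tensors might be evaluated; the PSPNet50 constructor, the checkpoint handling and the input shape are assumptions, and in the real pipeline the network would be built on the preprocessed tensor returned as pre:

import numpy as np
import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])
net = PSPNet50({'data': tf.expand_dims(image, 0)})  # hypothetical constructor

pred, pre = segmentation_pred(net, image)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # A pretrained checkpoint would normally be restored here.
    color_seg = sess.run(pred, feed_dict={image: np.zeros((480, 640, 3), np.float32)})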
Example #3
import itertools as it

import networkx as nx


def merge_segments(content_segmentation, style_segmentation, semantic_threshold, similarity_metric):
    print("Semantic merge of segments started")

    # Load the color -> label mapping and invert it to map each label to its color
    color_label_dict = load_color_label_dict()
    label_color_dict = {label: color for color, labels in color_label_dict.items() for label in labels}
    colors = color_label_dict.keys()

    # Extract the boolean mask for every color
    content_masks = extract_segmentation_masks(content_segmentation, colors)
    style_masks = extract_segmentation_masks(style_segmentation, colors)

    content_colors = content_masks.keys()
    style_colors = style_masks.keys()

    # Merge every color that occurs only in the style segmentation into the most similar color in the content segmentation
    style_masks = merge_difference(style_masks, style_colors, content_colors, color_label_dict, label_color_dict, similarity_metric)
    style_colors = style_masks.keys()

    # Merge every color that occurs only in the content segmentation into the most similar color in the style segmentation
    content_masks = merge_difference(content_masks, content_colors, style_colors, color_label_dict, label_color_dict, similarity_metric)
    content_colors = content_masks.keys()

    assert frozenset(style_colors) == frozenset(content_colors)

    # Get all colors that are contained in both segmentation images
    intersection = list(set(content_colors).intersection(style_colors))

    # Build the minimal set of unique color pairs to compare via semantic similarity
    intersection_colors_to_compare = it.combinations(intersection, 2)

    # Transform colors to labels
    intersection_labels_to_compare = color_tuples_to_label_list_tuples(intersection_colors_to_compare, color_label_dict)

    # Add similarity score to label tuples
    annotated_intersection_labels = annotate_label_similarity(intersection_labels_to_compare, similarity_metric)

    # Of the labels contained in both segmentation images, keep only pairs with a similarity above the threshold
    above_threshold_intersection = [(similarity, label_tuple) for (similarity, label_tuple) in
                                    annotated_intersection_labels if similarity >= semantic_threshold]

    # Drop similarity score
    edge_list_labels = [label_tuple for similarity, label_tuple in above_threshold_intersection]

    # Turn labels back to colors
    edge_list_colors = [(label_color_dict[l1], label_color_dict[l2]) for l1, l2 in edge_list_labels]

    # Find all connected sub-graphs of the similarity graph
    color_sub_graphs = list(nx.connected_components(nx.from_edgelist(edge_list_colors)))

    # Map every color in a sub-graph to one (arbitrary) representative color of that sub-graph
    replacement_colors = {color: list(color_graph)[0] for color_graph in color_sub_graphs for color in color_graph}

    new_content_segmentation = replace_colors_in_dict(content_masks, replacement_colors)
    new_style_segmentation = replace_colors_in_dict(style_masks, replacement_colors)

    assert new_content_segmentation.keys() == new_style_segmentation.keys()

    return new_content_segmentation, new_style_segmentation
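
The heart of the merge is the connected-components step: any chain of pairwise-similar labels collapses onto a single representative color. A self-contained toy sketch of that idea follows; the colors and similarity pairs are made up, and sorted() is used here only to make the representative deterministic, whereas list(color_graph)[0] above picks an arbitrary member:

import networkx as nx

# Made-up edge list; in merge_segments it comes from the threshold test.
edge_list_colors = [((0, 0, 255), (0, 100, 255)),      # e.g. "sea" ~ "water"
                    ((0, 100, 255), (100, 100, 150))]  # e.g. "water" ~ "lake"

color_sub_graphs = list(nx.connected_components(nx.from_edgelist(edge_list_colors)))
replacement_colors = {color: sorted(color_graph)[0]
                      for color_graph in color_sub_graphs
                      for color in color_graph}
print(replacement_colors)  # all three colors map to the same representative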
Example #4
import sys

from components.PSPNet.model import load_color_label_dict

color_label_dict = load_color_label_dict()
label_color_dict = {
    label: color
    for color, labels in color_label_dict.items() for label in labels
}

print(label_color_dict[sys.argv[1]])
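
Invoked from a shell as, e.g., python label_to_color.py sky (the script and label names are assumptions), this prints the RGB color for that label; an unknown label raises KeyError. A slightly more defensive variant of the same lookup:

import sys

from components.PSPNet.model import load_color_label_dict

if len(sys.argv) != 2:
    sys.exit('usage: label_to_color.py LABEL')  # hypothetical script name

color_label_dict = load_color_label_dict()
label_color_dict = {label: color
                    for color, labels in color_label_dict.items()
                    for label in labels}

label = sys.argv[1]
if label in label_color_dict:
    print(label_color_dict[label])
else:
    sys.exit('Unknown label: {!r}'.format(label))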