def create_edge_files():
    """Write one ``<image_id>.txt`` edge-list CSV per image.

    For every class in the module-level ``classes`` list, walks the region
    folders under ``DATA_DIR\\Regions\\<class>``, loads each image's scene
    graph via ``vg.get_scene_graph``, and writes (sub, obj) index pairs.
    The indices refer to positions in the directory listing of the image's
    ``new`` sub-folder of cropped region files.
    """
    for c in classes:
        image_folder_dir = DATA_DIR + f'\\Regions\\{c}'
        list_image_ids = os.listdir(DATA_DIR + f'\\Regions\\{c}')
        for image_id in list_image_ids:
            print(image_id)
            list_images = os.listdir(image_folder_dir + f'\\{image_id}\\new\\')
            list_images = [l.split('.')[0] for l in list_images]
            # Build the name -> index map once; the original used
            # list.index() per relationship, which is O(n) each time.
            # NOTE: a missing name now raises KeyError instead of ValueError.
            index_of = {name: i for i, name in enumerate(list_images)}
            graph = vg.get_scene_graph(int(image_id), DATA_DIR + '\\',
                                       DATA_DIR + '\\by-id\\',
                                       DATA_DIR + '\\synsets.json')
            # Normalise object names so they match the region file stems.
            graph = preprocess_object_names(graph)
            # A set de-duplicates repeated edges between the same pair.
            edge_set = set()
            for r in graph.relationships:
                sub_ind = index_of[r.subject.__str__()]
                obj_ind = index_of[r.object.__str__()]
                edge_set.add((sub_ind, obj_ind))
            # newline='' stops the csv module emitting blank rows on Windows.
            with open(TARGET_DIR + f'{image_id}.txt', mode='w', newline='') as edge_file:
                edge_writer = csv.writer(edge_file, delimiter=',')
                edge_writer.writerow(('sub', 'obj'))
                for items in edge_set:
                    edge_writer.writerow(items)
def get_objects_of_graph():
    """Crop region images for every saved per-category edge file.

    Iterates the module-level ``Categories``; for each image file found
    under ``TARGET_DIR\\<category>``, loads its scene graph, normalises
    the object names, and delegates cropping to ``crop_regions``.
    """
    for category in Categories:
        for image_file in os.listdir(TARGET_DIR + f"\\{category}"):
            image_id = int(image_file.split('.')[0])
            print(f'Saving for {image_id}...')
            scene_graph = vg.get_scene_graph(image_id, DATA_DIR, image_data_dir,
                                             DATA_DIR + '\\synsets.json')
            objects = preprocess_object_names(scene_graph)
            crop_regions(image_id, objects, category)
def get_next_local(ids=None):
    """Yield ``(image, region_descriptions, scene_graph)`` triples.

    Parameters
    ----------
    ids : iterable of int, optional
        1-based Visual Genome image ids. Defaults to every available image.
    """
    images = vgl.get_all_image_data(DATA_DIR)
    all_regions = vgl.get_all_region_descriptions(DATA_DIR)  # slow
    if ids is None:
        # Bug fix: range(1, len(images)) skipped the final image —
        # ids are 1-based, so the full set is 1..len(images) inclusive.
        ids = range(1, len(images) + 1)
    for image_id in ids:  # renamed from `id`, which shadowed the builtin
        image = images[image_id - 1]
        regions = all_regions[image_id - 1]
        graph = vgl.get_scene_graph(image_id, images=DATA_DIR,
                                    image_data_dir=DATA_DIR + '/by-id/',
                                    synset_file=DATA_DIR + '/synsets.json')
        yield image, regions, graph
def get_vg_questions(image_id):
    """Extract relationship triples and per-subject attributes for an image.

    Parameters
    ----------
    image_id : int
        Visual Genome image id.

    Returns
    -------
    tuple
        ``(knowledge, adjectives)`` where ``knowledge`` is a list of
        ``(subject, predicate, object)`` string triples with a lower-cased
        predicate, and ``adjectives`` maps subject name -> attribute list.
    """
    scene_graph = vg.get_scene_graph(image_id, images='data/',
                                     image_data_dir='data/by-id/')
    adjectives = {}
    # NOTE(review): a later attribute entry for the same subject string
    # overwrites the earlier one — confirm subjects are unique here.
    for attr in scene_graph.attributes:
        adjectives[str(attr.subject)] = list(attr.attribute)
    knowledge = []
    for rel in scene_graph.relationships:
        # Predicate is lowered exactly once (original lowered it twice).
        # `obj` avoids shadowing the builtin `object`.
        obj = str(rel.object)
        knowledge.append((str(rel.subject), str(rel.predicate).lower(), obj))
    return knowledge, adjectives
def draw_sample(image_id, label, relevance_sorted_indices, sample_id,
                relevance_values, experiment='countryVSurban'):
    """Overlay red/green relevance boxes on the original image and save it.

    Parameters
    ----------
    image_id : int
        Visual Genome image id.
    label : int
        Binary class label; mapped to a category name via ``experiment``.
    relevance_sorted_indices : torch.Tensor
        Region indices ordered by relevance (moved to CPU/numpy here).
    sample_id : any
        Used in the output file name ``{sample_id}-{categ}.png``.
    relevance_values : sequence of float
        Relevance score per index; negative -> red box, else green.
    experiment : str, optional
        ``'countryVSurban'`` (default) or any other value for indoor/outdoor.
    """
    graph = vg.get_scene_graph(image_id, DATA_DIR, DATA_DIR + 'by-id/',
                               DATA_DIR + '/synsets.json')
    objects = preprocess_object_names(graph)
    if experiment == 'countryVSurban':
        categ = 'country' if label == 0 else 'urban'
    else:
        categ = 'indoor' if label == 0 else 'outdoor'
    img = PIL_Image.open(DATA_DIR + f'images/{image_id}.jpg')
    plt.imshow(img)
    ax = plt.gca()
    list_regions = os.listdir(REGIONS_DIR + f'{categ}/{image_id}/new')
    relevance_sorted_indices = relevance_sorted_indices.cpu().detach().numpy()
    j = 1
    # Bug fix: the original called range() on the numpy array itself
    # (TypeError); iterate over its length instead. The dead trailing
    # `i += 1` inside the for-loop was also removed.
    for i in range(len(relevance_sorted_indices)):
        for o in objects:
            if list_regions[relevance_sorted_indices[i]].split('.')[0] in o.__str__():
                # Negative relevance is drawn red, non-negative green.
                color = 'red' if relevance_values[i] < 0 else 'green'
                ax.add_patch(Rectangle((o.x, o.y), o.width, o.height,
                                       fill=False, edgecolor=color, linewidth=3))
                ax.text(o.x, o.y, str(j) + o.__str__(), style='italic',
                        bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 10})
                j += 1
                break
    plt.tick_params(labelbottom='off', labelleft='off')
    # plt.show()
    plt.savefig(f'{sample_id}-{categ}.png')
    plt.close()
def draw_sample_v2(sample_graph, image_id, label, relevance_sorted_indices,
                   sample_id, relevance_values, experiment='countryVSurban'):
    """Render two figures for one sample: the scene graph with region
    thumbnails as nodes (``graph-{sample_id}-{categ}.png``), and the
    original image with red/green relevance boxes (``{sample_id}-{categ}.png``).

    Parameters
    ----------
    sample_graph : graph-like
        Provides ``.nodes``; one thumbnail node is created per entry.
    image_id, label, relevance_sorted_indices, sample_id, relevance_values,
    experiment : see ``draw_sample``.
    """
    relevance_sorted_indices = relevance_sorted_indices.cpu().detach().numpy()
    graph = vg.get_scene_graph(image_id, DATA_DIR, DATA_DIR + '\\by-id\\',
                               DATA_DIR + '\\synsets.json')
    objects, graph = preprocess_object_names(graph)
    G = nx.Graph()
    if experiment == 'countryVSurban':
        categ = 'country' if label == 0 else 'urban'
    else:
        categ = 'indoor' if label == 0 else 'outdoor'
    objects_dir = REGIONS_DIR + f'\\{categ}\\{image_id}\\new'
    list_regions = os.listdir(objects_dir)
    # One node per sample-graph node, carrying its region thumbnail image.
    for i in range(len(sample_graph.nodes)):
        img = mpimg.imread(f'{objects_dir}\\{list_regions[i]}')
        G.add_node(i, image=img)
    # Map region-file stem -> node index, for relationship lookup below.
    nodes_dic = {}
    i = 0
    for f in list_regions:
        temp = f.split('.')[0]
        nodes_dic[temp] = i
        i += 1
    edge_labels = {}
    for r in graph.relationships:
        edge = (nodes_dic[r.subject.__str__()], nodes_dic[r.object.__str__()])
        G.add_edge(edge[0], edge[1])
        edge_labels[edge] = r.predicate
    pos = nx.planar_layout(G, 4)
    piesize = np.ones(len(pos)) * 0.002
    # More relevant nodes get progressively larger offsets and thumbnails.
    index_image = 0.001
    index_image_increment = 0.001
    index = 0.005
    index_increment = 0.005
    for i in relevance_sorted_indices:
        pos[i] += index
        piesize[i] += index_image
        index += index_increment
        index_image += index_image_increment
    fig = plt.figure(figsize=(100, 100))
    ax = plt.subplot(111)
    ax.set_aspect('equal')
    nx.draw_networkx_edges(G, pos, ax=ax)
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    plt.xlim(-5, 5)
    plt.ylim(-5, 5)
    trans = ax.transData.transform
    trans2 = fig.transFigure.inverted().transform
    i = 0
    for n in G:
        p2 = piesize[i] / 2
        xx, yy = trans(pos[n])  # figure coordinates
        xa, ya = trans2((xx, yy))  # axes coordinates
        a = plt.axes([xa - p2, ya - p2, piesize[i], piesize[i]])
        a.set_aspect('equal')
        # Bug fix: Graph.node was removed in networkx >= 2.4; G.nodes is
        # the supported mapping view.
        a.imshow(G.nodes[n]['image'])
        a.axis('off')
        i += 1
    ax.axis('off')
    plt.savefig(f'graph-{sample_id}-{categ}.png')
    plt.close()
    # Second figure: relevance boxes over the raw image (as in draw_sample).
    img = PIL_Image.open(DATA_DIR + f'\\images\\{image_id}.jpg')
    plt.imshow(img)
    ax = plt.gca()
    j = 1
    for i in range(len(relevance_sorted_indices)):
        for o in objects:
            if list_regions[relevance_sorted_indices[i]].split('.')[0] in o.__str__():
                if relevance_values[i] < 0:
                    ax.add_patch(Rectangle((o.x, o.y), o.width, o.height,
                                           fill=False, edgecolor='red', linewidth=3))
                else:
                    ax.add_patch(Rectangle((o.x, o.y), o.width, o.height,
                                           fill=False, edgecolor='green', linewidth=3))
                ax.text(o.x, o.y, str(j) + o.__str__(), style='italic',
                        bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 10})
                j += 1
                break
    plt.tick_params(labelbottom='off', labelleft='off')
    # plt.show()
    plt.savefig(f'{sample_id}-{categ}.png')
    plt.close()
def draw_sample_v3(sample_id, image_id, label, relevance_sorted_indices,
                   relevance_values, experiment='countryVSurban'):
    """Draw the five most relevant objects as sized circles on the image,
    connect related pairs with labelled lines, and save to
    ``Result\\{sample_id}-{categ}-{image_id}.png``.

    Circle radius encodes rank: the most relevant object gets radius 40,
    then 32, 24, 16, 8. Green means non-negative relevance, red negative.

    Parameters
    ----------
    sample_id : any
        Used in the output file name.
    image_id : int
        Visual Genome image id.
    label : int
        Binary class label; mapped to a category name via ``experiment``.
    relevance_sorted_indices : torch.Tensor
        Region indices sorted by relevance (most relevant presumably last —
        the slice below walks them in reverse; TODO confirm ordering).
    relevance_values : sequence of float
        Per-index relevance scores, indexed from the end (i = -1, -2, ...).
    experiment : str, optional
        ``'countryVSurban'`` (default) or any other value for indoor/outdoor.
    """
    relevance_sorted_indices = relevance_sorted_indices.cpu().detach().numpy()
    graph = vg.get_scene_graph(image_id, DATA_DIR, DATA_DIR + '\\by-id\\',
                               DATA_DIR + '\\synsets.json')
    objects, graph = preprocess_object_names(graph)
    if experiment == 'countryVSurban':
        categ = 'country' if label == 0 else 'urban'
    else:
        categ = 'indoor' if label == 0 else 'outdoor'
    objects_dir = REGIONS_DIR + f'\\{categ}\\{image_id}\\new'
    list_regions = os.listdir(objects_dir)
    radius_list = []
    # `r` here is the shrinking circle radius; it is later reused as the
    # relationship loop variable and again as an integer index — keep the
    # statement order exactly as-is.
    r = 40
    top5_objects = []
    # The slice takes the last five indices in reverse order.
    for top_relevance in relevance_sorted_indices[:len(relevance_sorted_indices) - 6:-1]:
        radius_list += [r]
        r -= 8
        for o in objects:
            if o.__str__() == list_regions[top_relevance].split('.')[0]:
                top5_objects += [o]
                break
    # Name -> object lookup used when drawing relationship lines below.
    top5_objects_dict = {}
    for o in top5_objects:
        top5_objects_dict[o.__str__()] = o
    # Keep only relationships whose subject AND object are both in the top 5.
    edges = []
    for r in graph.relationships:
        for o1 in top5_objects:
            if r.subject.__str__() == o1.__str__():
                for o2 in top5_objects:
                    if r.object.__str__() == o2.__str__():
                        edges += [r]
    # NOTE(review): cbook.get_sample_data expects a path relative to
    # matplotlib's sample_data dir; this only works because os.path.join
    # passes an absolute path through unchanged — confirm, or read the
    # file with plt.imread directly.
    image_file = cbook.get_sample_data(DATA_DIR + f'\\images\\{image_id}.jpg')
    img = plt.imread(image_file)
    # Make some example data
    # x = np.random.rand(5)*img.shape[1]
    # y = np.random.rand(5)*img.shape[0]
    # Create a figure. Equal aspect so circles look circular
    fig, ax = plt.subplots(1)
    ax.set_aspect('equal')
    # Show the image
    ax.imshow(img)
    # Now, loop through coord arrays, and create a circle at each x,y pair
    # `r` now indexes radius_list; `i` walks relevance_values backwards
    # (-1, -2, ...), mirroring the reversed slice that built top5_objects.
    r = 0
    i = -1
    for o in top5_objects:
        color = 'green' if relevance_values[i] >= 0 else 'red'
        circ = Circle((o.x + o.width / 2, o.y + o.height / 2), radius_list[r],
                      color=color)
        i -= 1
        r += 1
        ax.add_patch(circ)
        ax.text(o.x + o.width / 2, o.y + o.height / 2, o.__str__(),
                bbox=dict(facecolor='white', alpha=0.7))
    # Draw a blue line between each related pair, labelled with the predicate
    # at the segment midpoint.
    for e in edges:
        subject = top5_objects_dict[e.subject.__str__()]
        object = top5_objects_dict[e.object.__str__()]
        x = [subject.x + subject.width / 2, object.x + object.width / 2]
        y = [subject.y + subject.height / 2, object.y + object.height / 2]
        # print(f'{e.subject}:{(e.subject.x + e.subject.width / 2, e.subject.y + e.subject.height / 2)}')
        # print(f'{e.object}:{(e.object.x + e.object.width / 2, e.object.y + e.object.height / 2)}')
        plt.plot(x, y, 'b', linewidth=3)
        plt.text(abs(x[0] + x[1]) / 2, abs(y[0] + y[1]) / 2,
                 e.predicate.__str__(), bbox=dict(facecolor='white', alpha=0.7))
    # Show the image
    plt.savefig(f'Result\\{sample_id}-{categ}-{image_id}.png')