Code example #1
def draw_datasets(imgs_pred, output_dir, deprocess_func, image_ids, boxes_pred, boxes):
    samples = {'gt_box_pred_mask': imgs_pred}
    for k, v in samples.items():
        samples[k] = np.transpose(deprocess_batch(v, deprocess_func=deprocess_func).cpu().numpy(),
                                  [0, 2, 3, 1])
    for k, v in samples.items():
        # Set the output path
        if k == 'gt_img':
            path = os.path.join(output_dir, "gt")
        elif k == 'gt_box_pred_mask':
            path = os.path.join(output_dir, "generation")
        else:
            raise ValueError("unexpected sample key: {}".format(k))

        os.makedirs(path, exist_ok=True)
        for i in range(v.shape[0]):
            # swap channel order before writing with cv2.imwrite, which assumes BGR
            RGB_img_i = cv2.cvtColor(v[i], cv2.COLOR_BGR2RGB)
            cv2.imwrite("{}/{}.jpg".format(path, image_ids[i]), RGB_img_i)
Code example #2
def draw_results(model, output_dir, scene_graphs):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    all_objs, all_triplets, all_obj_to_img = encode_scene_graphs_list(model, scene_graphs)
    # Run the model forward
    with torch.no_grad():
        imgs_pred, boxes_pred, _, _, obj_to_img = model(all_objs, all_triplets, all_obj_to_img)
    imgs_pred = deprocess_batch(imgs_pred)
    boxes_pred = boxes_pred.cpu()
    obj_to_img = obj_to_img.cpu()
    # Save the generated images
    draw_predictions(scene_graphs, imgs_pred, boxes_pred, obj_to_img, output_dir)

    # render and save a visualization of each input scene graph
    for i, sg in enumerate(scene_graphs):
        sg_img = vis.draw_scene_graph(sg['objects'], sg['relationships'])
        sg_img_path = os.path.join(output_dir, 'img_%06d_sg.png' % i)
        imwrite(sg_img_path, sg_img)
    plt.close("all")
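draw_results only requires that each entry of scene_graphs expose 'objects' and 'relationships' keys (see the vis.draw_scene_graph(sg['objects'], sg['relationships']) call). A minimal input of that shape might look like the following; the exact schema and the [subject, predicate, object] triple format are assumptions, not taken from the project.

# Assumed minimal shape for the scene_graphs argument; only the two keys
# accessed above are guaranteed by the snippet itself.
scene_graphs = [
    {
        "objects": ["sky", "grass", "sheep"],
        # assumed triple format: [subject_index, predicate, object_index]
        "relationships": [
            [0, "above", 1],
            [2, "standing on", 1],
        ],
    },
]
# draw_results(model, "output/", scene_graphs)
# would write output/img_000000_sg.png next to the generated images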
Code example #3
                                    debug=True,
                                    max_objects=5,
                                    dense_scenes=False)

    # ss = 0
    # ii = []
    # for scene in dset.clevr_data['scenes']:
    #     ss += len(scene['objects'])
    #     ii.append(len(scene['objects']))
    # ss /= len(dset.clevr_data['scenes'])
    #
    # import matplotlib.pyplot as plt
    #
    # ii = np.array(ii)
    # _ = plt.hist(ii, bins='auto')  # arguments are passed to np.histogram
    # plt.savefig("hist.png")

    it = dset[2]
    while True:
        # idx = 5149
        idx = np.random.randint(0, len(dset))
        item = dset[idx]
        image, objs, boxes, triplets = item
        image = deprocess_batch(torch.unsqueeze(image, 0))[0]
        cv2.imwrite('/tmp/img.png', np.transpose(image.cpu().numpy(), [1, 2, 0]))
        draw_item(item, image_size=dset.image_size)  # dset.clevr_data['scenes'][index]
        plt.figure()
        plt.imshow(draw_scene_graph(dset.clevr_data['scenes'][idx]['objects'],
                                    triplets=dset.clevr_data['scenes'][idx]['relationships'], vocab=dset.vocab))
        plt.savefig('/tmp/sg.png')
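Note that code example #1 swaps the channel order before cv2.imwrite, while this example writes the transposed tensor directly, so red and blue end up swapped on disk if the deprocessed images are in RGB order. A hedged fix (assuming RGB input, which the snippet does not state; the stand-in tensor below is made up) is a single conversion before writing:

import cv2
import numpy as np
import torch

# Assuming the deprocessed image is a CHW uint8 tensor in RGB order (the
# snippet does not state the channel order), convert to HWC and swap to BGR
# so OpenCV writes the colors it expects.
image = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)  # stand-in
hwc = image.cpu().numpy().transpose(1, 2, 0)
cv2.imwrite('/tmp/img.png', cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR))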
Code example #4
File: vg.py  Project: wzj207/CanonicalSg2Im
    return out


if __name__ == "__main__":
    dset = VGSceneGraphDataset(
        h5_path='val.h5',
        base_path='/specific/netapp5_2/gamir/DER-Roei/datasets/VisualGenome',
        image_size=(256, 256),
        max_objects=10)
    # idx = np.where(np.array(dset.data['image_ids']) ==2411298)[0][0]
    item = dset[15]
    idx = 3408
    # idx = np.random.randint(0, len(dset))
    item = dset[idx]
    image, objs, boxes, triplets = item
    image = deprocess_batch(torch.unsqueeze(image, 0),
                            deprocess_func=decode_image)[0]
    cv2.imwrite('img.png', np.transpose(image.cpu().numpy(), [1, 2, 0]))
    objs_text = np.array(
        dset.vocab['object_idx_to_name'])[objs['object_names']]
    # objs_text = obj_names_list
    # objs_text.append("object")
    mask = dset.data['object_names'][idx] != -1
    image_objects = dset.data['object_names'][idx][mask]

    draw_item(item, image_size=dset.image_size, text=objs_text)
    plt.figure()
    # plt.imshow(draw_scene_graph(dset.clevr_data['scenes'][idx]['objects'],
    #                             triplets=dset.clevr_data['scenes'][idx]['relationships'], vocab=dset.vocab))
    plt.savefig('sg.png')
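The line mask = dset.data['object_names'][idx] != -1 suggests that each image's object slots are padded up to max_objects with -1. A standalone sketch of that masking pattern follows; the array contents are made up for illustration.

import numpy as np

# Made-up example of a padded object-name row: real IDs first, then -1
# padding up to max_objects (here 10), matching the mask used above.
object_names = np.array([12, 5, 31, 7, -1, -1, -1, -1, -1, -1])
mask = object_names != -1
image_objects = object_names[mask]
print(image_objects)   # [12  5 31  7]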