Example #1
def render_data(render_image=True, render_annotation=True):
    # Excerpt: assumes bpy, os, time, tempfile and boxx's pathjoin/imread are imported,
    # and that the context managers used below (set_image_render, set_annotation_render,
    # set_inst_material, withattr) are defined elsewhere in the same module.
    render = bpy.data.scenes[0].render  # not shown in the excerpt; reconstructed from the render.engine/render.filepath usage below
    path = pathjoin(tempfile.gettempdir(), "render_" + str(time.time()))
    render_result = {}
    if render_image:
        png_path = path + ".png"
        with set_image_render(), withattr(render, "filepath", png_path):
            print("Render image using:", render.engine)
            bpy.ops.render.render(write_still=True)
        render_result["image"] = imread(png_path)
        os.remove(png_path)

    if render_annotation:
        exr_path = path + ".exr"
        with set_inst_material(), set_annotation_render(), withattr(
            render, "filepath", exr_path
        ):
            print("Render annotation using:", render.engine)
            bpy.ops.render.render(write_still=True)
        render_result["exr"] = parser_exr(exr_path)
        os.remove(exr_path)
    result = ImageWithAnnotation(**render_result)
    if "render_6dof_pose" and "Camera" in bpy.data.objects:
        objs = [obj for obj in bpy.data.objects if "inst_id" in obj]
        ycb_meta = get_6dof_pose(bpy.data.objects["Camera"], objs, inst=result["inst"])
        result["ycb_meta"] = ycb_meta
    return result
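
A minimal usage sketch (not part of the original example): render_data() has to run inside Blender's Python, and the keys used below are only the ones the code above actually fills in or reads.

# Hedged usage sketch, assuming a scene whose objects carry an "inst_id" custom property:
result = render_data(render_image=True, render_annotation=True)
rgb = result["image"]   # RGB render read back via boxx.imread
inst = result["inst"]   # instance map, the same key get_6dof_pose() consumes above
# result["ycb_meta"] is additionally set when the scene contains a "Camera" object
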
Example #2
def render_image():
    # Excerpt: assumes bpy, tempfile, os, boxx's imread, and the set_image_render/withattr
    # context managers from the surrounding module are available.
    render = bpy.data.scenes[0].render
    png_path = tempfile.NamedTemporaryFile().name + ".png"
    with set_image_render(), withattr(render, "filepath", png_path):
        bpy.ops.render.render(write_still=True)
    image = imread(png_path)[..., :3]
    os.remove(png_path)
    return image
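
A hedged usage note (not in the original): render_image() must run under Blender's bundled Python, for example in background mode.

# Hedged sketch: run inside Blender, e.g. `blender -b scene.blend -P this_script.py`
rgb = render_image()           # HxWx3 array; the alpha channel is stripped by [..., :3]
print(rgb.shape, rgb.dtype)
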
Example #3
def parser_exr(exr_path):
    # The excerpt starts mid-function; the `def` line above is reconstructed from the
    # call sites below. Assumes `import OpenEXR`, `numpy as np`, and the module's ExrDict.
    file = OpenEXR.InputFile(exr_path)
    header = file.header()

    h, w = header["displayWindow"].max.y + 1, header["displayWindow"].max.x + 1
    exr = ExrDict()
    for key in header["channels"]:
        assert str(header["channels"][key].type) == "FLOAT"
        # np.fromstring is deprecated for binary input; frombuffer is the replacement
        # (copy() keeps the channel array writable, matching the old behaviour).
        exr[key] = np.frombuffer(file.channel(key),
                                 dtype=np.float32).reshape(h, w).copy()
    file.close()
    return exr


def test_parser_exr(exr_path="../tmp_exrs/cycles.exr"):
    return parser_exr(exr_path)


if __name__ == "__main__":
    from boxx import show, imread

    exr_path = "tmp_exr.exr"
    exr_path = "../tmp_exrs/untitled.exr"
    exr_path = "/tmp/blender/tmp.exr"
    exr = parser_exr(exr_path)
    inst = exr.get_inst()
    png = imread(exr_path.replace(".exr", ".png"))[..., :3]

    ann = ImageWithAnnotation(png, exr)
    vis = ann.vis()
    show - vis  # boxx idiom: `show - x` displays the image(s) contained in x

# Excerpt: assumes os, glob, numpy as np and the boxx helpers used below
# (pathjoin, imread, makedirs, uint8, resize, p) are imported earlier in the script.
celebA_path = os.path.expanduser('~/dataset/celeba')
dataset = pathjoin(celebA_path, 'eyeglasses_stgan_dataset')

st_gan_dataset = pathjoin(dataset, 'tf_st_gan_dataset')

psa = sorted(glob(pathjoin(dataset, 'trainA/*')))[0::2]
psb = sorted(glob(pathjoin(dataset, 'trainB/*')))[1::2]

lena = len(psa)
lenb = len(psb)

attribute = np.zeros((lena + lenb, 40), bool)  # np.bool was removed from NumPy; plain bool is equivalent
attribute[-lenb:, 15] = True  # column 15 is "Eyeglasses" in CelebA's 40-attribute list

img = imread(psa[0])
shape = img.shape

makedirs(p / st_gan_dataset)  # boxx idiom: `p / x` prints x and passes it through; makedirs creates the directory
imgns = psa + psb

# Only define the resize helper when the source images are not already 218x178.
if shape[:2] != (218, 178):

    def f(imgn):
        return uint8(resize(imread(imgn), (218, 178)))
Example #7
    #    cfg.MODEL.WEIGHT = "/home/dl/junk/output/single/model_final.pth"
    #    cfg.freeze()

    if args.pth:
        cfg.MODEL.WEIGHT = args.pth

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=False,
        masks_per_dim=2,
        min_image_size=args.min_image_size,
    )

    imgps = sorted(glob(pathjoin(args.dir, "*.jpg")))
    loopLog = LogLoopTime(imgps)
    for imgp in imgps[:]:
        img = imread(imgp)
        #        composite = coco_demo.run_on_opencv_image(img[...,[2,1,0]])[...,[2,1,0]]
        #        show-composite
        bboxList = coco_demo.getBboxList(img)
        visBboxList(img,
                    bboxList,
                    imgp,
                    pltshow=not cloud,
                    thresh=args.confidence_threshold,
                    classNames=classNames)
        execmd("google-chrome {}".format(imgp.replace('.jpg', '.pdf')))
        loopLog(imgp)
Example #8
# NOTE: the excerpt starts mid-function. The wrapper and the symmetric check on m1
# below are reconstructed from context, and the function name is hypothetical:
# it composes two affine transforms given as 2x3 or 3x3 matrices.
def concat_affine(m1, m2):
    if m1.shape != (3, 3):
        m1 = np.append(m1, [[0, 0, 1]], 0)
    if m2.shape != (3, 3):
        m2 = np.append(m2, [[0, 0, 1]], 0)
    return m1.dot(m2)[:2]
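
A hedged usage sketch for the reconstructed helper above (the name concat_affine is not from the original excerpt; numpy is assumed to be imported as np):

A = np.array([[1, 0, 10], [0, 1, 20]], float)  # translate by (10, 20)
B = np.array([[2, 0, 0], [0, 2, 0]], float)    # scale by 2
print(concat_affine(A, B))                     # 2x3 result: apply B first, then A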


keys = set(notWearKeys).intersection(set(trainKeys))

keys = bpp.replicaSplitKeys(keys)

for ind, imgn in enumerate(keys):

    trainAP = pathjoin(trainADir, imgn)
    trainBP = pathjoin(trainBDir, imgn)
    try:
        # skip images that were already written for both splits
        imread(trainAP)
        imread(trainBP)
        continue
    except Exception:  # was a bare `except:`, which would also swallow KeyboardInterrupt
        pass

    d = landmark.loc[imgn]
    imgp = pathjoin(imgdir, d.image_id)
    img = imread(imgp)

    # eye/nose landmark targets (immediately overwritten by the variant below)
    toPoints = (d.lefteye_x, d.lefteye_y), (d.righteye_x, d.righteye_y), (d.nose_x, d.nose_y)
    toPoints = (d.lefteye_x,
                d.lefteye_y), (d.righteye_x,
                               d.righteye_y), (d.lefteye_x + d.righteye_y -
                                               d.lefteye_y, d.lefteye_y +