Example #1
 def _render(self, renderer, no_of_samples):
     height = self._kw['H']
     width = self._kw['W']
     clip_near = self._kw['clip_near']
     clip_far = self._kw['clip_far']
     K = np.array(eval(self._kw['K']))
     i = 0
     while i < no_of_samples:
         R = transform.random_rotation_matrix()[:3, :3]
         t = np.array([0, 0, random.randint(50, 400)], dtype=np.float32)
         color, depth_x = renderer.render(0, int(width), int(height), K, R,
                                          t, clip_near, clip_far)
         ys, xs = np.nonzero(depth_x > 0)
         ys = np.array(ys, dtype=np.int16)
         xs = np.array(xs, dtype=np.int16)
         # tx shifts along x (width), ty along y (height)
         tx = random.randint(-width, width)
         ty = random.randint(-height, height)
         M = np.array(((1, 0, tx), (0, 1, ty)), dtype=np.float32)
         color = cv.warpAffine(color, M, (color.shape[1], color.shape[0]))
         depth_x = cv.warpAffine(depth_x, M,
                                 (depth_x.shape[1], depth_x.shape[0]))
         try:
             x, y, w, h = view_sampler.calc_2d_bbox(xs + tx, ys + ty,
                                                    (width, height))
             self.obj_bb[i] = x, y, w, h
             if (x <= 0 or y <= 0 or x >= width or y >= height
                     or x + tx >= width or y + ty >= height
                     or w < 0 or h < 0):
                 #print('Object translated out of bounds. Regenerating ...')
                 continue
             self.train_x[i] = color
             self.mask_x[i] = depth_x == 0
             i += 1
         except ValueError as e:
             #print('Object in Rendering not visible. Regenerating ...')
             continue
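The helper view_sampler.calc_2d_bbox is used throughout these examples but is not listed here. A minimal stand-in, assuming it computes the axis-aligned bounding box of the visible pixel coordinates, clips it to the image, and raises ValueError when nothing is visible (the project's real helper may differ):

import numpy as np

def calc_2d_bbox_sketch(xs, ys, im_size):
    # Hypothetical stand-in for view_sampler.calc_2d_bbox.
    # xs, ys: pixel coordinates where the rendered depth is > 0.
    # im_size: (width, height) of the render.
    if len(xs) == 0:
        raise ValueError('Object in rendering not visible.')
    x1 = int(np.clip(np.min(xs), 0, im_size[0] - 1))
    x2 = int(np.clip(np.max(xs), 0, im_size[0] - 1))
    y1 = int(np.clip(np.min(ys), 0, im_size[1] - 1))
    y2 = int(np.clip(np.max(ys), 0, im_size[1] - 1))
    return x1, y1, x2 - x1 + 1, y2 - y1 + 1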
Example #2
def render(height, width, s):
    height = int(height*s)
    width = int(width*s)
    clip_near = 10
    clip_far = 5000
    K = np.array([[572.4114, 0.0, 325.2611],
                  [0.0, 573.57043, 242.04899],
                  [0.0, 0.0, 1.0]])
    renderer = meshrenderer_phong.Renderer('/home/sid/thesis/ply/models_cad/obj_05_red.ply',
                                           samples=1, vertex_tmp_store_folder='.', clamp=False, vertex_scale=1.0)
    # R = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=np.float32)
    R = transform.random_rotation_matrix()[:3, :3]
    t = np.array([0, 0, random.random()], dtype=np.float32) * 1000
    start = time.time()
    num_runs = 3000
    # for i in tqdm(range(num_runs)):
    color, depth_x = renderer.render(0, width, height, K, R, t,
                                     clip_near, clip_far)
    # cv2.imshow("win", color)
    # cv2.waitKey(1)
    mask_x = depth_x == 0
    ys, xs = np.nonzero(depth_x > 0)

    try:
        obj_bb = view_sampler.calc_2d_bbox(xs, ys, (width, height))
        print(obj_bb)
    except ValueError:
        print('Object in Rendering not visible. Have you scaled the vertices to mm?')
        raise  # obj_bb would be undefined below, so re-raise after the hint

    x, y, w, h = obj_bb
    return color, mask_x, obj_bb
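A minimal way to drive this standalone render function, assuming the imports and the .ply path above are available; the 640x480 resolution is chosen to match the principal point in K and is only illustrative:

if __name__ == '__main__':
    color, mask, bbox = render(height=480, width=640, s=1.0)
    print('bbox (x, y, w, h):', bbox)
    print('foreground pixels:', int((~mask).sum()))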
Example #3
    def render_embedding_image_batch(self, start, end):
        kw = self._kw
        h, w = self.shape[:2]
        azimuth_range = (0, 2 * np.pi)
        elev_range = (-0.5 * np.pi, 0.5 * np.pi)
        radius = float(kw['radius'])
        render_dims = eval(kw['render_dims'])
        K = eval(kw['k'])
        K = np.array(K).reshape(3, 3)

        clip_near = float(kw['clip_near'])
        clip_far = float(kw['clip_far'])
        pad_factor = float(kw['pad_factor'])

        t = np.array([0, 0, float(kw['radius'])])
        batch = np.empty((end - start, ) + self.shape)
        obj_bbs = np.empty((end - start, ) + (4, ))

        for i, R in enumerate(self.viewsphere_for_embedding[start:end]):
            bgr_y, depth_y = self.renderer.render(obj_id=0,
                                                  W=render_dims[0],
                                                  H=render_dims[1],
                                                  K=K.copy(),
                                                  R=R,
                                                  t=t,
                                                  near=clip_near,
                                                  far=clip_far,
                                                  random_light=False)
            # cv2.imshow('depth',depth_y)
            # cv2.imshow('bgr',bgr_y)
            # print depth_y.max()
            # cv2.waitKey(0)
            ys, xs = np.nonzero(depth_y > 0)
            obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)

            obj_bbs[i] = obj_bb

            resized_bgr_y = self.extract_square_patch(
                bgr_y,
                obj_bb,
                pad_factor,
                resize=self.shape[:2],
                interpolation=cv2.INTER_NEAREST)

            if self.shape[2] == 1:
                resized_bgr_y = cv2.cvtColor(resized_bgr_y,
                                             cv2.COLOR_BGR2GRAY)[:, :,
                                                                 np.newaxis]
            batch[i] = resized_bgr_y / 255.
        return (batch, obj_bbs)
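extract_square_patch is called here (and in Examples #6 and #7) but not shown. Judging from the inline cropping in Examples #4 and #7, it presumably cuts a square of side max(w, h) * pad_factor centred on the bounding box and resizes it; a rough, hypothetical stand-in:

import cv2
import numpy as np

def extract_square_patch_sketch(img, obj_bb, pad_factor, resize,
                                interpolation=cv2.INTER_NEAREST):
    # Hypothetical stand-in; the project's own extract_square_patch may differ.
    x, y, w, h = np.array(obj_bb).astype(np.int32)
    size = int(np.maximum(h, w) * pad_factor)
    cx, cy = x + w // 2, y + h // 2
    left = max(cx - size // 2, 0)
    right = min(cx + size // 2, img.shape[1])
    top = max(cy - size // 2, 0)
    bottom = min(cy + size // 2, img.shape[0])
    patch = img[top:bottom, left:right]
    return cv2.resize(patch, resize, interpolation=interpolation)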
Example #4
    def render_rot(self, R, t=None, downSample=1):
        kw = self._kw
        h, w = self.shape[:2]
        radius = float(kw['radius'])
        render_dims = eval(kw['render_dims'])
        K = eval(kw['k'])
        K = np.array(K).reshape(3,3)
        K[:2,:] = K[:2,:] / downSample

        clip_near = float(kw['clip_near'])
        clip_far = float(kw['clip_far'])
        pad_factor = float(kw['pad_factor'])
        
        t = np.array([0, 0, float(kw['radius'])])

        bgr_y, depth_y = self.renderer.render( 
            obj_id=0,
            W=int(render_dims[0] / downSample), 
            H=int(render_dims[1] / downSample),
            K=K.copy(), 
            R=R, 
            t=t,
            near=clip_near,
            far=clip_far,
            random_light=False
        )

        ys, xs = np.nonzero(depth_y > 0)
        obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
        x, y, w, h = np.array(obj_bb).astype(np.int32)

        size = int(np.maximum(h, w) * pad_factor)
        
        # integer crop bounds, clamped to the image
        left = int(np.maximum(x + w // 2 - size // 2, 0))
        right = int(np.minimum(x + w // 2 + size // 2, bgr_y.shape[1]))
        top = int(np.maximum(y + h // 2 - size // 2, 0))
        bottom = int(np.minimum(y + h // 2 + size // 2, bgr_y.shape[0]))

        bgr_y = bgr_y[top:bottom, left:right]
        return cv2.resize(bgr_y, self.shape[:2])
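Dividing the first two rows of K by downSample keeps the projection consistent with the smaller render: the focal lengths and the principal point shrink with the image. A quick numeric check, reusing the intrinsics from Example #2 as an assumed input:

import numpy as np

K = np.array([[572.4114, 0.0, 325.2611],
              [0.0, 573.57043, 242.04899],
              [0.0, 0.0, 1.0]])
downSample = 2
K_small = K.copy()
K_small[:2, :] /= downSample          # same scaling as in render_rot
X = np.array([100.0, -50.0, 700.0])   # arbitrary point in the camera frame

uv_full = (K @ X) / X[2]
uv_small = (K_small @ X) / X[2]
# The pixel in the half-resolution image is exactly half the full-resolution one.
assert np.allclose(uv_small[:2], uv_full[:2] / downSample)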
Example #5
 def _render(self, renderer, no_of_samples):
     train_x = np.empty((no_of_samples, ) + self.shape, dtype=np.uint8)
     mask_x = np.empty((no_of_samples, ) + self.shape[:2], dtype=bool)
     obj_bb = np.empty((no_of_samples, 4), dtype=np.uint16)
     height = self._kw['H']
     width = self._kw['W']
     clip_near = self._kw['clip_near']
     clip_far = self._kw['clip_far']
     K = np.array(eval(self._kw['K']))  # camera intrinsics, as in Example #1
     i = 0
     while i < no_of_samples:
         R = transform.random_rotation_matrix()[:3, :3]
         t = np.array([0, 0, random.randint(50, 400)], dtype=np.float32)
         color, depth_x = renderer.render(0, int(width), int(height), K, R,
                                          t, clip_near, clip_far)
         ys, xs = np.nonzero(depth_x > 0)
         ys = np.array(ys, dtype=np.int16)
         xs = np.array(xs, dtype=np.int16)
         # tx shifts along x (width), ty along y (height)
         tx = random.randint(-width, width)
         ty = random.randint(-height, height)
         M = np.array(((1, 0, tx), (0, 1, ty)), dtype=np.float32)
         color = cv.warpAffine(color, M, (color.shape[1], color.shape[0]))
         depth_x = cv.warpAffine(depth_x, M,
                                 (depth_x.shape[1], depth_x.shape[0]))
         try:
             x, y, w, h = view_sampler.calc_2d_bbox(xs + tx, ys + ty,
                                                    (width, height))
             if (x <= 0 or y <= 0 or x >= width or y >= height
                     or x + tx >= width or y + ty >= height
                     or w < 0 or h < 0):
                 #print('Object translated out of bounds. Regenerating ...')
                 continue
             train_x[i] = np.copy(color)
             obj_bb[i] = x, y, w, h
             #self.show_image("train_x[i]", train_x[i])
             mask_x[i] = np.copy(depth_x == 0)
             #cv2.waitKey(0)  # debug leftover; blocks without a window
             i += 1
         except ValueError as e:
             #print('Object in Rendering not visible. Regenerating ...')
             continue
     return train_x, mask_x, obj_bb
Example #6
    def render_training_images(self,
                               dataset_path=None,
                               experiment_name=None,
                               augment_data=False):
        '''
        Three parameters added by lxc: dataset_path=None, experiment_name=None,
        augment_data=False.
        '''
        kw = self._kw
        H, W = int(kw['h']), int(kw['w'])
        render_dims = eval(kw['render_dims'])
        K = eval(kw['k'])
        K = np.array(K).reshape(3, 3)
        clip_near = float(kw['clip_near'])
        clip_far = float(kw['clip_far'])
        pad_factor = float(kw['pad_factor'])
        max_rel_offset = float(kw['max_rel_offset'])
        t = np.array([0, 0, float(kw['radius'])])

        widgets = [
            'Training: ',
            progressbar.Percentage(), ' ',
            progressbar.Bar(), ' ',
            progressbar.Counter(),
            ' / %s' % self.noof_training_imgs, ' ',
            progressbar.ETA(), ' '
        ]
        bar = progressbar.ProgressBar(maxval=self.noof_training_imgs,
                                      widgets=widgets)
        bar.start()

        for i in np.arange(self.noof_training_imgs):
            bar.update(i)

            # print '%s/%s' % (i,self.noof_training_imgs)
            # start_time = time.time()
            R = transform.random_rotation_matrix()[:3, :3]
            bgr_x, depth_x = self.renderer.render(obj_id=0,
                                                  W=render_dims[0],
                                                  H=render_dims[1],
                                                  K=K.copy(),
                                                  R=R,
                                                  t=t,
                                                  near=clip_near,
                                                  far=clip_far,
                                                  random_light=True)
            bgr_y, depth_y = self.renderer.render(obj_id=0,
                                                  W=render_dims[0],
                                                  H=render_dims[1],
                                                  K=K.copy(),
                                                  R=R,
                                                  t=t,
                                                  near=clip_near,
                                                  far=clip_far,
                                                  random_light=False)
            # render_time = time.time() - start_time
            # cv2.imshow('bgr_x',bgr_x)
            # cv2.imshow('bgr_y',bgr_y)
            # cv2.waitKey(0)

            ys, xs = np.nonzero(depth_x > 0)

            try:
                obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
            except ValueError as e:
                print('Object in Rendering not visible. Have you scaled the vertices to mm?')
                break

            x, y, w, h = obj_bb

            rand_trans_x = np.random.uniform(-max_rel_offset,
                                             max_rel_offset) * w
            rand_trans_y = np.random.uniform(-max_rel_offset,
                                             max_rel_offset) * h

            obj_bb_off = obj_bb + np.array([rand_trans_x, rand_trans_y, 0, 0])

            bgr_x = self.extract_square_patch(bgr_x,
                                              obj_bb_off,
                                              pad_factor,
                                              resize=(W, H),
                                              interpolation=cv2.INTER_NEAREST)
            depth_x = self.extract_square_patch(
                depth_x,
                obj_bb_off,
                pad_factor,
                resize=(W, H),
                interpolation=cv2.INTER_NEAREST)
            mask_x = depth_x == 0.

            ys, xs = np.nonzero(depth_y > 0)
            obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)

            bgr_y = self.extract_square_patch(bgr_y,
                                              obj_bb,
                                              pad_factor,
                                              resize=(W, H),
                                              interpolation=cv2.INTER_NEAREST)

            if self.shape[2] == 1:
                bgr_x = cv2.cvtColor(np.uint8(bgr_x),
                                     cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
                bgr_y = cv2.cvtColor(np.uint8(bgr_y),
                                     cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]

            self.train_x[i] = bgr_x.astype(np.uint8)
            self.mask_x[i] = mask_x
            self.train_y[i] = bgr_y.astype(np.uint8)
            #print 'rendertime ', render_time, 'processing ', time.time() - start_time
            # lxc
            self.Rs[i] = R
            self.obj_bbs_x[i] = obj_bb_off
            self.obj_bbs_y[i] = obj_bb

        if augment_data:
            print("render augmented data.")
            '''written by lxc: 
                add background image to original training image by replacing black background with PASCAL VOC image.'''
            self.load_bg_images(dataset_path, experiment_name)
            self.train_x, self.train_y = self.batch(self.noof_training_imgs)
            self.Rs = self.Rs[self.rand_idcs]
            self.obj_bbs_x = self.obj_bbs_x[self.rand_idcs]
            self.obj_bbs_y = self.obj_bbs_y[self.rand_idcs]
            self.mask_x = self.mask_x[self.rand_idcs]
        bar.finish()
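The augment_data branch delegates to load_bg_images / batch, which are not shown, to paste real photographs behind the rendered object. A minimal sketch of that compositing step, assuming train_x[i] is a BGR render with a black background and mask_x[i] is True exactly where the depth was zero:

import numpy as np

def composite_background_sketch(render_bgr, bg_mask, bg_img):
    # render_bgr: HxWx3 uint8 render with black background
    # bg_mask:    HxW bool, True on background pixels
    # bg_img:     HxWx3 uint8 background photo (e.g. a PASCAL VOC crop)
    out = render_bgr.copy()
    out[bg_mask] = bg_img[bg_mask]
    return out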
Example #7
    def render_training_images(self):
        kw = self._kw
        H, W = int(kw['h']), int(kw['w'])
        render_dims = eval(kw['render_dims'])
        K = eval(kw['k'])
        K = np.array(K).reshape(3,3)
        clip_near = float(kw['clip_near'])
        clip_far = float(kw['clip_far'])
        pad_factor = float(kw['pad_factor'])
        crop_offset_sigma = float(kw['crop_offset_sigma'])
        t = np.array([0, 0, float(kw['radius'])])

        bar = progressbar.ProgressBar(
            maxval=self.noof_training_imgs, 
            widgets=[' [', progressbar.Timer(), ' | ', 
                            progressbar.Counter('%0{}d / {}'.format(len(str(self.noof_training_imgs)), 
                                self.noof_training_imgs)), ' ] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ']
            )
        bar.start()

        for i in np.arange(self.noof_training_imgs):
            bar.update(i)

            # print '%s/%s' % (i,self.noof_training_imgs)
            # start_time = time.time()
            R = transform.random_rotation_matrix()[:3,:3]
            bgr_x, depth_x = self.renderer.render( 
                obj_id=0,
                W=render_dims[0], 
                H=render_dims[1],
                K=K.copy(), 
                R=R, 
                t=t,
                near=clip_near,
                far=clip_far,
                random_light=True
            )
            bgr_y, depth_y = self.renderer.render( 
                obj_id=0,
                W=render_dims[0], 
                H=render_dims[1],
                K=K.copy(), 
                R=R, 
                t=t,
                near=clip_near,
                far=clip_far,
                random_light=False
            )
            # render_time = time.time() - start_time
            # cv2.imshow('bgr_x',bgr_x)
            # cv2.imshow('bgr_y',bgr_y)
            # cv2.waitKey(0)
            
            ys, xs = np.nonzero(depth_x > 0)
            try:
                obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
            except ValueError as e:
                print('Object in Rendering not visible. Have you scaled the vertices to mm?')
                break

            x, y, w, h = obj_bb

            rand_trans_x = np.random.uniform(-crop_offset_sigma, crop_offset_sigma)
            rand_trans_y = np.random.uniform(-crop_offset_sigma, crop_offset_sigma)

            size = int(np.maximum(h, w) * pad_factor)
            left = int(x+w/2-size/2 + rand_trans_x)
            right = int(x+w/2+size/2 + rand_trans_x)
            top = int(y+h/2-size/2 + rand_trans_y)
            bottom = int(y+h/2+size/2 + rand_trans_y)

            bgr_x = bgr_x[top:bottom, left:right]
            depth_x = depth_x[top:bottom, left:right]
            bgr_x = cv2.resize(bgr_x, (W, H), interpolation=cv2.INTER_NEAREST)
            depth_x = cv2.resize(depth_x, (W, H), interpolation=cv2.INTER_NEAREST)

            mask_x = depth_x == 0.

            ys, xs = np.nonzero(depth_y > 0)
            obj_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)

            bgr_y = self.extract_square_patch(bgr_y, obj_bb, pad_factor,
                                              resize=(W, H),
                                              interpolation=cv2.INTER_NEAREST)

            if self.shape[2] == 1:
                bgr_x = cv2.cvtColor(np.uint8(bgr_x), cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]
                bgr_y = cv2.cvtColor(np.uint8(bgr_y), cv2.COLOR_BGR2GRAY)[:,:,np.newaxis]

            self.train_x[i] = bgr_x.astype(np.uint8)
            self.mask_x[i] = mask_x
            self.train_y[i] = bgr_y.astype(np.uint8)

            #print 'rendertime ', render_time, 'processing ', time.time() - start_time
        bar.finish()
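All of these methods pull their settings from a string-valued dict self._kw (values are passed through eval() or float()), typically parsed from a config file. An illustrative set of entries consistent with the keys used in Examples #3, #6 and #7 — the values are placeholders, not the project's actual defaults, and Examples #1 and #5 use the upper-case variants 'H', 'W' and 'K':

kw = {
    'h': '128', 'w': '128',            # output patch size
    'render_dims': '(720, 540)',       # eval() -> (W, H) of the raw render
    'k': '[700.0, 0.0, 360.0, 0.0, 700.0, 270.0, 0.0, 0.0, 1.0]',  # eval() -> 3x3 K
    'radius': '700',                   # camera distance along z, in mm
    'clip_near': '10',
    'clip_far': '10000',
    'pad_factor': '1.2',
    'crop_offset_sigma': '20',
    'max_rel_offset': '0.2',
}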