Example #1
    def setImageZoomAndResolution(self,
                                  im_xoff=0,
                                  im_yoff=0,
                                  im_width=None,
                                  im_height=None,
                                  im_scale=1):

        self.im_xoff = im_xoff
        self.im_yoff = im_yoff
        self.im_width = im_width or self.systemModel.cam.width
        self.im_height = im_height or self.systemModel.cam.height
        self.im_scale = im_scale

        self.image = ImageProc.crop_and_zoom_image(self.full_image,
                                                   self.im_xoff, self.im_yoff,
                                                   self.im_width,
                                                   self.im_height,
                                                   self.im_scale)
        self._image_h = self.image.shape[0]
        self._image_w = self.image.shape[1]

        if self._show_target_image:
            # form _gl_image that is used for rendering:
            # black pixels => 0 alpha, non-black pixels => ~0.5 alpha (128/255)
            im = self.image.copy()
            alpha = np.zeros(im.shape, im.dtype)
            #im[im > 0] = 255  # optionally force non-black pixels to white
            alpha[im > 0] = 128
            self._gl_image = np.flipud(cv2.merge(
                (im, im, im, alpha))).tobytes()

        self.updateFrustum()

        # WORKAROUND: for some reason the new frustum is not used unless the window is resized
        s = self.parent().size()
        self.parent().resize(s.width() + 1, s.height())
        self.parent().resize(s.width(), s.height())
        self.update()
        QCoreApplication.processEvents()
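For reference, the call above uses the offset/size/scale form of ImageProc.crop_and_zoom_image. Below is a minimal stand-alone sketch of the same call pattern with a hypothetical image and hypothetical offsets; the import path for ImageProc is an assumption.

import numpy as np
from visnav.algo.image import ImageProc  # assumed location of ImageProc

full_image = np.zeros((1024, 1024), dtype=np.uint8)  # hypothetical full-resolution frame

# crop a 512x512 region starting at offset (100, 200) and apply a scale factor of 0.5,
# mirroring the im_xoff/im_yoff/im_width/im_height/im_scale arguments used above
image = ImageProc.crop_and_zoom_image(full_image, 100, 200, 512, 512, 0.5)
print(image.shape)  # resulting shape depends on how the scale factor is applied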
Example #2
    def process(self, orig_sce_img, outfile, rotate_sc=False, **kwargs):
        # maybe load torch model
        if self.model is None:
            self.load_model()

        if outfile is not None:
            self.debug_filebase = outfile + ('n' if isinstance(
                orig_sce_img, str) else '')

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        self.timer = Stopwatch()
        self.timer.start()

        if self.DEF_ESTIMATE_THRESHOLD:
            threshold = ImageProc.optimal_threshold(None, orig_sce_img)
        else:
            threshold = self.DEF_LUMINOSITY_THRESHOLD

        # detect target, get bounds
        x, y, w, h = ImageProc.single_object_bounds(
            orig_sce_img,
            threshold=threshold,
            crop_marg=self.DEF_CROP_MARGIN,
            min_px=self.DEF_MIN_PIXELS,
            debug=DEBUG)
        if x is None:
            raise PositioningException('asteroid not detected in image')

        # crop image
        img_bw = ImageProc.crop_and_zoom_image(orig_sce_img, x, y, w, h, None,
                                               (224, 224))

        # save cropped image in log archive
        if BATCH_MODE and self.debug_filebase:
            self.timer.stop()
            cv2.imwrite(self.debug_filebase + 'a.png', img_bw)
            self.timer.start()

        # massage input
        input = cv2.cvtColor(img_bw, cv2.COLOR_GRAY2BGR)
        input = Image.fromarray(input)
        input = PoseIllumiDataset.eval_transform(input)[None, :, :, :].to(
            self.device, non_blocking=True)

        # run model
        with torch.no_grad():
            output = self.model(input)

        # massage output
        output = output[0] if isinstance(output, (list, tuple)) else output
        output = output.detach().cpu().numpy()

        # check whether the estimated illumination direction agrees with the initial one
        ill_est = self.model.illumination(output)[0]
        r_ini, q_ini, ill_ini = self.system_model.get_cropped_system_scf(
            x, y, w, h)
        if tools.angle_between_v(
                ill_est, ill_ini) > 10:  # max 10 degree discrepancy accepted
            print(
                'bad illumination direction estimated, initial=%s, estimated=%s'
                % (ill_ini, ill_est))

        # apply result
        r_est = self.model.position(output)[0]
        q_est = np.quaternion(*self.model.rotation(output)[0])
        self.system_model.set_cropped_system_scf(x,
                                                 y,
                                                 w,
                                                 h,
                                                 r_est,
                                                 q_est,
                                                 rotate_sc=rotate_sc)
        self.timer.stop()

        if False:  # disabled debug comparison of estimated vs. real cropped-frame poses
            r_est2, q_est2, ill_est2 = self.system_model.get_cropped_system_scf(
                x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            r_real, q_real, ill_real = self.system_model.get_cropped_system_scf(
                x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            print('compare q_est vs q_est2, q_real vs q_est, q_real vs q_est2')

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(textures=False)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'b.png',
                        np.concatenate((sce_img, res_img), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((sce_img, res_img),
                                                     axis=1))
                cv2.waitKey()
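Example #2 uses the other call form of ImageProc.crop_and_zoom_image: the scale argument is None and a fixed target shape (224, 224) is given instead, so the detected bounding box is cropped and resized directly to the network input resolution. A minimal stand-alone sketch of that pattern with hypothetical data follows; the import path for ImageProc is an assumption.

import numpy as np
from visnav.algo.image import ImageProc  # assumed location of ImageProc

sce_img = np.zeros((1024, 1024), dtype=np.uint8)  # hypothetical scene image
x, y, w, h = 300, 250, 180, 160                   # hypothetical detected target bounds

# crop to the detected box and resize straight to the 224x224 model input size
img_bw = ImageProc.crop_and_zoom_image(sce_img, x, y, w, h, None, (224, 224))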
Example #3
def export(sm, dst_path, src_path=None, src_imgs=None, trg_shape=(224, 224), crop=False, debug=False,
           img_prefix="", title=""):

    trg_w, trg_h = trg_shape
    assert (src_path is not None) + (src_imgs is not None) == 1, 'give either src_path or src_imgs, not both'

    if debug:
        renderer = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        obj_idx = renderer.load_object(sm.asteroid.target_model_file,
                                       smooth=sm.asteroid.render_smooth_faces)
        algo = AlgorithmBase(sm, renderer, obj_idx)

    metadatafile = os.path.join(dst_path, 'dataset_all.txt')
    if not os.path.exists(metadatafile):
        with open(metadatafile, 'w') as f:
            f.write('\n'.join(['%s, camera centric coordinate frame used' % title,
                               'Image ID, ImageFile, Target Pose [X Y Z W P Q R], Sun Vector [X Y Z]', '', '']))

    files = list(os.listdir(src_path)) if src_imgs is None else src_imgs
    files = sorted(files)

    id = 0
    for i, fn in enumerate(files):
        if src_imgs is not None or re.search(r'(?<!far_)\d{4}\.png$', fn):
            c = 2 if src_imgs is None else 1
            tools.show_progress(len(files)//c, i//c)
            id += 1

            # read system state, write out as relative to s/c
            fname = os.path.basename(fn)
            if src_imgs is None:
                fn = os.path.join(src_path, fn)
            lbl_fn = re.sub(r'_%s(\d{4})' % img_prefix, r'_\1', fn[:-4]) + '.lbl'

            sm.load_state(lbl_fn)
            sm.swap_values_with_real_vals()

            if not crop:
                shutil.copy2(fn, os.path.join(dst_path, fname))
                if os.path.exists(fn[:-4] + '.d.exr'):
                    shutil.copy2(fn[:-4] + '.d.exr', os.path.join(dst_path, fname[:-4] + '.d.exr'))
                if os.path.exists(fn[:-4] + '.xyz.exr'):
                    shutil.copy2(fn[:-4] + '.xyz.exr', os.path.join(dst_path, fname[:-4] + '.xyz.exr'))
                if os.path.exists(fn[:-4] + '.s.exr'):
                    shutil.copy2(fn[:-4] + '.s.exr', os.path.join(dst_path, fname[:-4] + '.s.exr'))
                _write_metadata(metadatafile, id, fname, sm.get_system_scf())
                continue

            from visnav.algo.absnet import AbsoluteNavigationNN

            # read image, detect box, resize, adjust relative pose
            img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
            assert img is not None, 'image file %s not found' % fn

            # detect target, get bounds
            x, y, w, h = ImageProc.single_object_bounds(img, threshold=AbsoluteNavigationNN.DEF_LUMINOSITY_THRESHOLD,
                                                        crop_marg=AbsoluteNavigationNN.DEF_CROP_MARGIN,
                                                        min_px=AbsoluteNavigationNN.DEF_MIN_PIXELS, debug=debug)
            if x is None:
                continue

            # write image metadata
            system_scf = sm.get_cropped_system_scf(x, y, w, h)
            _write_metadata(metadatafile, id, fname, system_scf)

            others, (depth, coords, px_size), k = [], [False] * 3, 1
            if os.path.exists(fn[:-4] + '.d.exr'):
                depth = True
                others.append(cv2.imread(fn[:-4] + '.d.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.xyz.exr'):
                coords = True
                others.append(cv2.imread(fn[:-4] + '.xyz.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.s.exr'):
                px_size = True
                others.append(cv2.imread(fn[:-4] + '.s.exr', cv2.IMREAD_UNCHANGED))

            # crop & resize image, write it
            cropped = ImageProc.crop_and_zoom_image(img, x, y, w, h, None, (trg_w, trg_h), others=others)

            cv2.imwrite(os.path.join(dst_path, fname), cropped[0], [cv2.IMWRITE_PNG_COMPRESSION, 9])
            if depth:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.d.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if coords:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.xyz.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if px_size:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.s.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))

            if debug:
                sc, dq = sm.cropped_system_tf(x, y, w, h)

                # apply the cropped-frame relative position and rotation delta returned above
                sm.spacecraft_pos = tools.q_times_v(SystemModel.sc2gl_q.conj(), sc)
                sm.rotate_spacecraft(dq)
                #sm.set_cropped_system_scf(x, y, w, h, sc, dq)

                if False:
                    sm.load_state(lbl_fn)
                    sm.swap_values_with_real_vals()
                    imgd = cv2.resize(img, (trg_h, trg_w))

                imge = algo.render(center=False, depth=False, shadows=True)
                h, w = imge.shape
                imge = cv2.resize(imge[:, (w - h)//2:(w - h)//2+h], cropped[0].shape)
                cv2.imshow('equal?', np.hstack((
                    cropped[0],
                    np.ones((cropped[0].shape[0], 1), dtype=cropped[0].dtype) * 255,
                    imge,
                )))
                cv2.waitKey()

                if i > 60:
                    quit()
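Example #3 additionally passes the others keyword: auxiliary per-pixel arrays (depth, model coordinates, pixel size) are cropped and resized with the same geometry as the main image, and the function then returns an indexable result with the main image first and the extra arrays after it, as the cropped[0]/cropped[k] accesses above show. A minimal sketch of that pattern with hypothetical arrays follows; the import path for ImageProc is an assumption.

import numpy as np
from visnav.algo.image import ImageProc  # assumed location of ImageProc

img = np.zeros((1024, 1024), dtype=np.uint8)      # hypothetical grayscale frame
depth = np.zeros((1024, 1024), dtype=np.float32)  # hypothetical matching depth map
x, y, w, h = 400, 380, 200, 220                   # hypothetical detected target bounds

# the depth map is cropped and resized with the same geometry as the image;
# cropped[0] is the resized image, cropped[1] the resized depth map
cropped = ImageProc.crop_and_zoom_image(img, x, y, w, h, None, (224, 224), others=[depth])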