Example #1
    def loadTargetImage(self, src, remove_bg=True):
        tmp = cv2.imread(src, cv2.IMREAD_GRAYSCALE)
        if tmp is None:
            raise Exception("Can't load image from file %s" % (src, ))

        cam = self.systemModel.cam
        if tmp.shape != (cam.height, cam.width):
            # visit fails to generate 1024 high images
            tmp = cv2.resize(tmp,
                             None,
                             fx=cam.width / tmp.shape[1],
                             fy=cam.height / tmp.shape[0],
                             interpolation=cv2.INTER_CUBIC)

        if BATCH_MODE and self.add_image_noise and self._noise_image:
            tmp = ImageProc.add_noise_to_image(tmp, self._noise_image)

        self.image_file = src
        if remove_bg:
            self.full_image, h, th = ImageProc.process_target_image(tmp)
            self.image_bg_threshold = th
            self.parent().centroid.bg_threshold = th
        else:
            self.full_image = tmp
            self.image_bg_threshold = None
            self.parent().centroid.bg_threshold = None

        self.setImageZoomAndResolution(im_scale=self.im_def_scale)
Example #2
    def _get_motion_kernel(psf_sd, line_xy):
        if len(psf_sd) == 3:
            sd1, w, sd2 = psf_sd
        else:
            sd1, w, sd2 = psf_sd[0], 0, 0

        psf_hw = math.ceil(max(sd1 * 3, sd2 * 2))
        psf_fw = 1 + 2 * psf_hw
        psf = ImageProc.gkern2d(psf_fw, sd1) + (0 if w == 0 else w *
                                                ImageProc.gkern2d(psf_fw, sd2))

        line_xy = np.array(line_xy)
        line = np.zeros(
            np.ceil(np.abs(np.flip(line_xy))).astype(int) + psf_fw)

        cnt = np.flip(line.shape) / 2
        start = tuple(np.round(cnt - line_xy / 2).astype(int))
        end = tuple(np.round(cnt + line_xy / 2).astype(int))
        cv2.line(line,
                 start,
                 end,
                 color=1.0,
                 thickness=1,
                 lineType=cv2.LINE_AA)

        mb_psf = cv2.filter2D(line, cv2.CV_64F, psf)
        mb_psf /= np.sum(mb_psf)  # normalize to one
        return mb_psf
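For orientation, the same construction as a standalone sketch (independent of ImageProc, whose gkern2d is assumed to be a normalized 2-D Gaussian): draw a unit-intensity anti-aliased line on a canvas sized for the motion, convolve it with the Gaussian PSF, and normalize to unit sum.

import math
import cv2
import numpy as np

def motion_psf(sd, line_xy):
    hw = math.ceil(sd * 3)
    fw = 2 * hw + 1
    g = cv2.getGaussianKernel(fw, sd)    # 1-d gaussian, sums to 1
    psf = g @ g.T                        # separable 2-d gaussian
    dx, dy = line_xy
    canvas = np.zeros(np.ceil(np.abs((dy, dx))).astype(int) + fw)
    cy, cx = np.array(canvas.shape) / 2
    p0 = (int(round(cx - dx / 2)), int(round(cy - dy / 2)))
    p1 = (int(round(cx + dx / 2)), int(round(cy + dy / 2)))
    cv2.line(canvas, p0, p1, color=1.0, thickness=1, lineType=cv2.LINE_AA)
    k = cv2.filter2D(canvas, cv2.CV_64F, psf)
    return k / np.sum(k)

kernel = motion_psf(2.0, (12, 4))        # 2 px PSF, (12, 4) px motion
assert abs(np.sum(kernel) - 1.0) < 1e-9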
Example #3
    def _motion_kernel_psf_saturation(self, du, psf_sd, get_px_du_sat=False):
        read_sd = None
        if len(psf_sd) in (2, 4):
            psf_sd, read_sd = psf_sd[:-1], psf_sd[-1]

        line_xy = self.frame.motion_in_px(self.ixy)
        mb_psf = self._get_motion_kernel(psf_sd, line_xy)
        px_du_sat = np.clip(mb_psf * du, 0, self.frame.max_signal)
        if read_sd:
            noise = trunc_gaussian_shift(px_du_sat,
                                         read_sd * self.frame.max_signal,
                                         self.frame.max_signal)
            if 1:
                px_du_sat = np.clip(px_du_sat - noise, 0,
                                    self.frame.max_signal)
                du_sat = np.sum(px_du_sat)
            else:
                # disabled alternative: add smoothed gaussian read noise directly
                noise = np.random.normal(0, read_sd * self.frame.max_signal,
                                         px_du_sat.shape)
                noise = cv2.filter2D(noise, cv2.CV_64F,
                                     ImageProc.gkern2d(5, 1.0))
                px_du_sat = np.clip(px_du_sat + noise, 0,
                                    self.frame.max_signal)
                du_sat = np.sum(px_du_sat)
        else:
            du_sat = np.sum(px_du_sat)
        return (du_sat, ) + ((px_du_sat, ) if get_px_du_sat else tuple())
Example #4
def test_thumbnail_gamma_effect():
    img = np.zeros((1536, 2048))
    j = 1536 // 2 + 8
    I = np.array(tuple(range(8, 2048, 32))[:-1], dtype='int')
    mag = I[-1] * 0.85
    img[j - 8:j + 8, I] = np.array(I) * (mag / I[-1])
    img = ImageProc.apply_point_spread_fn(img, 0.4)
    print('max: %d' % np.max(img))

    img = np.clip(img, 0, 1023).astype('uint16')
    plt.imshow(img)
    plt.show()

    thb_real = cv2.resize(img,
                          None,
                          fx=1 / 16,
                          fy=1 / 16,
                          interpolation=cv2.INTER_AREA)
    plt.imshow(thb_real)
    plt.show()

    img_gamma = ImageProc.adjust_gamma(img, 2.2, 0.1, max_val=1023)
    thb_gamma = cv2.resize(img_gamma,
                           None,
                           fx=1 / 16,
                           fy=1 / 16,
                           interpolation=cv2.INTER_AREA)
    thb = ImageProc.adjust_gamma(thb_gamma, 2.2, 0.1, inverse=1, max_val=1023)

    x = thb_real[j // 16, I // 16]
    xf = img[j, I]
    xfg = img_gamma[j, I]
    yg = thb_gamma[j // 16, I // 16]
    y = thb[j // 16, I // 16]
    line = np.linspace(0, np.max(x))

    plt.plot(x, y, 'x')
    plt.plot(line, line)
    gamma, gamma_break, max_val, scale = fit_gamma(x, y)
    plt.plot(
        line,
        ImageProc.adjust_gamma(line, gamma, gamma_break, max_val=max_val) *
        scale)
    plt.show()
    quit()
Example #5
def output(img, show, maxval=1.0, gamma=1.0, outfile=None):
    img = ImageProc.adjust_gamma(maxval * img / np.max(img) * 255,
                                 gamma=gamma) / 255
    cv2.imwrite(outfile or (sys.argv[1] if len(sys.argv) > 1 else 'test.png'),
                (255 * img).astype('uint8'))
    if show:
        img_sc = cv2.resize(img, (700, 700))
        cv2.imshow('test.png', img_sc)
        return cv2.waitKey()
Example #6
    def adjust_iteratively(self, sce_img, outfile=None, **kwargs):
        self.debug_filebase = outfile
        self._bg_threshold = kwargs.get('bg_threshold', self._bg_threshold)
        sce_img = self.maybe_load_scene_image(
            sce_img, preproc='bg_threshold' not in kwargs)

        if DEBUG:
            sc = self.system_model.view_width / sce_img.shape[1]
            cv2.imshow('target img', cv2.resize(sce_img, None, fx=sc, fy=sc))

        self.system_model.spacecraft_pos = (
            0, 0, -self.system_model.min_med_distance)
        for i in range(self.MAX_ITERATIONS):
            ox, oy, oz = self.system_model.spacecraft_pos
            od = math.sqrt(ox**2 + oy**2 + oz**2)

            if not DEBUG:
                self.adjust(sce_img, preproc=False)
            else:
                try:
                    self.adjust(sce_img, preproc=False)
                except PositioningException as e:
                    print(str(e))
                    break
                finally:
                    cv2.imshow('rendered img', self._ref_img)
                    cv2.waitKey()

            nx, ny, nz = self.system_model.spacecraft_pos
            ch = math.sqrt((nx - ox)**2 + (ny - oy)**2 + (nz - oz)**2)
            if DEBUG:
                print('i%d: d0=%.2f, ch=%.2f, rel_ch=%.2f%%' %
                      (i, od, ch, ch / od * 100))
            if ch / od < self.ITERATION_TOL:
                break

        if self.CHECK_RESULT_VALIDITY:
            result_quality = ImageProc.norm_xcorr(sce_img, self._ref_img)
            if result_quality < self.MIN_RESULT_XCORR:
                raise PositioningException(
                    'Result failed quality test with score: %.3f' %
                    (result_quality, ))

        if BATCH_MODE and self.debug_filebase:
            img = self.render(shadows=self.RENDER_SHADOWS,
                              textures=self.RENDER_TEXTURES)
            cv2.imwrite(self.debug_filebase + 'r.png', img)

        if DEBUG:
            cv2.waitKey()
            cv2.destroyAllWindows()
Example #7
    def show_image(self,
                   gain=1,
                   processed=False,
                   compare=False,
                   median_filter=False,
                   zero_bg=False,
                   save_as=None):
        img = self.image.astype('float')
        if processed:
            if zero_bg:
                img = np.clip(
                    img - np.min(img) - (0 if zero_bg is True else zero_bg), 0,
                    np.inf)
            img *= gain
            if median_filter:
                img = cv2.medianBlur(img.astype('uint16'), median_filter)
            img = ImageProc.color_correct(img,
                                          self.applied_bgr_mx,
                                          max_val=self.max_val)
            img = ImageProc.adjust_gamma(img,
                                         self.applied_gamma,
                                         self.applied_gamma_break,
                                         max_val=self.max_val)
        else:
            img = np.clip(img * gain, 0, 2**self.bits - 1)
        img = ImageProc.change_color_depth(img, self.bits, 8).astype('uint8')

        if save_as is not None:
            cv2.imwrite(save_as, img)

        s = self.image.shape
        if compare:
            img = np.hstack((self.raw_image.astype(img.dtype),
                             np.ones((s[0], 1, s[2]), dtype=img.dtype), img))

        sc = 1
        plt.imshow(np.flip(img, axis=2))
        plt.show()
        return img, sc
Example #8
def generate_field_fft(shape, sd=(0.33, 0.33, 0.34), len_sc=(0.5, 0.5 / 4, 0.5 / 16)):
    from visnav.algo.image import ImageProc
    sds = sd if getattr(sd, '__len__', False) else [sd]
    len_scs = len_sc if getattr(len_sc, '__len__', False) else [len_sc]
    assert len(shape) == 2, 'only 2d shapes are valid'
    assert len(sds) == len(len_scs), 'len(sd) differs from len(len_sc)'
    n = np.prod(shape)

    kernel = np.sum(
        np.stack([1 / len_sc * sd * n * ImageProc.gkern2d(shape, 1 / len_sc) for sd, len_sc in zip(sds, len_scs)],
                 axis=2), axis=2)
    f_img = np.random.normal(0, 1, shape) + 1j * np.random.normal(0, 1, shape)
    f_img = np.real(np.fft.ifft2(np.fft.fftshift(kernel * f_img)))
    return f_img
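A quick usage sketch: each sd term contributes a noise component with the matching len_sc length scale (a fraction of the image size), so the defaults mix coarse, medium and fine structure; scalars are also accepted.

import matplotlib.pyplot as plt

coarse_fine = generate_field_fft((256, 256))                       # default 3-scale mix
fine_only = generate_field_fft((256, 256), sd=1.0, len_sc=0.05)    # single fine scale
plt.subplot(1, 2, 1); plt.imshow(coarse_fine)
plt.subplot(1, 2, 2); plt.imshow(fine_only)
plt.show()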
Example #9
    def _count_du(self, x, y, size=5, bg=None):
        wmrg = size // 4
        mmrg = 1 if bg is None else 0
        mask = ImageProc.bsphkern(size + 2 * mmrg)
        if bg is None:
            mask[0, :] = 0
            mask[-1, :] = 0
            mask[:, 0] = 0
            mask[:, -1] = 0
        mask = mask.astype(bool)
        mr = size // 2 + mmrg
        mn = size + 2 * mmrg

        h, w, _ = self.image.shape
        x, y = int(round(x)), int(round(y))
        if h - y + wmrg <= mr or w - x + wmrg <= mr or x + wmrg < mr or y + wmrg < mr:
            return zip([None] * 3, [None] * 3)

        win = self.image[max(0, y - mr):min(h, y + mr + 1),
                         max(0, x - mr):min(w, x + mr + 1), :].reshape((-1, 3))
        mx0, mx1 = -min(0, x - mr), mn - (max(w, x + mr + 1) - w)
        my0, my1 = -min(0, y - mr), mn - (max(h, y + mr + 1) - h)

        mask = mask[my0:my1, mx0:mx1].flatten()
        bg = np.mean(win[np.logical_not(mask), :],
                     axis=0) if bg is None else bg

        if False:
            tot = np.sum(win[mask, :], axis=0)
            tot_bg = bg * np.sum(mask)
            tot = np.max(np.array((tot, tot_bg)), axis=0)

            # tried to take into account thumbnail mean resizing after gamma correction,
            # also assuming no saturation of original pixels because of motion blur
            # => better results if tune Camera->emp_coef instead
            resizing_gain = (1 / self.resize_scale)**2
            g = self.applied_gamma

            # ([sum over i in n: (bg+s_i)**g] / n) ** (1/g)
            #    => cannot compensate for gamma correction as signal components not summable anymore,
            #       only possible if assume that only a single pixel has signal (or some known distribution of signal?)
            # signal in a single, non-saturating pixel (conflicting assumptions):
            adj_tot = (((tot - tot_bg + bg)**g * resizing_gain) -
                       (resizing_gain - 1) * bg**g)**(1 / g) - bg
            signal = adj_tot
        else:
            #signal = tot - tot_bg
            signal = np.clip(np.sum(win[mask, :] - bg, axis=0), 0, np.inf)

        return zip(signal, bg)
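The disabled branch above works through why the gamma correction cannot simply be undone after thumbnail resizing: gamma encoding does not commute with summation or averaging, which a two-line check confirms.

import numpy as np

px = np.array([10.0, 0.0, 0.0, 0.0])   # signal concentrated in one pixel
g = 2.2
a = np.mean(px ** (1 / g)) ** g         # gamma-encode, average, gamma-decode
b = np.mean(px)                         # plain average
print(a, b)                             # ~0.47 vs 2.5: the signal is not recoverable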
Example #10
    def write_img(raw_imgs, outfile):
        imgs = []
        for raw in raw_imgs:
            img = ImageProc.change_color_depth(raw.astype('float'), 8, bits)
            img = ImageProc.adjust_gamma(img,
                                         gamma,
                                         gamma_break=gamma_break,
                                         inverse=True,
                                         max_val=max_val)
            if bgr_cc_mx is not None:
                img = ImageProc.color_correct(img,
                                              bgr_cc_mx,
                                              inverse=True,
                                              max_val=max_val)
            imgs.append(np.expand_dims(img, axis=0))

        if len(imgs) == 1:
            imgs = imgs[0]

        stacked = np.stack(imgs, axis=0)
        reduced = np.median(stacked, axis=0) if len(imgs) > 2 else np.min(
            stacked, axis=0)
        bg_img = np.round(reduced).squeeze().astype('uint16')
        # imwrite params must be (flag, value) pairs; uint16 data is written as 16-bit PNG directly
        cv2.imwrite(outfile, bg_img)
Example #11
    def detect_moon(self):
        x, y = tuple(int(v) for v in self.moon_loc)
        win = self.image[y - 24:y + 25, x - 29:x + 30, :]

        if 0:
            # TODO: what transformation would make this work?
            win = ImageProc.adjust_gamma(win / 662 * 1023,
                                         2.2,
                                         inverse=1,
                                         max_val=1023)

        h, w, s, c = *win.shape[0:2], 19, 18
        mask = np.zeros((h, w), dtype='uint8')
        mask[(h // 2 - s):(h // 2 + s + 1),
             (w // 2 - s):(w // 2 + s +
                           1)] = ImageProc.bsphkern(s * 2 + 1).astype('uint8')
        mask[0:c, :] = 0

        if SHOW_MEASURES:
            mask_img = (mask.reshape(
                (h, w, 1)) * np.array([255, 0, 0]).reshape(
                    (1, 1, 3))).astype('uint8')
            win_img = np.clip(win, 0, 255).astype('uint8')
            plt.imshow(np.flip(ImageProc.merge((mask_img, win_img)), axis=2))
            plt.show()

        mask = mask.flatten().astype('bool')
        n = np.sum(mask)
        measures = []
        for i, cam in enumerate(self.cam):
            raw_du = np.sum(win[:, :, i].flatten()[mask])
            bg_du = np.mean(win[:, :, i].flatten()[np.logical_not(mask)])
            du_count = raw_du - bg_du * n
            measures.append(MoonMeasure(self, i, du_count))
        self.measures = measures
        return measures
Example #12
    def _feature_detection_mask(self, image):
        _, mask = cv2.threshold(image, self.min_feature_intensity, 255,
                                cv2.THRESH_BINARY)
        kernel = ImageProc.bsphkern(round(6 * image.shape[0] / 512) * 2 + 1)

        # exclude asteroid limb from feature detection
        mask = cv2.erode(mask, ImageProc.bsphkern(7),
                         iterations=1)  # remove stars
        mask = cv2.dilate(mask, kernel,
                          iterations=1)  # remove small shadows inside asteroid
        mask = cv2.erode(mask, kernel, iterations=2)  # remove asteroid limb

        # exclude overexposed parts
        _, mask_oe = cv2.threshold(image, self.max_feature_intensity, 255,
                                   cv2.THRESH_BINARY)
        mask_oe = cv2.dilate(mask_oe, kernel, iterations=1)
        mask_oe = cv2.erode(mask_oe, kernel, iterations=1)
        mask[mask_oe > 0] = 0

        if 0:
            cv2.imshow('mask', ImageProc.overlay_mask(image, mask))
            cv2.waitKey()

        return mask
Example #13
File: lab.py Project: oknuutti/visnav-py
    def get_kernel(peak, fwhm, psfd, size):
        var = (fwhm / np.sqrt(8 * np.log(2)))**2
        amp = psfd / gaussian(0, var, 0)
        spectrum_fn = lambda lam: amp * gaussian(peak, var, lam)
        kernel = LabFrame.calc_source_kernel(bgr_cam,
                                             spectrum_fn,
                                             size,
                                             points=(peak, ))
        if IGNORE_INVERT_COLOR_CORRECTION:
            kernel = ImageProc.color_correct(
                kernel,
                np.array([
                    [2.083400, -0.524300, -0.389100],
                    [-0.516800, 2.448100, -0.761300],
                    [-0.660600, 0.149600, 1.680900],
                ]))

        return kernel
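The variance expression relies on the standard Gaussian relation FWHM = 2*sqrt(2*ln 2)*sd, i.e. sd = fwhm / sqrt(8*ln 2); a quick sanity check:

import numpy as np

fwhm = 10.0
sd = fwhm / np.sqrt(8 * np.log(2))
gauss = lambda x: np.exp(-x**2 / (2 * sd**2))
print(gauss(fwhm / 2))   # 0.5, i.e. half maximum at half the FWHM from the peak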
Example #14
File: lab.py Project: oknuutti/visnav-py
    def calc_source_kernel(cams, spectrum_fn, patch_size, points=None):
        # detect source
        kernel = ImageProc.bsphkern(
            tuple(map(int, patch_size)) if '__iter__' in
            dir(patch_size) else int(patch_size))

        expected_bgr = np.zeros(3)
        for i, cam in enumerate(cams):
            ef, _ = Camera.electron_flux_in_sensed_spectrum_fn(cam.qeff_coefs,
                                                               spectrum_fn,
                                                               cam.lambda_min,
                                                               cam.lambda_max,
                                                               fast=False,
                                                               points=points)
            expected_bgr[i] = cam.gain * cam.aperture_area * cam.emp_coef * ef

        kernel = np.repeat(np.expand_dims(kernel, axis=2), 3,
                           axis=2) * expected_bgr
        return kernel
Example #15
    def setImageZoomAndResolution(self,
                                  im_xoff=0,
                                  im_yoff=0,
                                  im_width=None,
                                  im_height=None,
                                  im_scale=1):

        self.im_xoff = im_xoff
        self.im_yoff = im_yoff
        self.im_width = im_width or self.systemModel.cam.width
        self.im_height = im_height or self.systemModel.cam.height
        self.im_scale = im_scale

        self.image = ImageProc.crop_and_zoom_image(self.full_image,
                                                   self.im_xoff, self.im_yoff,
                                                   self.im_width,
                                                   self.im_height,
                                                   self.im_scale)
        self._image_h = self.image.shape[0]
        self._image_w = self.image.shape[1]

        if self._show_target_image:
            # form _gl_image that is used for rendering
            # black => 0 alpha, non-black => white => .5 alpha
            im = self.image.copy()
            alpha = np.zeros(im.shape, im.dtype)
            #im[im > 0] = 255
            alpha[im > 0] = 128
            self._gl_image = np.flipud(cv2.merge(
                (im, im, im, alpha))).tobytes()

        self.updateFrustum()

        # WORK-AROUND: for some reason won't use new frustum if window not resized
        s = self.parent().size()
        self.parent().resize(s.width() + 1, s.height())
        self.parent().resize(s.width(), s.height())
        self.update()
        QCoreApplication.processEvents()
Example #16
def est_refl_model(hapke=True, iters=1, init_noise=0.0, verbose=True):
    sm = RosettaSystemModel()
    imgsize = (512, 512)
    imgs = {
        'ROS_CAM1_20140831T104353': 3.2,  # 60, 3.2s
        'ROS_CAM1_20140831T140853': 3.2,  # 62, 3.2s
        'ROS_CAM1_20140831T103933': 3.2,  # 65, 3.2s
        'ROS_CAM1_20140831T022253': 3.2,  # 70, 3.2s
        'ROS_CAM1_20140821T100719': 2.8,  # 75, 2.8s
        'ROS_CAM1_20140821T200718': 2.0,  # 80, 2.0s
        'ROS_CAM1_20140822T113854': 2.0,  # 85, 2.0s
        'ROS_CAM1_20140823T021833': 2.0,  # 90, 2.0s
        'ROS_CAM1_20140819T120719': 2.0,  # 95, 2.0s
        'ROS_CAM1_20140824T021833': 2.8,  # 100, 2.8s
        'ROS_CAM1_20140824T020853': 2.8,  # 105, 2.8s
        'ROS_CAM1_20140824T103934': 2.8,  # 110, 2.8s
        'ROS_CAM1_20140818T230718': 2.0,  # 113, 2.0s
        'ROS_CAM1_20140824T220434': 2.8,  # 120, 2.8s
        'ROS_CAM1_20140828T020434': 2.8,  # 137, 2.8s
        'ROS_CAM1_20140827T140434': 3.2,  # 145, 3.2s
        'ROS_CAM1_20140827T141834': 3.2,  # 150, 3.2s
        'ROS_CAM1_20140827T061834': 3.2,  # 155, 3.2s
        'ROS_CAM1_20140827T021834': 3.2,  # 157, 3.2s
        'ROS_CAM1_20140826T221834': 2.8,  # 160, 2.8s
    }

    target_exposure = np.min(list(imgs.values()))
    for img, exposure in imgs.items():
        real = cv2.imread(
            os.path.join(sm.asteroid.image_db_path, img + '_P.png'),
            cv2.IMREAD_GRAYSCALE)
        real = ImageProc.adjust_gamma(real, 1 / 1.8)
        #dark_px_lim = np.percentile(real, 0.1)
        #dark_px = np.mean(real[real<=dark_px_lim])
        real = cv2.resize(real, imgsize)
        # remove dark pixel intensity and normalize based on exposure
        #real = real - dark_px
        #real *= (target_exposure / exposure)
        imgs[img] = real

    re = RenderEngine(*imgsize, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.hires_target_model_file, smooth=False)
    ab = AlgorithmBase(sm, re, obj_idx)

    model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
    defs = RenderEngine.REFLMOD_PARAMS[model]

    if hapke:
        # L, th, w, b (scattering anisotropy), c (scattering direction from forward to back), B0, hs
        #real_ini_x = [515, 16.42, 0.3057, 0.8746]
        sppf_n = 2
        real_ini_x = defs[:2] + defs[3:3 + sppf_n]
        scales = np.array((500, 20, 3e-1, 3e-1))[:2 + sppf_n]
    else:
        ll_poly = 5
        #real_ini_x = np.array(defs[:7])
        real_ini_x = np.array(
            (9.95120e-01, -6.64840e-03, 3.96267e-05, -2.16773e-06, 2.08297e-08,
             -5.48768e-11, 1))  # theta=20
        real_ini_x = np.hstack((real_ini_x[0:ll_poly + 1], (real_ini_x[-1], )))
        scales = np.array((3e-03, 2e-05, 1e-06, 1e-08, 5e-11, 1))
        scales = np.hstack((scales[0:ll_poly], (scales[-1], )))

    def set_params(x):
        if hapke:
            # optimize J, th, w, b, (c), B_SH0, hs
            xsc = list(np.array(x) * scales)
            vals = xsc[:2] + [defs[2]] + xsc[2:] + defs[len(xsc) + 1:]
        else:
            vals = [1] + list(np.array(x)[:-1] * scales[:-1]) + [0] * (
                5 - ll_poly) + [x[-1] * scales[-1], 0, 0, 0]
        RenderEngine.REFLMOD_PARAMS[model] = vals

    # debug 1: real vs synth, 2: err img, 3: both
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0
        for file, real in imgs.items():
            lblloader.load_image_meta(
                os.path.join(sm.asteroid.image_db_path, file + '.LBL'), sm)
            sm.swap_values_with_real_vals()
            synth2 = ab.render(shadows=True, reflection=model, gamma=1)
            err_img = (synth2.astype('float') - real)**2
            lim = np.percentile(err_img, 99)
            err_img[err_img > lim] = 0
            err += np.mean(err_img)
            if debug:
                if debug % 2:
                    cv2.imshow(
                        'real vs synthetic',
                        np.concatenate((real.astype('uint8'), 255 * np.ones(
                            (real.shape[0], 1), dtype='uint8'), synth2),
                                       axis=1))
                if debug > 1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img / np.max(err_img))
                cv2.waitKey()
        err /= len(imgs)
        if verbose:
            print('%s => %f' %
                  (', '.join(['%.4e' % i for i in np.array(x) * scales]), err))
        return err

    best_x = None
    best_err = float('inf')
    for i in range(iters):
        if hapke:
            ini_x = tuple(real_ini_x + init_noise *
                          np.random.normal(0, 1, (len(scales), )) * scales)
        else:
            ini_x = tuple(real_ini_x[1:-1] / real_ini_x[0] + init_noise *
                          np.random.normal(0, 1, (len(scales) - 1, )) *
                          scales[:-1]) + (real_ini_x[-1] * real_ini_x[0], )

        if verbose:
            print('\n\n\n==== i:%d ====\n' % i)
        res = minimize(
            costfun,
            tuple(ini_x / scales),
            args=(0, verbose),
            #method="BFGS", options={'maxiter': 10, 'eps': 1e-3, 'gtol': 1e-3})
            method="Nelder-Mead",
            options={
                'maxiter': 120,
                'xtol': 1e-4,
                'ftol': 1e-4
            })
        #method="COBYLA", options={'rhobeg': 1.0, 'maxiter': 200, 'disp': False, 'catol': 0.0002})
        if not verbose:
            print('%s => %f' %
                  (', '.join(['%.5e' % i
                              for i in np.array(res.x) * scales]), res.fun))

        if res.fun < best_err:
            best_err = res.fun
            best_x = res.x

    if verbose:
        costfun(best_x, 3, verbose=True)

    if hapke:
        x = tuple(best_x * scales)
    else:
        x = (1, ) + tuple(best_x * scales)
        if verbose:
            p = np.linspace(0, 160, 100)
            L = get_graph_L(20, p)
            plt.plot(p, L, p, Lfun(x[:-1], p))
            plt.show()

    return x
Example #17
    def process(self, orig_sce_img, outfile, rotate_sc=False, **kwargs):
        # maybe load torch model
        if self.model is None:
            self.load_model()

        if outfile is not None:
            self.debug_filebase = outfile + ('n' if isinstance(
                orig_sce_img, str) else '')

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        self.timer = Stopwatch()
        self.timer.start()

        if self.DEF_ESTIMATE_THRESHOLD:
            threshold = ImageProc.optimal_threshold(None, orig_sce_img)
        else:
            threshold = self.DEF_LUMINOSITY_THRESHOLD

        # detect target, get bounds
        x, y, w, h = ImageProc.single_object_bounds(
            orig_sce_img,
            threshold=threshold,
            crop_marg=self.DEF_CROP_MARGIN,
            min_px=self.DEF_MIN_PIXELS,
            debug=DEBUG)
        if x is None:
            raise PositioningException('asteroid not detected in image')

        # crop image
        img_bw = ImageProc.crop_and_zoom_image(orig_sce_img, x, y, w, h, None,
                                               (224, 224))

        # save cropped image in log archive
        if BATCH_MODE and self.debug_filebase:
            self.timer.stop()
            cv2.imwrite(self.debug_filebase + 'a.png', img_bw)
            self.timer.start()

        # massage input
        inp = cv2.cvtColor(img_bw, cv2.COLOR_GRAY2BGR)
        inp = Image.fromarray(inp)
        inp = PoseIllumiDataset.eval_transform(inp)[None, :, :, :].to(
            self.device, non_blocking=True)

        # run model
        with torch.no_grad():
            output = self.model(inp)

        # massage output
        output = output[0] if isinstance(output, (list, tuple)) else output
        output = output.detach().cpu().numpy()

        # check if estimated illumination direction is close or not
        ill_est = self.model.illumination(output)[0]
        r_ini, q_ini, ill_ini = self.system_model.get_cropped_system_scf(
            x, y, w, h)
        if tools.angle_between_v(
                ill_est, ill_ini) > 10:  # max 10 degree discrepancy accepted
            print(
                'bad illumination direction estimated, initial=%s, estimated=%s'
                % (ill_ini, ill_est))

        # apply result
        r_est = self.model.position(output)[0]
        q_est = np.quaternion(*self.model.rotation(output)[0])
        self.system_model.set_cropped_system_scf(x,
                                                 y,
                                                 w,
                                                 h,
                                                 r_est,
                                                 q_est,
                                                 rotate_sc=rotate_sc)
        self.timer.stop()

        if False:
            r_est2, q_est2, ill_est2 = self.system_model.get_cropped_system_scf(
                x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            r_real, q_real, ill_real = self.system_model.get_cropped_system_scf(
                x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            print('compare q_est vs q_est2, q_real vs q_est, q_real vs q_est2')

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(textures=False)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'b.png',
                        np.concatenate((sce_img, res_img), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((sce_img, res_img),
                                                     axis=1))
                cv2.waitKey()
Example #18
def texture_noise(model, support=None, L=None, noise_sd=SHAPE_MODEL_NOISE_LV['lo'],
                  len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, max_n=1e4, hf_noise=True):
    tex = model.load_texture()
    if tex is None:
        print('tools.texture_noise: no texture loaded')
        return [None] * 3

    r = np.sqrt(max_n / np.prod(tex.shape[:2]))
    ny, nx = (np.array(tex.shape[:2]) * r).astype(int)
    n = nx * ny
    tx_grid_xx, tx_grid_yy = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny))
    tx_grid = np.hstack((tx_grid_xx.reshape((-1, 1)), tx_grid_yy.reshape((-1, 1))))

    support = support if support else model
    points = np.array(support.vertices)
    max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng

    # use vertices for distances, find corresponding vertex for each pixel
    y_cov = None
    if L is None:
        try:
            from sklearn.gaussian_process.kernels import Matern, WhiteKernel
        except ImportError:
            print('Requires scikit-learn, install using "conda install scikit-learn"')
            sys.exit()

        kernel = 1.0 * noise_sd * Matern(length_scale=len_sc * max_rng, nu=1.5) \
                 + 0.5 * noise_sd * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \
                 + WhiteKernel(
            noise_level=1e-5 * noise_sd * max_rng)  # white noise for positive definite covariance matrix only

        # texture coordinates given so that x points left and *Y POINTS UP*
        tex_img_coords = np.array(support.texcoords)
        tex_img_coords[:, 1] = 1 - tex_img_coords[:, 1]
        _, idxs = find_nearest_each(haystack=tex_img_coords, needles=tx_grid)
        tx2vx = support.texture_to_vertex_map()
        y_cov = kernel(points[tx2vx[idxs], :] - np.mean(points, axis=0))

        if 0:
            # for debugging distances
            import matplotlib.pyplot as plt
            import cv2
            from visnav.algo.image import ImageProc

            orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
            gx, gy = np.gradient(points[tx2vx[idxs], :].reshape((ny, nx, 3)), axis=(1, 0))
            gxy = np.linalg.norm(gx, axis=2) + np.linalg.norm(gy, axis=2)
            gxy = (gxy - np.min(gxy)) / (np.max(gxy) - np.min(gxy))
            grad_img = cv2.resize((gxy * 255).astype('uint8'), orig_tx.shape)
            overlaid = ImageProc.merge((orig_tx, grad_img))

            plt.figure(1)
            plt.imshow(overlaid)
            plt.show()

    # sample gp
    e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L)
    e0 = e0.reshape((ny, nx))

    # interpolate for final texture
    x = np.linspace(np.min(tx_grid_xx), np.max(tx_grid_xx), tex.shape[1])
    y = np.linspace(np.min(tx_grid_yy), np.max(tx_grid_yy), tex.shape[0])
    interp0 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e0, kx=1, ky=1)
    err0 = interp0(x, y)

    if 0:
        import matplotlib.pyplot as plt
        import cv2
        from visnav.algo.image import ImageProc
        orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
        err_ = err0 if 1 else e0
        eimg = (err_ - np.min(err_)) / (np.max(err_) - np.min(err_))
        eimg = cv2.resize((eimg * 255).astype('uint8'), orig_tx.shape)
        overlaid = ImageProc.merge((orig_tx, eimg))
        plt.figure(1)
        plt.imshow(overlaid)
        plt.show()

    err1 = 0
    if hf_noise:
        e1, L = mv_normal(np.zeros(n), L=L)
        e1 = e1.reshape((ny, nx))
        interp1 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e1, kx=1, ky=1)
        err_coef = interp1(x, y)
        lo, hi = np.min(err_coef), np.max(err_coef)
        err_coef = (err_coef - lo) / (hi - lo)

        len_sc = 10
        err1 = generate_field_fft(tex.shape, (6 * noise_sd, 4 * noise_sd),
                                  (len_sc / 1000, len_sc / 4500)) if hf_noise else 0
        err1 *= err_coef

    noisy_tex = tex + err0 + err1

    min_v, max_v = np.quantile(noisy_tex, (0.0001, 0.9999))
    min_v = min(0, min_v)
    noisy_tex = (np.clip(noisy_tex, min_v, max_v) - min_v) / (max_v - min_v)

    if 0:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.imshow(noisy_tex)
        plt.figure(2)
        plt.imshow(err0)
        plt.figure(3)
        plt.imshow(err1)
        plt.show()

    return noisy_tex, np.std(err0 + err1), L
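mv_normal itself is not shown; a minimal sketch of the usual approach, assuming it samples a zero-mean multivariate normal and returns a reusable Cholesky factor, which would explain why L is passed back in above:

import numpy as np

def mv_normal(mean, cov=None, L=None):
    # factor the covariance once; reuse L for further samples with the same covariance
    if L is None:
        L = np.eye(len(mean)) if cov is None else np.linalg.cholesky(cov)
    return mean + L @ np.random.normal(0, 1, len(mean)), L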
Example #19
def analyze_aurora_img(img_file, get_bgr_cam):
    debug = 1
    n = 0
    Frame.MISSING_BG_REMOVE_STRIPES = 0

    bgr_cam = get_bgr_cam(thumbnail=False, estimated=1, final=1)
    f = Frame.from_file(bgr_cam, img_file, img_file[:-4]+'.lbl', bg_offset=False, debug=debug)

    if 0:
        f.show_image(processed=True, save_as='C:/projects/s100imgs/processed-aurora.png')

    img = f.image.astype('float')

    if 0:
        img = img - np.percentile(img, 5, axis=1).reshape((-1, 1, 3))

    bg1 = (830, 1070), (1180, 1400)
#    bg1 = (0, 900), (660, 1280)
#    bg2 = (1560, 1050), (2048, 1350)
    mean_bg = np.mean(img[bg1[0][1]:bg1[1][1], bg1[0][0]:bg1[1][0], :].reshape((-1, 3)), axis=0)
#    mean_bg = np.mean(np.vstack((img[bg1[0][1]:bg1[1][1], bg1[0][0]:bg1[1][0], :].reshape((-1, 3)),
#                                 img[bg2[0][1]:bg2[1][1], bg2[0][0]:bg2[1][0], :].reshape((-1, 3)))), axis=0)
    img = img - mean_bg
    # img = ImageProc.apply_point_spread_fn(img - mean_bg, 0.01)
    # img = np.clip(img, 0, 1023).astype('uint16')
    # img = cv2.medianBlur(img, 31)
    # img = np.clip(img, 0, RAW_IMG_MAX_VALUE) / RAW_IMG_MAX_VALUE

    n += 1
    plt.figure(n)
    imsh = (np.clip(img * 2 + RAW_IMG_MAX_VALUE * 0.3, 0, RAW_IMG_MAX_VALUE) / RAW_IMG_MAX_VALUE * 255).astype('uint8')
    rd_y = (720, 700), (890, 720)
    rd_r1 = (720, 830), (890, 870)
    rd_r2 = (1080, 820), (1250, 860)
    gr_r = (1280, 770), (1450, 795)
    cv2.rectangle(imsh, bg1[0], bg1[1], (255, 0, 0), 2)       # bg1
#    cv2.rectangle(imsh, bg2[0], bg2[1], (255, 0, 0), 2)       # bg2
    cv2.rectangle(imsh, rd_y[0], rd_y[1], (0, 200, 200), 2)       # yellow
    cv2.rectangle(imsh, rd_r1[0], rd_r1[1], (0, 0, 255), 2)     # red1
    cv2.rectangle(imsh, rd_r2[0], rd_r2[1], (0, 0, 255), 2)     # red2
    cv2.rectangle(imsh, gr_r[0], gr_r[1], (0, 255, 0), 2)     # green
    plt.imshow(np.flip(imsh, axis=2))
    plt.show()

    def electrons(lam):
        h = 6.626e-34  # planck constant (m2kg/s)
        c = 3e8  # speed of light
        return h * c / lam  # energy per photon

    blue = 427.8e-9
    green = 557.7e-9
    yellow = 589.3e-9
    red = 630.0e-9
    colors = (blue, green, yellow, red)
    dus_per_rad = dict(zip(colors, ([], [], [], [])))   # DUs per 1 W/m2/sr of radiance
    coef = f.exposure * f.gain * RAW_IMG_MAX_VALUE

    for wl in dus_per_rad.keys():
        for cam in bgr_cam:
            cgain = cam.gain * cam.emp_coef * cam.aperture_area
            fn, _ = Camera.qeff_fn(tuple(cam.qeff_coefs), 350e-9, 1000e-9)

            # W/m2/sr => phot/s/m2/sr => elec/s/m2/sr => DUs/sr
            dus_per_rad[wl].append(1/electrons(wl) * fn(wl) * coef * cgain)
    for wl in dus_per_rad.keys():
        dus_per_rad[wl] = np.array(dus_per_rad[wl])

    class Patch:
        def __init__(self, name, rect, bands, mean=None, rad=None):
            self.name, self.rect, self.bands, self.mean, self.rad = name, rect, bands, mean, rad
    nt = lambda n, r, b: Patch(name=n, rect=r, bands=b)

    patches = [
        nt('Clean Red', rd_r1, (blue, green, red)),
        nt('Strong Red', rd_r2, (blue, green, red)),
        nt('Green', gr_r, (blue, green, red)),
        nt('Sodium', rd_y, (blue, green, yellow)),
    ]

    # pseudo inverse
    for p in patches:
        p.mean = np.mean(img[p.rect[0][1]:p.rect[1][1], p.rect[0][0]:p.rect[1][0], :].reshape((-1, 3)), axis=0)
        px_sr = cam.pixel_solid_angle((p.rect[0][0]+p.rect[1][0])//2, (p.rect[0][1]+p.rect[1][1])//2)
        E = np.hstack((dus_per_rad[p.bands[0]], dus_per_rad[p.bands[1]], dus_per_rad[p.bands[2]])) * px_sr
        invE = np.linalg.inv(E.T.dot(E)).dot(E.T)
        rad = invE.dot(p.mean)    # radiance in W/m2/sr
        # e = E.dot(rad)
        # diff = (p.mean - e) * 100 / np.linalg.norm(p.mean)
        p.rad = [''] * len(colors)
        for i, b in enumerate(p.bands):
            idx = colors.index(b)
            p.rad[idx] = rad[i]

    sep = '\t' if 1 else ' & '
    le = '\n' if 1 else ' \\\\\n'
    if 0:
        # NOTE: stale branch from an older tuple-based patch format; crashes with Patch objects
        print(sep.join(('Patch', 'Emission at', '', '', 'Red', 'Green', 'Blue')), end=le)
        for name, irr, mean, model, diff in patches:
            print(sep.join((name, '428 nm', ('%.3e' % irr[0]) if irr[0] else 'n/a', 'Mean',
                            *('%.1f' % m for m in np.flip(mean.flatten())))), end=le)
            print(sep.join(('', '557.7 nm', ('%.3e' % irr[1]) if irr[1] else 'n/a', 'Modeled',
                            *('%.1f' % m for m in np.flip(model.flatten())))), end=le)
            print(sep.join(('', '589 nm', ('%.3e' % irr[2]) if irr[2] else 'n/a', 'Diff. [%]',
                            *('%.1f' % m for m in np.flip(diff.flatten())))), end=le)
            print(sep.join(('', '630 nm', ('%.3e' % irr[3]) if irr[3] else 'n/a', *(['']*4))), end=le)
    else:
        print(sep.join(('Patch', 'Red', 'Green', 'Blue', '428 nm', '557.7 nm', '589 nm', '630 nm')), end=le)
        for p in patches:
            # in kilo Rayleigh (kR) == 6330*1e9*lambda * W/m2/sr    or  4*pi*10^(-10)*10^(-3) * photon flux
            print(sep.join((p.name, *('%.1f' % m for m in np.flip(p.mean.flatten())),
                                    *(tools.fixed_precision(r*4*np.pi*1e-13/electrons(colors[i]), 3, True) if r else ''  # r*1e-13*4*np.pi
                                      for i, r in enumerate(p.rad)))), end=le)
    quit()

    # NOTE: unreachable due to quit() above; `emission` is not defined in this scope
    aurora = np.zeros_like(img)
    for i, color in enumerate(colors):
        # d/dx[(r-aw)'*(r-aw)] == 0
        #  => w == (r'*a)/(a'*a)
        a = emission[color]
        w = np.sum(img.reshape((-1, 3)) * a.T, axis=1) / sum(a**2)
        e = (w*a).T
        r = img.reshape((-1, 3)) - e
        x = w / np.linalg.norm(r, axis=1)

        # plt.figure(2)
        # plt.imshow(w.reshape(img.shape[:2])/np.max(w))
        # plt.title('weight (max=%f)' % np.max(w))

        # plt.figure(3)
        # plt.imshow(x.reshape(img.shape[:2])/np.max(x))
        # plt.title('x (max=%f)' % np.max(x))

        n += 1
        plt.figure(n)
        x[x < {red: 10, green: 6, yellow: 16}[color]] = 0
        x[w < 100] = 0
        xf = ImageProc.apply_point_spread_fn(x.reshape(img.shape[:2]), 0.03)
        xf = cv2.medianBlur(xf.astype('uint16'), 11)
        plt.imshow(xf / np.max(xf))
        plt.title('Emission detection @ %.1fnm' % (color * 1e9))

        e[xf.flatten() == 0, :] = (0, 0, 0)
        aurora += e.reshape(img.shape)

        # plt.figure(6)
        # plt.imshow(np.flip(e.reshape(img.shape) / np.max(e), axis=2))
        # plt.title('modeled aurora')

        # plt.figure(7)
        # plt.imshow(np.flip(r.reshape(img.shape)/np.max(r), axis=2))
        # plt.title('residual')
        # plt.show()

    plt.figure(8)
    plt.imshow(np.flip(aurora / np.max(aurora), axis=2))
    plt.title('modeled aurora')
    plt.show()

    # TODO: translate rgb values to aurora (ir)radiance
    #  - following uses W/m2/sr for "in-band radiance"
    #  - https://www.osapublishing.org/DirectPDFAccess/A2F3D832-975A-1850-088634AAFCF21258_186134/ETOP-2009-ESB4.pdf?da=1&id=186134&uri=ETOP-2009-ESB4&seq=0&mobile=no
    #  - use pixel sr?

    print('done')
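The least-squares weight in the unreachable block (w == (r'*a)/(a'*a), derived in the comment) is the projection of each pixel's BGR vector onto the emission color a; a quick numerical check with hypothetical values:

import numpy as np

a = np.array([0.1, 0.3, 1.0])              # hypothetical emission color (BGR)
r = np.array([0.5, 0.9, 2.0])              # observed pixel
w = r.dot(a) / a.dot(a)                    # closed-form minimizer of ||r - w*a||^2
ws = np.linspace(w - 1, w + 1, 201)
errs = [np.sum((r - wi * a)**2) for wi in ws]
assert abs(ws[np.argmin(errs)] - w) < 0.02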
Example #20
    def render(self,
               obj_idxs,
               rel_pos_v,
               rel_rot_q,
               light_v,
               get_depth=False,
               shadows=True,
               textures=True,
               gamma=1.0,
               reflection=REFLMOD_LUNAR_LAMBERT,
               flux_density=False):

        obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
        rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
        rel_rot_q = np.array(rel_rot_q).reshape((-1, ))
        light_v = np.array(light_v)
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[
            0], "obj_idxs, rel_pos_v and rel_rot_q dimensions don't match"

        shadow_mvps = None
        if shadows:
            shadow_mvps = self._render_shadowmap(obj_idxs, rel_pos_v,
                                                 rel_rot_q, light_v)

        self._fbo.use()
        self._ctx.enable(moderngl.DEPTH_TEST)
        self._ctx.enable(moderngl.CULL_FACE)
        self._ctx.front_face = 'ccw'  # cull back faces
        self._ctx.clear(0, 0, 0, 0, float('inf'))
        if shadows:
            self._shadow_map.use(RenderEngine._LOC_SHADOW_MAP)
            self._prog['shadow_map'].value = RenderEngine._LOC_SHADOW_MAP

        for i, obj_idx in enumerate(obj_idxs):
            self._set_params(obj_idx,
                             rel_pos_v[i],
                             rel_rot_q[i],
                             light_v,
                             shadow_mvps,
                             textures,
                             reflection,
                             prog=self._prog,
                             flux_density=flux_density)
            self._objs[obj_idx].render()

        if self._samples > 0:
            self._ctx.copy_framebuffer(self._fbo2, self._fbo)
            fbo = self._fbo2
            dbo = self._dbo2
        else:
            fbo = self._fbo
            dbo = self._dbo

        data = np.frombuffer(fbo.read(components=1, alignment=1, dtype='f4'),
                             dtype='f4').reshape((self._height, self._width))
        data = np.flipud(data)

        if get_depth:
            depth = np.frombuffer(dbo.read(alignment=1), dtype='f4').reshape(
                (self._height, self._width))
            depth = np.flipud(depth)

            # normalize depth
            if self._persp_proj:
                # for perspective projection
                a = -(self._frustum_far - self._frustum_near) / (
                    2.0 * self._frustum_far * self._frustum_near)
                b = (self._frustum_far + self._frustum_near) / (
                    2.0 * self._frustum_far * self._frustum_near)
                if self._frustum_far / self._frustum_near < 1e7:
                    depth = np.divide(1.0, (2.0 * a) * depth -
                                      (a - b))  # 1/((2*X-1)*a+b)
                else:
                    # up to difference of 1e14
                    depth = np.divide(1.0,
                                      (2.0 * a) * depth.astype(np.float64) -
                                      (a - b)).astype(np.float32)
            else:
                # for orthographic projection
                #  - depth is between 0 and 1
                depth = depth * (self._frustum_far -
                                 self._frustum_near) + self._frustum_near

        # free memory to avoid memory leaks
        if shadows:
            self._shadow_map.release()

        if flux_density:
            data = data * flux_density
        else:
            data = np.clip(data * 255, 0, 255).astype('uint8')
            if gamma != 1.0:
                data = ImageProc.adjust_gamma(data, gamma)

        return (data, depth) if get_depth else data
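The perspective branch inverts the standard depth-buffer mapping: with a = -(f - n)/(2fn) and b = (f + n)/(2fn), eye-space depth is 1/((2d - 1)a + b) for a buffer value d in [0, 1]; a round-trip check:

import numpy as np

n, f = 0.1, 1000.0                               # near / far planes
z = np.array([0.5, 10.0, 500.0])                 # eye-space depths
d = ((f + n - 2 * f * n / z) / (f - n) + 1) / 2  # forward map to [0, 1] buffer values
a = -(f - n) / (2 * f * n)
b = (f + n) / (2 * f * n)
assert np.allclose(1.0 / ((2 * a) * d - (a - b)), z)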
Example #21
def export(sm, dst_path, src_path=None, src_imgs=None, trg_shape=(224, 224), crop=False, debug=False,
           img_prefix="", title=""):

    trg_w, trg_h = trg_shape
    assert (src_path is not None) + (src_imgs is not None) == 1, 'give either src_path or src_imgs, not both'

    if debug:
        renderer = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        obj_idx = renderer.load_object(sm.asteroid.target_model_file,
                                       smooth=sm.asteroid.render_smooth_faces)
        algo = AlgorithmBase(sm, renderer, obj_idx)

    metadatafile = os.path.join(dst_path, 'dataset_all.txt')
    if not os.path.exists(metadatafile):
        with open(metadatafile, 'w') as f:
            f.write('\n'.join(['%s, camera centric coordinate frame used' % title,
                               'Image ID, ImageFile, Target Pose [X Y Z W P Q R], Sun Vector [X Y Z]', '', '']))

    files = list(os.listdir(src_path)) if src_imgs is None else src_imgs
    files = sorted(files)

    id = 0
    for i, fn in enumerate(files):
        if src_imgs is not None or re.search(r'(?<!far_)\d{4}\.png$', fn):
            c = 2 if src_imgs is None else 1
            tools.show_progress(len(files)//c, i//c)
            id += 1

            # read system state, write out as relative to s/c
            fname = os.path.basename(fn)
            if src_imgs is None:
                fn = os.path.join(src_path, fn)
            lbl_fn = re.sub(r'_%s(\d{4})' % img_prefix, r'_\1', fn[:-4]) + '.lbl'

            sm.load_state(lbl_fn)
            sm.swap_values_with_real_vals()

            if not crop:
                shutil.copy2(fn, os.path.join(dst_path, fname))
                if os.path.exists(fn[:-4] + '.d.exr'):
                    shutil.copy2(fn[:-4] + '.d.exr', os.path.join(dst_path, fname[:-4] + '.d.exr'))
                if os.path.exists(fn[:-4] + '.xyz.exr'):
                    shutil.copy2(fn[:-4] + '.xyz.exr', os.path.join(dst_path, fname[:-4] + '.xyz.exr'))
                if os.path.exists(fn[:-4] + '.s.exr'):
                    shutil.copy2(fn[:-4] + '.s.exr', os.path.join(dst_path, fname[:-4] + '.s.exr'))
                _write_metadata(metadatafile, id, fname, sm.get_system_scf())
                continue

            from visnav.algo.absnet import AbsoluteNavigationNN

            # read image, detect box, resize, adjust relative pose
            img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
            assert img is not None, 'image file %s not found' % fn

            # detect target, get bounds
            x, y, w, h = ImageProc.single_object_bounds(img, threshold=AbsoluteNavigationNN.DEF_LUMINOSITY_THRESHOLD,
                                                        crop_marg=AbsoluteNavigationNN.DEF_CROP_MARGIN,
                                                        min_px=AbsoluteNavigationNN.DEF_MIN_PIXELS, debug=debug)
            if x is None:
                continue

            # write image metadata
            system_scf = sm.get_cropped_system_scf(x, y, w, h)
            _write_metadata(metadatafile, id, fname, system_scf)

            others, (depth, coords, px_size), k = [], [False] * 3, 1
            if os.path.exists(fn[:-4] + '.d.exr'):
                depth = True
                others.append(cv2.imread(fn[:-4] + '.d.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.xyz.exr'):
                coords = True
                others.append(cv2.imread(fn[:-4] + '.xyz.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.s.exr'):
                px_size = True
                others.append(cv2.imread(fn[:-4] + '.s.exr', cv2.IMREAD_UNCHANGED))

            # crop & resize image, write it
            cropped = ImageProc.crop_and_zoom_image(img, x, y, w, h, None, (trg_w, trg_h), others=others)

            cv2.imwrite(os.path.join(dst_path, fname), cropped[0], [cv2.IMWRITE_PNG_COMPRESSION, 9])
            if depth:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.d.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if coords:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.xyz.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if px_size:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.s.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))

            if debug:
                sc, dq = sm.cropped_system_tf(x, y, w, h)

                # NOTE: sc_ast_lf_r is undefined here; presumably the `sc` position from above
                sm.spacecraft_pos = tools.q_times_v(SystemModel.sc2gl_q.conj(), sc_ast_lf_r)
                sm.rotate_spacecraft(dq)
                #sm.set_cropped_system_scf(x, y, w, h, sc_ast_lf_r, sc_ast_lf_q)

                if False:
                    sm.load_state(lbl_fn)
                    sm.swap_values_with_real_vals()
                    imgd = cv2.resize(img, (trg_h, trg_w))

                imge = algo.render(center=False, depth=False, shadows=True)
                h, w = imge.shape
                imge = cv2.resize(imge[:, (w - h)//2:(w - h)//2+h], cropped[0].shape)
                cv2.imshow('equal?', np.hstack((
                    cropped[0],
                    np.ones((cropped[0].shape[0], 1), dtype=cropped[0].dtype) * 255,
                    imge,
                )))
                cv2.waitKey()

                if i > 60:
                    quit()
Example #22
 def costfn(p, x, y):
     gamma, gamma_break, max_val, scale = tuple(map(abs, p))
     diff = ImageProc.adjust_gamma(x, gamma, gamma_break,
                                   max_val=max_val) * scale - y
     diff = tools.pseudo_huber_loss(120, diff)
     return np.sum(diff) if _USE_BFGS else diff
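tools.pseudo_huber_loss is assumed here to be the standard pseudo-Huber function with transition scale delta (quadratic near zero, linear in the tails); a minimal equivalent:

import numpy as np

def pseudo_huber_loss(delta, err):
    # smooth Huber approximation: ~err^2/2 for |err| << delta, ~delta*|err| beyond
    return delta**2 * (np.sqrt(1 + (err / delta)**2) - 1)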
Example #23
    def __init__(self,
                 cam,
                 gain,
                 exposure,
                 timestamp,
                 raw_image,
                 background_img,
                 bg_offset=0,
                 bits=8,
                 applied_gamma=1.0,
                 applied_gamma_break=0.0,
                 applied_bgr_mx=None,
                 debug=False):
        self.id = Frame.CURRENT_ID
        Frame.CURRENT_ID += 1

        self.cam = [cam] if isinstance(cam, Camera) else cam
        self.resize_scale = raw_image.shape[1] / self.cam[0].width
        for c in self.cam:
            c.height, c.width = raw_image.shape[:2]
        self.bits = bits = int(bits)
        self.gain = gain
        self.exposure = exposure
        self.timestamp = timestamp
        self.raw_image = raw_image
        self.applied_gamma = applied_gamma
        self.applied_gamma_break = applied_gamma_break
        self.applied_bgr_mx = applied_bgr_mx
        self.debug = debug

        img_bits = int(str(raw_image.dtype)[4:])
        max_val = 2**img_bits - 1
        img = raw_image.astype('float')

        # NOTE: NanoCam has this, doesn't make sense in general!
        operation_order = reversed((
            'ex_gamma',
            'depth',
            'color',
            'gamma',
        ))

        for op in operation_order:
            if op == 'depth' and img_bits != bits:
                img = ImageProc.change_color_depth(img, img_bits, bits)
                max_val = 2**bits - 1
            if op == 'gamma' and applied_gamma != 1.0:
                img = ImageProc.adjust_gamma(img,
                                             applied_gamma,
                                             gamma_break=applied_gamma_break,
                                             inverse=True,
                                             max_val=max_val)
            if op == 'color' and applied_bgr_mx is not None:
                img = ImageProc.color_correct(img,
                                              applied_bgr_mx,
                                              inverse=True,
                                              max_val=max_val)
            # if op == 'ex_gamma' and GAMMA_ADJUSTMENT:
            #     img = ImageProc.adjust_gamma(img, GAMMA_ADJUSTMENT, inverse=True, max_val=max_val)

        self.background_img = background_img
        if background_img is not None:
            self.image = ImageProc.remove_bg(img,
                                             background_img,
                                             gain=1,
                                             offset=bg_offset,
                                             max_val=max_val)
        elif self.MISSING_BG_REMOVE_STRIPES:
            for k in range(img.shape[2]):
                img[:, :, k] -= np.percentile(img[:, :, k], 50,
                                              axis=0).reshape((1, -1))
                img[:, :, k] -= np.percentile(img[:, :, k], 50,
                                              axis=1).reshape((-1, 1))
            img += bg_offset - np.min(img)
            self.image = np.clip(img, 0, max_val)
        else:
            self.image = img

        if bg_offset is not False:
            self.image = np.round(self.image).astype('uint16')

        self.measures = []
Example #24
File: stars.py Project: oknuutti/visnav-py
            plt.show()

        for hd in (
                48737, 35468, 39801
        ):  # Lambda Orionis (HD36861) Teff too high for model (37689K)
            fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd
            fdat = fits.getdata(fname)
            teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)]
            if teff > 30000:
                logg = max(logg, 4.0)
            testf(fdat, teff, logg, feh or 0)

        quit()

#    cam = RosettaSystemModel(focused_attenuated=False).cam
    cam = DidymosSystemModel(use_narrow_cam=True).cam

    #    cam_q = tools.rand_q(math.radians(180))
    cam_q = quaternion.one
    for i in range(100):
        cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q
        flux_density = Stars.flux_density(cam_q, cam)
        img = cam.sense(flux_density, exposure=2, gain=2)

        img = np.clip(img * 255, 0, 255).astype('uint8')
        img = ImageProc.adjust_gamma(img, 1.8)

        sc = min(768 / cam.width, 768 / cam.height)
        cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc))
        cv2.waitKey()
    print('done')
Example #25
File: lab.py Project: oknuutti/visnav-py
    def detect_source(self, kernel, total_radiation=False):
        assert kernel.shape[0] % 2 and kernel.shape[
            1] % 2, 'kernel width and height must be odd numbers'
        kernel = self.gain * self.exposure * kernel
        fkernel = ImageProc.fuzzy_kernel(kernel, self.impulse_spread)
        method = [cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF][0]
        corr = cv2.matchTemplate(self.image.astype(np.float32),
                                 fkernel.astype(np.float32), method)
        _, _, minloc, maxloc = cv2.minMaxLoc(
            corr)  # minval, maxval, minloc, maxloc
        loc = minloc if method in (cv2.TM_SQDIFF,
                                   cv2.TM_SQDIFF_NORMED) else maxloc
        loc_i = tuple(np.round(np.array(loc)).astype(int))

        if SHOW_LAB_MEASURES:
            sc = 1024 / self.image.shape[1]
            img = cv2.resize(self.image.astype(float), None, fx=sc,
                             fy=sc) / np.max(self.image)
            center = np.array(loc) + np.flip(fkernel.shape[:2]) / 2
            img = cv2.circle(
                img, tuple(np.round(center * sc).astype(int)),
                round((kernel.shape[0] - self.impulse_spread) / 2 * sc),
                [0, 0, 1.0])
            cv2.imshow('te', img)
            print('waiting...', end='', flush=True)
            cv2.waitKey()
            print('done')

        if True:
            # use result as a mask to calculate mean and variance
            kh, kw = np.array(fkernel.shape[:2]) // 2
            win = self.image[loc_i[1]:loc_i[1] + kh * 2 + 1,
                             loc_i[0]:loc_i[0] + kw * 2 + 1, :].reshape(
                                 (-1, 3))
            kernel_max = np.max(np.sum(kernel, axis=2))
            mask = np.sum(fkernel, axis=2) / kernel_max > 0.95
            mean = np.median(win[mask.flatten(), :], axis=0)
            std = np.std(win[mask.flatten(), :], axis=0)
            n = np.sum(mask)

            tmp = np.zeros(self.image.shape[:2])
            tmp[loc_i[1]:loc_i[1] + kh * 2 + 1,
                loc_i[0]:loc_i[0] + kw * 2 + 1] = mask
            img_m = tmp

        else:
            # calculate a correlation channel (over whole image)
            k = kernel.shape[0] // 2
            corr = cv2.matchTemplate(
                self.image.astype(np.float32),
                kernel[k:k + 1, k:k + 1, :].astype(np.float32), method)

            # calculate mean & variance of kernel area using corr channel
            win = corr[loc_i[1] - k:loc_i[1] + k + 1,
                       loc_i[0] - k:loc_i[0] + k + 1]
            corr_mean = np.mean(win)
            corr_std = np.std(win)

            # threshold using mean - sd
            _, mask = cv2.threshold(corr, corr_mean - corr_std, 1,
                                    cv2.THRESH_BINARY)

            # dilate & erode to remove inner spots
            krn1 = ImageProc.bsphkern(round(1.5 * corr.shape[0] / 512) * 2 + 1)
            krn2 = ImageProc.bsphkern(round(2 * corr.shape[0] / 512) * 2 + 1)
            mask = cv2.dilate(mask, krn1, iterations=1)  # remove holes
            mask = cv2.erode(mask, krn2, iterations=1)  # same size
            mask = mask.astype(bool)

            # use result as a mask to calculate mean and variance
            mean = np.mean(self.image.reshape((-1, 3))[mask.flatten()], axis=0)
            std = np.std(self.image.reshape((-1, 3))[mask.flatten()], axis=0)
            n = np.sum(mask)

        if self.debug:
            sc = 1024 / self.image.shape[1]
            img_m = np.repeat(np.atleast_3d(img_m.astype(np.uint8) * 127),
                              3,
                              axis=2)
            merged = ImageProc.merge(
                (self.image.astype(np.float32) / np.max(self.image),
                 img_m.astype(np.float32) / 255))
            img = cv2.resize(merged, None, fx=sc, fy=sc)
            cv2.imshow('te', img)
            arr_n, lims, _ = plt.hist(self.image[:, :, 1].flatten(),
                                      bins=np.max(self.image) + 1,
                                      log=True,
                                      histtype='step')
            plt.hist(win[mask.flatten(), 1].flatten(),
                     bins=np.max(self.image) + 1,
                     log=True,
                     histtype='step')
            x = np.linspace(0, np.max(win), np.max(win) + 1)
            i = list(np.logical_and(lims[1:] > mean[1], arr_n > 0)).index(True)
            plt.plot(x, arr_n[i] * np.exp(-((x - mean[1]) / std[1])**2))
            plt.ylim(1e-1, 1e6)
            plt.figure()
            plt.imshow(self.image[loc_i[1]:loc_i[1] + kh * 2 + 1,
                                  loc_i[0]:loc_i[0] + kw * 2 + 1, :] /
                       np.max(self.image))
            print('waiting (%.1f, %.1f)...' % (mean[1], std[1]),
                  end='',
                  flush=True)
            cv2.waitKey(1)
            plt.show()
            print('done')

        return LabMeasure(self, mean, std, n)
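
The localization step in detect_source boils down to template matching followed by min/max selection: with the TM_SQDIFF-type methods the best match is the minimum of the response surface, otherwise the maximum. A condensed, self-contained sketch of just that step:

import numpy as np
import cv2

def locate_kernel(image, kernel, method=cv2.TM_CCORR_NORMED):
    # Slide the kernel over the image and pick the best-response location.
    corr = cv2.matchTemplate(image.astype(np.float32),
                             kernel.astype(np.float32), method)
    _, _, minloc, maxloc = cv2.minMaxLoc(corr)
    # TM_SQDIFF* measure dissimilarity, so their best match is the minimum.
    return minloc if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED) else maxloc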
Example #26
    writer = cv2.VideoWriter(target_file, cv2.VideoWriter_fourcc(*codecs[0]),
                             framerate, (dw, dh))
    imgs = []
    times = []
    try:
        for i, f in enumerate(img_files):
            if i % skip_mult == 0:
                tools.show_progress(
                    len(img_files) // skip_mult, i // skip_mult)
                img = cv2.imread(os.path.join(folder, f), cv2.IMREAD_COLOR)
                if sw != dw or sh != dh:
                    img = cv2.resize(img, (dw, dh),
                                     interpolation=cv2.INTER_AREA)
                if exposure:
                    # blend images to simulate blur due to long exposure times
                    timestr = f[0:17]
                    time = datetime.datetime.strptime(timestr,
                                                      '%Y-%m-%dT%H%M%S')
                    imgs.append(img)
                    times.append(time)
                    # np.where returns a tuple of index arrays; take the first
                    idxs = np.where(
                        np.array(times) > time -
                        datetime.timedelta(seconds=exposure))[0]
                    if len(idxs) < np.ceil(exposure):
                        continue
                    img = ImageProc.merge(np.array(imgs)[idxs])

                writer.write(img)
    finally:
        writer.release()
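
ImageProc.merge is not shown in this snippet; for blending the frames that fall inside the exposure window, a simple per-pixel average is a plausible stand-in (the real implementation may weight or align frames differently):

import numpy as np

def merge(imgs):
    # Average the stacked frames to approximate the blur accumulated
    # over a long exposure; assumes equally weighted, aligned frames.
    return np.mean(np.stack(imgs).astype(np.float32), axis=0).astype(np.uint8)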
Example #27
    real = cv2.imread(os.path.join(sm.asteroid.image_db_path, img + '_P.png'),
                      cv2.IMREAD_GRAYSCALE)

    if 1:
        img, dist = ab.render(shadows=True,
                              textures=textures,
                              reflection=model,
                              depth=True)
        base_loc = np.array(sm.spacecraft_pos) + np.array([-0.1, 1.1, 0])

        if 1:
            img = ImageProc.add_jets(
                sm.cam,
                img,
                dist < 1000, (base_loc, base_loc, base_loc), (0.6, 0.4, 0.8),
                phase_angles=(math.radians(60), None, None),
                directions=(math.radians(120), None, None),
                intensities=(0.35, 0.2, 0.3),
                angular_radii=(np.pi / 30, np.pi / 20, np.pi / 40),
                down_scaling=6)
        if 1:
            img = ImageProc.add_haze(img, dist < 1000, 0.15)

        if 1:
            sc = min(1536 / img.shape[1], 1024 / img.shape[0])
            cv2.imshow('jets', cv2.resize(img, None, fx=sc, fy=sc))
            cv2.waitKey()
        else:
            import matplotlib.pyplot as plt
            plt.imshow(img)
            plt.show()
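
ImageProc.add_haze receives the rendered image, a mask of near-object pixels (dist < 1000) and a haze level of 0.15. Its implementation isn't shown; a hypothetical stand-in could blend the masked pixels toward full brightness by the given fraction:

import numpy as np

def add_haze(img, mask, level):
    # Hypothetical stand-in: lift masked pixels toward saturation by `level`.
    maxv = 255.0 if img.dtype == np.uint8 else max(float(np.max(img)), 1.0)
    out = img.astype(np.float32)
    out[mask] = out[mask] * (1.0 - level) + maxv * level
    return np.clip(out, 0, maxv).astype(img.dtype)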
Example #28
    def remove_background(self, img):
        res_img, h, th = ImageProc.process_target_image(img)
        return res_img, th
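
ImageProc.process_target_image returns the processed image, a middle value (unused here) and the background threshold it settled on. Its internals aren't shown; a rough sketch under the assumption that it picks the threshold with Otsu's method and zeroes everything below it (the middle return value is a None placeholder):

import cv2

def process_target_image(img):
    # Assumed behaviour only: Otsu picks a global background threshold on
    # an 8-bit grayscale image, THRESH_TOZERO clears pixels below it.
    th, res = cv2.threshold(img, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)
    return res, None, th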
Example #29
    def solve_pnp(self,
                  orig_sce_img,
                  outfile,
                  feat=ORB,
                  use_feature_db=False,
                  adjust_sc_rot=False,
                  add_noise=False,
                  scale_cam_img=False,
                  vary_scale=False,
                  match_mask_params=None,
                  verbose=1,
                  **kwargs):

        # set max mem usable by reference features, scene features use rest of MAX_WORK_MEM
        ref_max_mem = KeypointAlgo.FDB_MAX_MEM if use_feature_db else KeypointAlgo.MAX_WORK_MEM / 2
        sm = self.system_model
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        self._render_z = -sm.min_med_distance
        init_z = kwargs.get('init_z', self._render_z)
        ref_img_sc = min(
            1, self._render_z / init_z) * (sm.view_width if scale_cam_img else
                                           self._cam.width) / sm.view_width
        self.extra_values = None

        if outfile is not None:
            self.debug_filebase = outfile + (self.DEBUG_IMG_POSTFIX
                                             if isinstance(orig_sce_img, str)
                                             else '')

        if self.est_real_ast_orient:
            # so that the rotation of 67P can be tracked
            sm.reset_to_real_vals()

        if use_feature_db and self._fdb_helper is None:
            from visnav.algo.fdbgen import FeatureDatabaseGenerator
            self._fdb_helper = FeatureDatabaseGenerator(
                self.system_model, self.render_engine, self.obj_idx)

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        if add_noise:
            self._shape_model_rng = np.max(
                np.ptp(sm.asteroid.real_shape_model.vertices, axis=0))

        self.timer = Stopwatch()
        self.timer.start()

        if use_feature_db:
            if KeypointAlgo.FDB_REAL:
                # find correct set of keypoints & descriptors from features db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._query_fdb(feat)
            else:
                # calculate on-the-fly exactly the same features that would be returned from a feature db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._fake_fdb(feat)
        else:
            # render model image
            ref_img, depth_result = self.render_ref_img(ref_img_sc)

            if False:
                # normalize ref_img to match sce_img
                ref_img = ImageProc.equalize_brightness(ref_img,
                                                        orig_sce_img,
                                                        percentile=99.999,
                                                        image_gamma=1.8)

            if False:
                gamma = 1.0 / 1.8
                ref_img = ImageProc.adjust_gamma(ref_img, gamma)
                orig_sce_img = ImageProc.adjust_gamma(orig_sce_img, gamma)

            # get keypoints and descriptors
            ee = sm.pixel_extent(abs(self._render_z))
            ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(
                ref_img,
                feat,
                maxmem=ref_max_mem,
                max_feats=KeypointAlgo.MAX_FEATURES,
                for_ref=True,
                expected_pixel_extent=ee)

        if BATCH_MODE and self.debug_filebase:
            # save start situation in log archive
            self.timer.stop()
            img1 = cv2.resize(orig_sce_img, (sm.view_width, sm.view_height))
            img2 = cv2.resize(ref_img, (sm.view_width, sm.view_height))
            cv2.imwrite(self.debug_filebase + 'a.png',
                        np.concatenate((img1, img2), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((img1, img2), axis=1))
            self.timer.start()

        # AKAZE, SIFT and SURF are truly scale invariant; couldn't get ORB to work as well
        vary_scale = vary_scale if feat == self.ORB else False

        if len(ref_kp) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException(
                'Too few (%d) reference features found' % (len(ref_kp), ))

        ok = False
        for i in range(self.MAX_SCENE_SCALE_STEPS):
            try:
                # resize scene image if necessary
                sce_img_sc = (sm.view_width
                              if scale_cam_img else self._cam.width
                              ) / self._cam.width / self.SCENE_SCALE_STEP**i
                if np.isclose(sce_img_sc, 1):
                    sce_img = orig_sce_img
                else:
                    sce_img = cv2.resize(orig_sce_img,
                                         None,
                                         fx=sce_img_sc,
                                         fy=sce_img_sc,
                                         interpolation=cv2.INTER_AREA)

                # detect features in scene image
                sce_max_mem = KeypointAlgo.MAX_WORK_MEM - (
                    KeypointAlgo.BYTES_PER_FEATURE[feat] + 12) * len(ref_desc)
                ee = sm.pixel_extent(abs(match_mask_params[2])) \
                    if match_mask_params is not None else 0
                sce_kp, sce_desc, self._latest_detector = KeypointAlgo.detect_features(
                    sce_img,
                    feat,
                    maxmem=sce_max_mem,
                    max_feats=KeypointAlgo.MAX_FEATURES,
                    expected_pixel_extent=ee)
                if len(sce_kp) < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'Too few (%d) scene features found' % (len(sce_kp), ))

                # match descriptors
                try:
                    mask = None
                    if match_mask_params is not None:
                        mask = KeypointAlgo.calc_match_mask(
                            sm, sce_kp, ref_kp, self._render_z, sce_img_sc,
                            ref_img_sc, *match_mask_params)
                    matches = KeypointAlgo.match_features(
                        sce_desc,
                        ref_desc,
                        self._latest_detector.defaultNorm(),
                        mask=mask,
                        method='brute')
                    error = None
                except PositioningException as e:
                    matches = []
                    error = e

                # debug by drawing matches
                if verbose > 0 and (not BATCH_MODE or DEBUG):
                    logger.info('matches: %s/%s' %
                                (len(matches), min(len(sce_kp), len(ref_kp))))

                if verbose > 1:
                    self._draw_matches(sce_img,
                                       sce_img_sc,
                                       sce_kp,
                                       ref_img,
                                       ref_img_sc,
                                       ref_kp,
                                       matches,
                                       pause=False,
                                       show=DEBUG)

                if error is not None:
                    raise error

                # select matched scene feature image coordinates
                sce_kp_2d = np.array([
                    tuple(np.divide(sce_kp[m.queryIdx].pt, sce_img_sc))
                    for m in matches
                ],
                                     dtype='float')

                # prepare reference feature 3d coordinates (for only matched features)
                if use_feature_db:
                    ref_kp_3d = ref_kp_3d[[m.trainIdx for m in matches], :]
                    if add_noise:
                        # add noise to noiseless 3d ref points from fdb
                        self.timer.stop()
                        ref_kp_3d, self.sm_noise, _ = tools.points_with_noise(
                            ref_kp_3d,
                            only_z=True,
                            noise_lv=SHAPE_MODEL_NOISE_LV[add_noise],
                            max_rng=self._shape_model_rng)
                        self.timer.start()
                else:
                    # get feature 3d points using 3d model
                    ref_kp_3d = KeypointAlgo.inverse_project(
                        sm, [ref_kp[m.trainIdx].pt for m in matches],
                        depth_result, self._render_z, ref_img_sc)

                if KeypointAlgo.DISCARD_OFF_OBJECT_FEATURES:
                    I = np.where(np.logical_not(np.isnan(ref_kp_3d[:, 0])))[0]
                    if len(I) < self.MIN_FEATURES:
                        raise PositioningException('Too few matches found')
                    sce_kp_2d = sce_kp_2d[I, :]
                    ref_kp_3d = ref_kp_3d[I, :]
                    matches = [matches[i] for i in I]

                # finally solve pnp with ransac
                rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(
                    sm, sce_kp_2d, ref_kp_3d, self._ransac_err)

                # debug by drawing inlier matches
                if verbose > 1:
                    self._draw_matches(sce_img,
                                       sce_img_sc,
                                       sce_kp,
                                       ref_img,
                                       ref_img_sc,
                                       ref_kp,
                                       [matches[i[0]] for i in inliers],
                                       label='c) inliers',
                                       pause=self._pause)

                inlier_count = self.count_inliers(sce_kp, ref_kp, matches,
                                                  inliers)
                if verbose > 0:
                    logger.info('inliers: %s/%s, ' %
                                (inlier_count, len(matches)))
                if inlier_count < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'RANSAC algorithm was left with too few inliers')

                # don't try again if enough inliers were found
                ok = True
                break

            except PositioningException as e:
                if not vary_scale:
                    raise e
                # maybe try again using a scaled-down scene image

        if not ok:
            raise PositioningException(
                'Not enough inliers even if tried scaling scene image down x%.1f'
                % (1 / sce_img_sc))
        elif vary_scale:
            logger.info('success at x%.1f' % (1 / sce_img_sc))

        self.timer.stop()

        # set model params to solved pose & pos
        self._set_sc_from_ast_rot_and_trans(rvec,
                                            tvec,
                                            self.latest_discretization_err_q,
                                            rotate_sc=adjust_sc_rot)

        # debugging
        if verbose > 0 and (not BATCH_MODE or DEBUG):
            rp_err = KeypointAlgo.reprojection_error(self._cam, sce_kp_2d,
                                                     ref_kp_3d, inliers, rvec,
                                                     tvec)
            sh_err = sm.calc_shift_err()

            logger.info(
                'repr-err: %.2f, rel-rot-err: %.2f°, dist-err: %.2f%%, lat-err: %.2f%%, shift-err: %.1fm'
                % (
                    rp_err,
                    math.degrees(sm.rel_rot_err()),
                    sm.dist_pos_err() * 100,
                    sm.lat_pos_err() * 100,
                    sh_err * 1000,
                ))

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(shadows=self.RENDER_SHADOWS)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'd.png',
                        np.concatenate((sce_img, res_img), axis=1))
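
KeypointAlgo.solve_pnp_ransac itself isn't shown above; at its core this kind of pose solver typically wraps cv2.solvePnPRansac. A hedged sketch, with cam_mx standing in for whatever camera matrix the system model provides:

import numpy as np
import cv2

def solve_pnp_ransac(cam_mx, sce_kp_2d, ref_kp_3d, ransac_err):
    # Estimate camera pose from 2D-3D correspondences; points whose
    # reprojection error exceeds ransac_err pixels are treated as outliers.
    ok, rvec, tvec, inliers = cv2.solvePnPRansac(
        ref_kp_3d.astype(np.float32), sce_kp_2d.astype(np.float32),
        cam_mx, None, iterationsCount=1000, reprojectionError=ransac_err)
    if not ok or inliers is None:
        raise ValueError('RANSAC failed to find a pose with enough inliers')
    return rvec, tvec, inliers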
Example #30
    def _render(self, params):
        time = params[0]
        sun_distance = np.linalg.norm(np.array(params[1][:3]))  # in meters
        sun_ast_v = tools.normalize_v(np.array(params[1][:3]))
        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=2)

        d1, d2 = self.asteroids
        q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        rel_rot_q = np.array([
            q * d1_q * d1.ast2sc_q.conj(), q * d2_q * d2.ast2sc_q.conj(),
            np.quaternion(1, 0, 1, 0).normalized()
        ])  # the last one is for the spacecraft
        rel_pos_v = np.array([
            tools.q_times_v(q, d1_v - sc_v),
            tools.q_times_v(q, d2_v - sc_v), [0, 0, 0]
        ])
        light_v = tools.q_times_v(q, sun_ast_v)

        self._maybe_load_objects()  # lazy load objects

        exp_range = (0.001, 3.5)
        for i in range(20):
            if self._autolevel and self._current_level:
                level = self._current_level
            else:
                level = 3 * 2.5 * 1.3e-3 if self._use_nac else 1.8 * 2.5 * 1.3e-3

            exp, gain = self._sm.cam.level_to_exp_gain(level, exp_range)

            img = TestLoop.render_navcam_image_static(self._sm,
                                                      self._renderer,
                                                      self._obj_idxs,
                                                      rel_pos_v,
                                                      rel_rot_q,
                                                      light_v,
                                                      sc_q,
                                                      sun_distance,
                                                      exposure=exp,
                                                      gain=gain,
                                                      auto_gain=False,
                                                      gamma=1.0,
                                                      use_shadows=True,
                                                      use_textures=True)
            if self._autolevel:
                v = np.percentile(img, 100 - 0.0003)
                level_trg = level * 170 / v
                print(
                    'autolevel (max_v=%.1f, e=%.3f, g=%.1f) current: %.3f, target: %.3f'
                    % (v, exp, gain, level, level_trg))

                self._current_level = level_trg if not self._current_level else \
                        (self._current_level*self._level_lambda + level_trg*(1-self._level_lambda))

                if v < 85 or (v == 255 and level > exp_range[0]):
                    level = level_trg if v < 85 else level * 70 / v
                    self._current_level = level
                    continue
            break

        if False:
            img = ImageProc.default_preprocess(img)

        date = datetime.fromtimestamp(time, pytz.utc)  # datetime.now()
        fname = os.path.join(self._logpath,
                             date.isoformat()[:-6].replace(':', '')) + '.png'
        cv2.imwrite(fname, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])

        return fname
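
The autolevel loop above retargets the exposure level so the 99.9997th-percentile pixel value lands near 170, then smooths the level across frames via self._level_lambda. The smoothing step in isolation, with `lam` as an assumed stand-in for that attribute:

def update_level(current, target, lam=0.9):
    # Exponential smoothing: keep a fraction `lam` of the old level so a
    # single unusually bright or dark frame cannot swing the exposure.
    return target if current is None else current * lam + target * (1.0 - lam)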