Example #1
def test_thumbnail_gamma_effect():
    img = np.zeros((1536, 2048))
    j = 1536 // 2 + 8
    I = np.array(tuple(range(8, 2048, 32))[:-1], dtype='int')
    mag = I[-1] * 0.85
    img[j - 8:j + 8, I] = np.array(I) * (mag / I[-1])
    img = ImageProc.apply_point_spread_fn(img, 0.4)
    print('max: %d' % np.max(img))

    img = np.clip(img, 0, 1023).astype('uint16')
    plt.imshow(img)
    plt.show()

    thb_real = cv2.resize(img,
                          None,
                          fx=1 / 16,
                          fy=1 / 16,
                          interpolation=cv2.INTER_AREA)
    plt.imshow(thb_real)
    plt.show()

    img_gamma = ImageProc.adjust_gamma(img, 2.2, 0.1, max_val=1023)
    thb_gamma = cv2.resize(img_gamma,
                           None,
                           fx=1 / 16,
                           fy=1 / 16,
                           interpolation=cv2.INTER_AREA)
    thb = ImageProc.adjust_gamma(thb_gamma, 2.2, 0.1, inverse=1, max_val=1023)

    x = thb_real[j // 16, I // 16]
    xf = img[j, I]
    xfg = img_gamma[j, I]
    yg = thb_gamma[j // 16, I // 16]
    y = thb[j // 16, I // 16]
    line = np.linspace(0, np.max(x))

    plt.plot(x, y, 'x')
    plt.plot(line, line)
    gamma, gamma_break, max_val, scale = fit_gamma(x, y)
    plt.plot(
        line,
        ImageProc.adjust_gamma(line, gamma, gamma_break, max_val=max_val) *
        scale)
    plt.show()
    quit()
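
Every example on this page exercises ImageProc.adjust_gamma(img, gamma, gamma_break, inverse, max_val). The library source is not reproduced here, but judging from the call sites it behaves like a standard power-law encoder/decoder; a minimal sketch under that assumption (omitting the linear toe near zero that gamma_break evidently controls, similar to sRGB):

import numpy as np

def adjust_gamma_sketch(img, gamma, inverse=False, max_val=255):
    # Hypothetical stand-in: encode with exponent 1/gamma, decode with gamma.
    x = np.asarray(img, dtype='float') / max_val
    y = x ** (gamma if inverse else 1.0 / gamma)
    return y * max_val

# round-tripping encode -> decode recovers the input
x = np.linspace(0, 1023, 5)
assert np.allclose(adjust_gamma_sketch(adjust_gamma_sketch(x, 2.2, max_val=1023),
                                       2.2, inverse=True, max_val=1023), x)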
Example #2
def output(img, show, maxval=1.0, gamma=1.0, outfile=None):
    img = ImageProc.adjust_gamma(maxval * img / np.max(img) * 255,
                                 gamma=gamma) / 255
    cv2.imwrite(outfile or (sys.argv[1] if len(sys.argv) > 1 else 'test.png'),
                (255 * img).astype('uint8'))
    if show:
        img_sc = cv2.resize(img, (700, 700))
        cv2.imshow('test.png', img_sc)
        return cv2.waitKey()
Example #3
    def show_image(self,
                   gain=1,
                   processed=False,
                   compare=False,
                   median_filter=False,
                   zero_bg=False,
                   save_as=None):
        img = self.image.astype('float')
        if processed:
            if zero_bg:
                img = np.clip(
                    img - np.min(img) - (0 if zero_bg is True else zero_bg), 0,
                    np.inf)
            img *= gain
            if median_filter:
                img = cv2.medianBlur(img.astype('uint16'), median_filter)
            img = ImageProc.color_correct(img,
                                          self.applied_bgr_mx,
                                          max_val=self.max_val)
            img = ImageProc.adjust_gamma(img,
                                         self.applied_gamma,
                                         self.applied_gamma_break,
                                         max_val=self.max_val)
        else:
            img = np.clip(img * gain, 0, 2**self.bits - 1)
        img = ImageProc.change_color_depth(img, self.bits, 8).astype('uint8')

        if save_as is not None:
            cv2.imwrite(save_as, img)

        s = self.image.shape
        if compare:
            img = np.hstack((self.raw_image.astype(img.dtype),
                             np.ones((s[0], 1, s[2]), dtype=img.dtype), img))

        sc = 1
        plt.imshow(np.flip(img, axis=2))
        plt.show()
        return img, sc
Example #4
    def write_img(raw_imgs, outfile):
        imgs = []
        for raw in raw_imgs:
            img = ImageProc.change_color_depth(raw.astype('float'), 8, bits)
            img = ImageProc.adjust_gamma(img,
                                         gamma,
                                         gamma_break=gamma_break,
                                         inverse=True,
                                         max_val=max_val)
            if bgr_cc_mx is not None:
                img = ImageProc.color_correct(img,
                                              bgr_cc_mx,
                                              inverse=True,
                                              max_val=max_val)
            imgs.append(np.expand_dims(img, axis=0))

        if len(imgs) == 1:
            imgs = imgs[0]

        stacked = np.stack(imgs, axis=0)
        reduced = np.median(stacked, axis=0) if len(imgs) > 2 else np.min(stacked, axis=0)
        bg_img = np.round(reduced).squeeze().astype('uint16')
        cv2.imwrite(outfile, bg_img, (cv2.CV_16U, ))
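
The median-vs-min choice at the end is the usual background-stacking trick: with more than two frames the per-pixel median rejects transient outliers, while with only two frames min is the safer reduction. A toy illustration (values hypothetical):

import numpy as np

# A per-pixel median across >2 frames rejects a transient bright outlier.
stack = np.stack([np.full((2, 2), 10.0) for _ in range(3)], axis=0)
stack[1, 0, 0] = 250.0  # e.g. a cosmic-ray hit in frame 1
assert np.median(stack, axis=0)[0, 0] == 10.0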
Example #5
    def detect_moon(self):
        x, y = tuple(int(v) for v in self.moon_loc)
        win = self.image[y - 24:y + 25, x - 29:x + 30, :]

        if 0:
            # TODO: what transformation would make this work?
            win = ImageProc.adjust_gamma(win / 662 * 1023,
                                         2.2,
                                         inverse=1,
                                         max_val=1023)

        h, w, s, c = *win.shape[0:2], 19, 18
        mask = np.zeros((h, w), dtype='uint8')
        kern = ImageProc.bsphkern(s * 2 + 1).astype('uint8')
        mask[(h // 2 - s):(h // 2 + s + 1), (w // 2 - s):(w // 2 + s + 1)] = kern
        mask[0:c, :] = 0

        if SHOW_MEASURES:
            mask_img = (mask.reshape((h, w, 1))
                        * np.array([255, 0, 0]).reshape((1, 1, 3))).astype('uint8')
            win_img = np.clip(win, 0, 255).astype('uint8')
            plt.imshow(np.flip(ImageProc.merge((mask_img, win_img)), axis=2))
            plt.show()

        mask = mask.flatten().astype('bool')
        n = np.sum(mask)
        measures = []
        for i, cam in enumerate(self.cam):
            raw_du = np.sum(win[:, :, i].flatten()[mask])
            bg_du = np.mean(win[:, :, i].flatten()[np.logical_not(mask)])
            du_count = raw_du - bg_du * n
            measures.append(MoonMeasure(self, i, du_count))
        self.measures = measures
        return measures
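
The per-channel measurement is plain aperture photometry: sum the signal inside the mask, then subtract the mean background level scaled by the aperture size n. In toy form (values hypothetical):

import numpy as np

vals = np.array([100.0, 100.0, 5.0, 5.0])  # two aperture px, two background px
mask = np.array([True, True, False, False])
n = np.sum(mask)
du_count = np.sum(vals[mask]) - np.mean(vals[~mask]) * n
assert du_count == 190.0  # background-subtracted digital units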
Example #6
    def solve_pnp(self,
                  orig_sce_img,
                  outfile,
                  feat=ORB,
                  use_feature_db=False,
                  adjust_sc_rot=False,
                  add_noise=False,
                  scale_cam_img=False,
                  vary_scale=False,
                  match_mask_params=None,
                  verbose=1,
                  **kwargs):

        # set max memory usable by reference features; scene features use the rest of MAX_WORK_MEM
        ref_max_mem = KeypointAlgo.FDB_MAX_MEM if use_feature_db else KeypointAlgo.MAX_WORK_MEM / 2
        sm = self.system_model
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        self._render_z = -sm.min_med_distance
        init_z = kwargs.get('init_z', self._render_z)
        ref_img_sc = (min(1, self._render_z / init_z)
                      * (sm.view_width if scale_cam_img else self._cam.width)
                      / sm.view_width)
        self.extra_values = None

        if outfile is not None:
            self.debug_filebase = outfile + (self.DEBUG_IMG_POSTFIX
                                             if isinstance(orig_sce_img, str)
                                             else '')

        if self.est_real_ast_orient:
            # so that we can track the rotation of 67P
            sm.reset_to_real_vals()

        if use_feature_db and self._fdb_helper is None:
            from visnav.algo.fdbgen import FeatureDatabaseGenerator
            self._fdb_helper = FeatureDatabaseGenerator(
                self.system_model, self.render_engine, self.obj_idx)

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        if add_noise:
            self._shape_model_rng = np.max(
                np.ptp(sm.asteroid.real_shape_model.vertices, axis=0))

        self.timer = Stopwatch()
        self.timer.start()

        if use_feature_db:
            if KeypointAlgo.FDB_REAL:
                # find correct set of keypoints & descriptors from features db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._query_fdb(feat)
            else:
                # calculate on the fly exactly the same features that a feature db would return
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._fake_fdb(feat)
        else:
            # render model image
            ref_img, depth_result = self.render_ref_img(ref_img_sc)

            if False:
                # normalize ref_img to match sce_img
                ref_img = ImageProc.equalize_brightness(ref_img,
                                                        orig_sce_img,
                                                        percentile=99.999,
                                                        image_gamma=1.8)

            if False:
                gamma = 1.0 / 1.8
                ref_img = ImageProc.adjust_gamma(ref_img, gamma)
                orig_sce_img = ImageProc.adjust_gamma(orig_sce_img, gamma)

            # get keypoints and descriptors
            ee = sm.pixel_extent(abs(self._render_z))
            ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(
                ref_img,
                feat,
                maxmem=ref_max_mem,
                max_feats=KeypointAlgo.MAX_FEATURES,
                for_ref=True,
                expected_pixel_extent=ee)

        if BATCH_MODE and self.debug_filebase:
            # save start situation in log archive
            self.timer.stop()
            img1 = cv2.resize(orig_sce_img, (sm.view_width, sm.view_height))
            img2 = cv2.resize(ref_img, (sm.view_width, sm.view_height))
            cv2.imwrite(self.debug_filebase + 'a.png',
                        np.concatenate((img1, img2), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((img1, img2), axis=1))
            self.timer.start()

        # AKAZE, SIFT and SURF are truly scale invariant; couldn't get ORB to work as well
        vary_scale = vary_scale if feat == self.ORB else False

        if len(ref_kp) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException(
                'Too few (%d) reference features found' % (len(ref_kp), ))

        ok = False
        for i in range(self.MAX_SCENE_SCALE_STEPS):
            try:
                # resize scene image if necessary
                sce_img_sc = ((sm.view_width if scale_cam_img else self._cam.width)
                              / self._cam.width / self.SCENE_SCALE_STEP**i)
                if np.isclose(sce_img_sc, 1):
                    sce_img = orig_sce_img
                else:
                    sce_img = cv2.resize(orig_sce_img,
                                         None,
                                         fx=sce_img_sc,
                                         fy=sce_img_sc,
                                         interpolation=cv2.INTER_AREA)

                # detect features in scene image
                sce_max_mem = KeypointAlgo.MAX_WORK_MEM - (
                    KeypointAlgo.BYTES_PER_FEATURE[feat] + 12) * len(ref_desc)
                ee = sm.pixel_extent(abs(match_mask_params[2])) if match_mask_params is not None else 0
                sce_kp, sce_desc, self._latest_detector = KeypointAlgo.detect_features(
                    sce_img,
                    feat,
                    maxmem=sce_max_mem,
                    max_feats=KeypointAlgo.MAX_FEATURES,
                    expected_pixel_extent=ee)
                if len(sce_kp) < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'Too few (%d) scene features found' % (len(sce_kp), ))

                # match descriptors
                try:
                    mask = None
                    if match_mask_params is not None:
                        mask = KeypointAlgo.calc_match_mask(
                            sm, sce_kp, ref_kp, self._render_z, sce_img_sc,
                            ref_img_sc, *match_mask_params)
                    matches = KeypointAlgo.match_features(
                        sce_desc,
                        ref_desc,
                        self._latest_detector.defaultNorm(),
                        mask=mask,
                        method='brute')
                    error = None
                except PositioningException as e:
                    matches = []
                    error = e

                # debug by drawing matches
                if verbose > 0 and (not BATCH_MODE or DEBUG):
                    logger.info('matches: %s/%s' %
                                (len(matches), min(len(sce_kp), len(ref_kp))))

                if verbose > 1:
                    self._draw_matches(sce_img,
                                       sce_img_sc,
                                       sce_kp,
                                       ref_img,
                                       ref_img_sc,
                                       ref_kp,
                                       matches,
                                       pause=False,
                                       show=DEBUG)

                if error is not None:
                    raise error

                # select matched scene feature image coordinates
                sce_kp_2d = np.array([tuple(np.divide(sce_kp[m.queryIdx].pt, sce_img_sc))
                                      for m in matches], dtype='float')

                # prepare reference feature 3d coordinates (for only matched features)
                if use_feature_db:
                    ref_kp_3d = ref_kp_3d[[m.trainIdx for m in matches], :]
                    if add_noise:
                        # add noise to noiseless 3d ref points from fdb
                        self.timer.stop()
                        ref_kp_3d, self.sm_noise, _ = tools.points_with_noise(
                            ref_kp_3d,
                            only_z=True,
                            noise_lv=SHAPE_MODEL_NOISE_LV[add_noise],
                            max_rng=self._shape_model_rng)
                        self.timer.start()
                else:
                    # get feature 3d points using 3d model
                    ref_kp_3d = KeypointAlgo.inverse_project(
                        sm, [ref_kp[m.trainIdx].pt for m in matches],
                        depth_result, self._render_z, ref_img_sc)

                if KeypointAlgo.DISCARD_OFF_OBJECT_FEATURES:
                    I = np.where(np.logical_not(np.isnan(ref_kp_3d[:, 0])))[0]
                    if len(I) < self.MIN_FEATURES:
                        raise PositioningException('Too few matches found')
                    sce_kp_2d = sce_kp_2d[I, :]
                    ref_kp_3d = ref_kp_3d[I, :]
                    matches = [matches[i] for i in I]

                # finally solve pnp with ransac
                rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(
                    sm, sce_kp_2d, ref_kp_3d, self._ransac_err)

                # debug by drawing inlier matches
                if verbose > 1:
                    self._draw_matches(sce_img,
                                       sce_img_sc,
                                       sce_kp,
                                       ref_img,
                                       ref_img_sc,
                                       ref_kp,
                                       [matches[i[0]] for i in inliers],
                                       label='c) inliers',
                                       pause=self._pause)

                inlier_count = self.count_inliers(sce_kp, ref_kp, matches,
                                                  inliers)
                if verbose > 0:
                    logger.info('inliers: %s/%s, ' %
                                (inlier_count, len(matches)))
                if inlier_count < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'RANSAC algorithm was left with too few inliers')

                # don't try again if enough inliers were found
                ok = True
                break

            except PositioningException as e:
                if not vary_scale:
                    raise e
                # maybe try again using a scaled-down scene image

        if not ok:
            raise PositioningException(
                'Not enough inliers even after scaling the scene image down x%.1f'
                % (1 / sce_img_sc))
        elif vary_scale:
            logger.info('success at x%.1f' % (1 / sce_img_sc))

        self.timer.stop()

        # set model params to solved pose & pos
        self._set_sc_from_ast_rot_and_trans(rvec,
                                            tvec,
                                            self.latest_discretization_err_q,
                                            rotate_sc=adjust_sc_rot)

        # debugging
        if verbose > 0 and (not BATCH_MODE or DEBUG):
            rp_err = KeypointAlgo.reprojection_error(self._cam, sce_kp_2d,
                                                     ref_kp_3d, inliers, rvec,
                                                     tvec)
            sh_err = sm.calc_shift_err()

            logger.info(
                'repr-err: %.2f, rel-rot-err: %.2f°, dist-err: %.2f%%, lat-err: %.2f%%, shift-err: %.1fm'
                % (
                    rp_err,
                    math.degrees(sm.rel_rot_err()),
                    sm.dist_pos_err() * 100,
                    sm.lat_pos_err() * 100,
                    sh_err * 1000,
                ))

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(shadows=self.RENDER_SHADOWS)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'd.png',
                        np.concatenate((sce_img, res_img), axis=1))
Example #7
def est_refl_model(hapke=True, iters=1, init_noise=0.0, verbose=True):
    sm = RosettaSystemModel()
    imgsize = (512, 512)
    imgs = {
        'ROS_CAM1_20140831T104353': 3.2,  # 60, 3.2s
        'ROS_CAM1_20140831T140853': 3.2,  # 62, 3.2s
        'ROS_CAM1_20140831T103933': 3.2,  # 65, 3.2s
        'ROS_CAM1_20140831T022253': 3.2,  # 70, 3.2s
        'ROS_CAM1_20140821T100719': 2.8,  # 75, 2.8s
        'ROS_CAM1_20140821T200718': 2.0,  # 80, 2.0s
        'ROS_CAM1_20140822T113854': 2.0,  # 85, 2.0s
        'ROS_CAM1_20140823T021833': 2.0,  # 90, 2.0s
        'ROS_CAM1_20140819T120719': 2.0,  # 95, 2.0s
        'ROS_CAM1_20140824T021833': 2.8,  # 100, 2.8s
        'ROS_CAM1_20140824T020853': 2.8,  # 105, 2.8s
        'ROS_CAM1_20140824T103934': 2.8,  # 110, 2.8s
        'ROS_CAM1_20140818T230718': 2.0,  # 113, 2.0s
        'ROS_CAM1_20140824T220434': 2.8,  # 120, 2.8s
        'ROS_CAM1_20140828T020434': 2.8,  # 137, 2.8s
        'ROS_CAM1_20140827T140434': 3.2,  # 145, 3.2s
        'ROS_CAM1_20140827T141834': 3.2,  # 150, 3.2s
        'ROS_CAM1_20140827T061834': 3.2,  # 155, 3.2s
        'ROS_CAM1_20140827T021834': 3.2,  # 157, 3.2s
        'ROS_CAM1_20140826T221834': 2.8,  # 160, 2.8s
    }

    target_exposure = np.min(list(imgs.values()))
    for img, exposure in imgs.items():
        real = cv2.imread(
            os.path.join(sm.asteroid.image_db_path, img + '_P.png'),
            cv2.IMREAD_GRAYSCALE)
        real = ImageProc.adjust_gamma(real, 1 / 1.8)
        #dark_px_lim = np.percentile(real, 0.1)
        #dark_px = np.mean(real[real<=dark_px_lim])
        real = cv2.resize(real, imgsize)
        # remove dark pixel intensity and normalize based on exposure
        #real = real - dark_px
        #real *= (target_exposure / exposure)
        imgs[img] = real

    re = RenderEngine(*imgsize, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.hires_target_model_file, smooth=False)
    ab = AlgorithmBase(sm, re, obj_idx)

    model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
    defs = RenderEngine.REFLMOD_PARAMS[model]

    if hapke:
        # L, th, w, b (scattering anisotropy), c (scattering direction from forward to back), B0, hs
        #real_ini_x = [515, 16.42, 0.3057, 0.8746]
        sppf_n = 2
        real_ini_x = defs[:2] + defs[3:3 + sppf_n]
        scales = np.array((500, 20, 3e-1, 3e-1))[:2 + sppf_n]
    else:
        ll_poly = 5
        #real_ini_x = np.array(defs[:7])
        real_ini_x = np.array(
            (9.95120e-01, -6.64840e-03, 3.96267e-05, -2.16773e-06, 2.08297e-08,
             -5.48768e-11, 1))  # theta=20
        real_ini_x = np.hstack((real_ini_x[0:ll_poly + 1], (real_ini_x[-1], )))
        scales = np.array((3e-03, 2e-05, 1e-06, 1e-08, 5e-11, 1))
        scales = np.hstack((scales[0:ll_poly], (scales[-1], )))

    def set_params(x):
        if hapke:
            # optimize J, th, w, b, (c), B_SH0, hs
            xsc = list(np.array(x) * scales)
            vals = xsc[:2] + [defs[2]] + xsc[2:] + defs[len(xsc) + 1:]
        else:
            vals = ([1] + list(np.array(x)[:-1] * scales[:-1])
                    + [0] * (5 - ll_poly) + [x[-1] * scales[-1], 0, 0, 0])
        RenderEngine.REFLMOD_PARAMS[model] = vals

    # debug 1: real vs synth, 2: err img, 3: both
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0
        for file, real in imgs.items():
            lblloader.load_image_meta(
                os.path.join(sm.asteroid.image_db_path, file + '.LBL'), sm)
            sm.swap_values_with_real_vals()
            synth2 = ab.render(shadows=True, reflection=model, gamma=1)
            err_img = (synth2.astype('float') - real)**2
            lim = np.percentile(err_img, 99)
            err_img[err_img > lim] = 0
            err += np.mean(err_img)
            if debug:
                if debug % 2:
                    cv2.imshow(
                        'real vs synthetic',
                        np.concatenate((real.astype('uint8'), 255 * np.ones(
                            (real.shape[0], 1), dtype='uint8'), synth2),
                                       axis=1))
                if debug > 1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img / np.max(err_img))
                cv2.waitKey()
        err /= len(imgs)
        if verbose:
            print('%s => %f' %
                  (', '.join(['%.4e' % i for i in np.array(x) * scales]), err))
        return err

    best_x = None
    best_err = float('inf')
    for i in range(iters):
        if hapke:
            ini_x = tuple(real_ini_x + init_noise *
                          np.random.normal(0, 1, (len(scales), )) * scales)
        else:
            ini_x = tuple(real_ini_x[1:-1] / real_ini_x[0] + init_noise *
                          np.random.normal(0, 1, (len(scales) - 1, )) *
                          scales[:-1]) + (real_ini_x[-1] * real_ini_x[0], )

        if verbose:
            print('\n\n\n==== i:%d ====\n' % i)
        res = minimize(
            costfun,
            tuple(ini_x / scales),
            args=(0, verbose),
            #method="BFGS", options={'maxiter': 10, 'eps': 1e-3, 'gtol': 1e-3})
            method="Nelder-Mead",
            options={
                'maxiter': 120,
                'xtol': 1e-4,
                'ftol': 1e-4
            })
        #method="COBYLA", options={'rhobeg': 1.0, 'maxiter': 200, 'disp': False, 'catol': 0.0002})
        if not verbose:
            print('%s => %f' %
                  (', '.join(['%.5e' % i
                              for i in np.array(res.x) * scales]), res.fun))

        if res.fun < best_err:
            best_err = res.fun
            best_x = res.x

    if verbose:
        costfun(best_x, 3, verbose=True)

    if hapke:
        x = tuple(best_x * scales)
    else:
        x = (1, ) + tuple(best_x * scales)
        if verbose:
            p = np.linspace(0, 160, 100)
            L = get_graph_L(20, p)
            plt.plot(p, L, p, Lfun(x[:-1], p))
            plt.show()

    return x
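
A minimal invocation, assuming the Rosetta image database referenced above is available locally:

# hypothetical invocation; requires the image db referenced above
params = est_refl_model(hapke=True, iters=3, init_noise=0.05)
print(params)

Note that newer SciPy releases spell the Nelder-Mead tolerances 'xatol' and 'fatol'; the 'xtol'/'ftol' keys used above were only honored by older versions.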
Example #8
    def render(self,
               obj_idxs,
               rel_pos_v,
               rel_rot_q,
               light_v,
               get_depth=False,
               shadows=True,
               textures=True,
               gamma=1.0,
               reflection=REFLMOD_LUNAR_LAMBERT,
               flux_density=False):

        obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
        rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
        rel_rot_q = np.array(rel_rot_q).reshape((-1, ))
        light_v = np.array(light_v)
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[0], \
            "obj_idxs, rel_pos_v and rel_rot_q dimensions don't match"

        shadow_mvps = None
        if shadows:
            shadow_mvps = self._render_shadowmap(obj_idxs, rel_pos_v,
                                                 rel_rot_q, light_v)

        self._fbo.use()
        self._ctx.enable(moderngl.DEPTH_TEST)
        self._ctx.enable(moderngl.CULL_FACE)
        self._ctx.front_face = 'ccw'  # cull back faces
        self._ctx.clear(0, 0, 0, 0, float('inf'))
        if shadows:
            self._shadow_map.use(RenderEngine._LOC_SHADOW_MAP)
            self._prog['shadow_map'].value = RenderEngine._LOC_SHADOW_MAP

        for i, obj_idx in enumerate(obj_idxs):
            self._set_params(obj_idx,
                             rel_pos_v[i],
                             rel_rot_q[i],
                             light_v,
                             shadow_mvps,
                             textures,
                             reflection,
                             prog=self._prog,
                             flux_density=flux_density)
            self._objs[obj_idx].render()

        if self._samples > 0:
            self._ctx.copy_framebuffer(self._fbo2, self._fbo)
            fbo = self._fbo2
            dbo = self._dbo2
        else:
            fbo = self._fbo
            dbo = self._dbo

        data = np.frombuffer(fbo.read(components=1, alignment=1, dtype='f4'),
                             dtype='f4').reshape((self._height, self._width))
        data = np.flipud(data)

        if get_depth:
            depth = np.frombuffer(dbo.read(alignment=1), dtype='f4').reshape(
                (self._height, self._width))
            depth = np.flipud(depth)

            # normalize depth
            if self._persp_proj:
                # for perspective projection
                a = -(self._frustum_far - self._frustum_near) / (
                    2.0 * self._frustum_far * self._frustum_near)
                b = (self._frustum_far + self._frustum_near) / (
                    2.0 * self._frustum_far * self._frustum_near)
                if self._frustum_far / self._frustum_near < 1e7:
                    depth = np.divide(1.0, (2.0 * a) * depth - (a - b))  # 1/((2*X-1)*a+b)
                else:
                    # float64 needed for near/far ratios of up to ~1e14
                    depth = np.divide(1.0, (2.0 * a) * depth.astype(np.float64)
                                      - (a - b)).astype(np.float32)
            else:
                # for orthographic projection
                #  - depth is between 0 and 1
                depth = depth * (self._frustum_far -
                                 self._frustum_near) + self._frustum_near

        # free memory to avoid memory leaks
        if shadows:
            self._shadow_map.release()

        if flux_density:
            data = data * flux_density
        else:
            data = np.clip(data * 255, 0, 255).astype('uint8')
            if gamma != 1.0:
                data = ImageProc.adjust_gamma(data, gamma)

        return (data, depth) if get_depth else data
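
The perspective branch linearizes depth-buffer values with 1/((2*X-1)*a+b). With a and b defined as above, X=0 maps back exactly to the near plane and X=1 to the far plane; a quick check with hypothetical frustum values:

import numpy as np

near, far = 0.1, 1000.0  # hypothetical frustum planes
a = -(far - near) / (2.0 * far * near)
b = (far + near) / (2.0 * far * near)
d = np.array([0.0, 0.5, 1.0])        # raw depth-buffer samples in [0, 1]
z = 1.0 / ((2.0 * a) * d - (a - b))  # same expression as in render()
assert np.isclose(z[0], near) and np.isclose(z[-1], far)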
Example #9
def costfn(p, x, y):
    gamma, gamma_break, max_val, scale = tuple(map(abs, p))
    diff = ImageProc.adjust_gamma(x, gamma, gamma_break,
                                  max_val=max_val) * scale - y
    diff = tools.pseudo_huber_loss(120, diff)
    return np.sum(diff) if _USE_BFGS else diff
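
Example #1 calls fit_gamma(x, y), and this costfn is evidently its objective: with _USE_BFGS it returns a scalar loss for scipy.optimize.minimize, otherwise a residual vector for a least-squares solver. A plausible reconstruction of the wrapper, assuming the residual-vector path:

import numpy as np
from scipy.optimize import leastsq

def fit_gamma(x, y):
    # Hypothetical wrapper: fit (gamma, gamma_break, max_val, scale) to the
    # thumbnail samples; initial guess is a plain 2.2 gamma curve.
    p0 = (2.2, 0.1, np.max(y), 1.0)
    p, _ = leastsq(costfn, p0, args=(x, y))
    return tuple(abs(v) for v in p)  # costfn uses abs(p), so report abs values too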
Example #10
            plt.show()

        # Lambda Orionis (HD36861): Teff too high for model (37689K)
        for hd in (48737, 35468, 39801):
            fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd
            fdat = fits.getdata(fname)
            teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)]
            if teff > 30000:
                logg = max(logg, 4.0)
            testf(fdat, teff, logg, feh or 0)

        quit()

#    cam = RosettaSystemModel(focused_attenuated=False).cam
    cam = DidymosSystemModel(use_narrow_cam=True).cam

    #    cam_q = tools.rand_q(math.radians(180))
    cam_q = quaternion.one
    for i in range(100):
        cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q
        flux_density = Stars.flux_density(cam_q, cam)
        img = cam.sense(flux_density, exposure=2, gain=2)

        img = np.clip(img * 255, 0, 255).astype('uint8')
        img = ImageProc.adjust_gamma(img, 1.8)

        sc = min(768 / cam.width, 768 / cam.height)
        cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc))
        cv2.waitKey()
    print('done')
Example #11
    def __init__(self,
                 cam,
                 gain,
                 exposure,
                 timestamp,
                 raw_image,
                 background_img,
                 bg_offset=0,
                 bits=8,
                 applied_gamma=1.0,
                 applied_gamma_break=0.0,
                 applied_bgr_mx=None,
                 debug=False):
        self.id = Frame.CURRENT_ID
        Frame.CURRENT_ID += 1

        self.cam = [cam] if isinstance(cam, Camera) else cam
        self.resize_scale = raw_image.shape[1] / self.cam[0].width
        for c in self.cam:
            c.height, c.width = raw_image.shape[:2]
        self.bits = bits = int(bits)
        self.gain = gain
        self.exposure = exposure
        self.timestamp = timestamp
        self.raw_image = raw_image
        self.applied_gamma = applied_gamma
        self.applied_gamma_break = applied_gamma_break
        self.applied_bgr_mx = applied_bgr_mx
        self.debug = debug

        img_bits = int(str(raw_image.dtype)[4:])
        max_val = 2**img_bits - 1
        img = raw_image.astype('float')

        # NOTE: NanoCam has this; it doesn't make sense in general!
        operation_order = reversed((
            'ex_gamma',
            'depth',
            'color',
            'gamma',
        ))

        for op in operation_order:
            if op == 'depth' and img_bits != bits:
                img = ImageProc.change_color_depth(img, img_bits, bits)
                max_val = 2**bits - 1
            if op == 'gamma' and applied_gamma != 1.0:
                img = ImageProc.adjust_gamma(img,
                                             applied_gamma,
                                             gamma_break=applied_gamma_break,
                                             inverse=True,
                                             max_val=max_val)
            if op == 'color' and applied_bgr_mx is not None:
                img = ImageProc.color_correct(img,
                                              applied_bgr_mx,
                                              inverse=True,
                                              max_val=max_val)
            # if op == 'ex_gamma' and GAMMA_ADJUSTMENT:
            #     img = ImageProc.adjust_gamma(img, GAMMA_ADJUSTMENT, inverse=True, max_val=max_val)

        self.background_img = background_img
        if background_img is not None:
            self.image = ImageProc.remove_bg(img,
                                             background_img,
                                             gain=1,
                                             offset=bg_offset,
                                             max_val=max_val)
        elif self.MISSING_BG_REMOVE_STRIPES:
            for k in range(img.shape[2]):
                img[:, :, k] -= np.percentile(img[:, :, k], 50,
                                              axis=0).reshape((1, -1))
                img[:, :, k] -= np.percentile(img[:, :, k], 50,
                                              axis=1).reshape((-1, 1))
            img += bg_offset - np.min(img)
            self.image = np.clip(img, 0, max_val)
        else:
            self.image = img

        if bg_offset is not False:
            self.image = np.round(self.image).astype('uint16')

        self.measures = []
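
The reversed() over operation_order encodes a general rule: if the camera produced raw_image as gamma(color(depth(img))), then recovering img requires applying the inverse operations in the opposite order. A toy demonstration with hypothetical stand-in operations:

import numpy as np

depth = lambda v: v * 4.0          # toy stand-ins for the pipeline steps
color = lambda v: v + 3.0
gamma = lambda v: v ** 0.5
depth_inv = lambda v: v / 4.0
color_inv = lambda v: v - 3.0
gamma_inv = lambda v: v ** 2.0

x = np.array([1.0, 2.0])
encoded = gamma(color(depth(x)))
decoded = depth_inv(color_inv(gamma_inv(encoded)))  # inverses, reverse order
assert np.allclose(decoded, x)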