Example No. 1
    def prepare(self):
        self._check_params()

        if self.is_dirty():
            if self._renderer is not None:
                del self._renderer

            self._renderer = RenderEngine(self._width, self._height, antialias_samples=self._samples)
            if self.verbose:
                print('loading objects to engine...', end='', flush=True)

            for name, (_, obj) in self._objs.items():
                idx = self._renderer.load_object(obj.model)
                self._objs[name][0] = idx

            self.clear_dirty()
            if self.verbose:
                print('done')
        if self.stars:
            if not os.path.exists(RenderScene.STAR_DB):
                if self.verbose:
                    print('downloading star catalog...', end='', flush=True)
                RenderController.download_file(RenderScene.STAR_DB_URL, RenderScene.STAR_DB)
                if self.verbose:
                    print('done')
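The prepare() method above rebuilds the render engine only when the scene is flagged dirty, and fetches the star catalog on first use. Below is a minimal, self-contained sketch of that lazy-rebuild pattern; the class and attribute names are hypothetical and only illustrate the idea, they are not part of the library.

class LazyScene:
    """Illustrative only: rebuild an expensive resource only when inputs have changed."""

    def __init__(self):
        self._dirty = True       # any setter that changes scene inputs would set this
        self._renderer = None

    def is_dirty(self):
        return self._dirty

    def clear_dirty(self):
        self._dirty = False

    def prepare(self):
        if self.is_dirty():
            self._renderer = object()   # stands in for RenderEngine(width, height, ...)
            self.clear_dirty()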
Example No. 2
def main(outfile='render-test.jpg'):
    logger = logging.getLogger(__name__)
    logger.info('Setting up renderer and loading the 3d model...')

    sm = RosettaSystemModel(hi_res_shape_model=False, skip_obj_load=True)

    re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.1 * sm.min_distance, 1.1 * sm.max_distance)
    obj_path = os.path.join(os.path.dirname(__file__), 'data', '67p-16k.obj')
    obj_idx = re.load_object(obj_path)

    pos = np.array([0, 0, -sm.min_med_distance * 1])
    q = tools.angleaxis_to_q((math.radians(-20), 0, 1, 0))
    light_v = np.array([1, 0, -1]) / math.sqrt(2)

    logger.info('Start rendering...')
    with tools.Stopwatch() as time:
        img = re.render(obj_idx, pos, q, light_v, gamma=1.8, get_depth=False, shadows=True, textures=True,
                        reflection=RenderEngine.REFLMOD_HAPKE)

    logger.info('Rendered one image in %f secs' % time.elapsed)
    ok = cv2.imwrite(outfile, img)
    if ok:
        logger.info('Result saved in file %s' % outfile)
    else:
        logger.warning('Failed to save image in file %s' % outfile)
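tools.Stopwatch is used above as a context manager whose elapsed attribute holds the wall-clock rendering time. A self-contained equivalent is sketched below for reference only; the actual implementation in visnav.algo.tools may differ.

import time


class Stopwatch:
    """Time a with-block; the result is available in .elapsed (seconds)."""

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.elapsed = time.perf_counter() - self._start
        return False  # do not suppress exceptions raised inside the block


# usage: with Stopwatch() as t: ...; print(t.elapsed)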
Example No. 3
def render_didymos(show=False):
    sm = DidymosSystemModel(use_narrow_cam=False,
                            target_primary=False,
                            hi_res_shape_model=False)

    re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, 2)
    obj_idx = re.load_object(sm.asteroid.real_shape_model)

    pos = np.array([0, 0, -sm.min_med_distance * 1])
    q = tools.angleaxis_to_q((math.radians(20), 0, 1, 0))
    light_v = np.array([1, 0, -1]) / math.sqrt(2)

    img = re.render(obj_idx,
                    pos,
                    q,
                    light_v,
                    gamma=1.8,
                    get_depth=False,
                    shadows=True,
                    textures=True,
                    reflection=RenderEngine.REFLMOD_HAPKE)

    output(img, show)
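tools.angleaxis_to_q((angle, x, y, z)) is used throughout these examples to build a rotation quaternion from an angle (in radians) and a rotation axis. A possible equivalent using the numpy-quaternion package that the examples already rely on is sketched below; this is an assumption about the helper's convention, and the real implementation may differ (e.g. in axis normalization).

import math
import numpy as np
import quaternion  # numpy-quaternion package


def angleaxis_to_q(aa):
    """Quaternion for a rotation of aa[0] radians about the axis aa[1:4]."""
    angle, axis = aa[0], np.asarray(aa[1:4], dtype=float)
    axis = axis / np.linalg.norm(axis)                 # defensive normalization
    return quaternion.from_rotation_vector(angle * axis)


q = angleaxis_to_q((math.radians(20), 0, 1, 0))  # same call pattern as above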
Example No. 4
def render_67p(show=False):
    sm = RosettaSystemModel(hi_res_shape_model=False, res_mult=1.0)
    re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, sm.max_distance)
    obj_idx = re.load_object(sm.asteroid.real_shape_model)

    RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = [
        553.38,  # J         0
        27,  # th_p      27
        0.034,  # w         0.034
        -0.08,  # b         -0.078577
        0,  # c         0
        2.25,  # B_SH0     2.25
        math.radians(0.061),  # hs        math.radians(0.061)
        0,  # B_CB0     0
        0.005,  # hc        0.005
        1,  # K         1
    ]

    g_sc_q = tools.angleaxis_to_q([1.892926, 0.781228, -0.540109, -0.312995])
    # x, y, z = 69.54, 64.11, 162.976134
    x, y, z = 146.540, 167.110, 154.976  # asteroid
    g_ast_q = get_ast_q(x, y, z)
    g_sol_ast_v = np.array([163613595198.0, 101637176823.0, 36457220690.0
                            ]) * 1e-3
    g_sol_sc_v = np.array([163613518304.0, 101637309778.0, 36457190373.0
                           ]) * 1e-3
    g_sc_ast_v = g_sol_ast_v - g_sol_sc_v
    l_ast_sc_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                 g_sc_ast_v)
    l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
    ) * g_ast_q * SystemModel.sc2gl_q
    l_light_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                g_sol_ast_v / np.linalg.norm(g_sol_ast_v))

    l_vx_ast_q = SystemModel.sc2gl_q.conj() * quaternion.one.conj(
    ) * g_ast_q * SystemModel.sc2gl_q
    print(str(l_vx_ast_q))
    particles = load_particles(
        r'C:\projects\sispo\data\models\Jets--ROS_CAM1_20150710T074301.exr',
        lf_ast_q=l_vx_ast_q,
        cell_size=0.066667)

    a, b, c = [0] * 3
    w = 10
    while True:
        print('%.3f, %.3f, %.3f' % (x + a, y + b, z + c))

        if particles is None and 0:
            img = re.render(obj_idx,
                            l_ast_sc_v,
                            l_ast_q,
                            l_light_v,
                            flux_density=1.0,
                            gamma=1.0,
                            get_depth=False,
                            shadows=True,
                            textures=False,
                            reflection=RenderEngine.REFLMOD_HAPKE)
        else:
            img = TestLoop.render_navcam_image_static(
                None,
                re, [obj_idx],
                rel_pos_v=l_ast_sc_v,
                rel_rot_q=l_ast_q,
                light_v=l_light_v,
                sc_q=g_sc_q,
                sun_distance=np.linalg.norm(g_sol_ast_v) * 1e3,
                exposure=None,
                gain=None,
                gamma=1.8,
                auto_gain=True,
                reflmod_params=RenderEngine.REFLMOD_PARAMS[
                    RenderEngine.REFLMOD_HAPKE],
                use_shadows=True,
                use_textures=False,
                cam=sm.cam,
                fluxes_only=True,
                stars=True,
                lens_effects=False,
                particles=particles,
                return_depth=False)

        k = output(img, show, maxval=0.50, gamma=1.8)
        #k = output(img, show, maxval=0.70, gamma=1.0)

        if k is None or k == 27:
            break
        if k in (ord('a'), ord('d')):
            b += (1 if k == ord('a') else -1) * w
        if k in (ord('w'), ord('s')):
            a += (1 if k == ord('s') else -1) * w
        if k in (ord('q'), ord('e')):
            c += (1 if k == ord('q') else -1) * w

        if 0:
            l_light_v = tools.q_times_v(
                SystemModel.sc2gl_q.conj() * tools.ypr_to_q(
                    math.radians(a), math.radians(b), math.radians(c)) *
                g_sc_q.conj(), g_sol_ast_v / np.linalg.norm(g_sol_ast_v))
        elif 1:
            g_ast_q = get_ast_q(x + a, y + b, z + c)
            l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
            ) * g_ast_q * SystemModel.sc2gl_q
Example No. 5
        m = cv2.moments(img[:, :, 0], binaryImage=binary)

        if np.isclose(m['m00'], 0):
            if is_scene:
                raise PositioningException('No asteroid found')
            else:
                raise PositioningException(
                    'Algorithm failure: model moved out of view')

        # image centroid
        icx = m['m10'] / m['m00'] / iw * self._cam.width
        icy = m['m01'] / m['m00'] / ih * self._cam.height
        brightness = m['m00'] / iw / ih * self._cam.width * self._cam.height

        # pixel spreads
        # used to model dimensions of asteroid parts visible in image
        hr = math.sqrt(m['mu20'] / m['m00']) if m['mu20'] > 0 else 1
        vr = math.sqrt(m['mu02'] / m['m00']) if m['mu02'] > 0 else 1

        return icx, icy, brightness, hr, vr


if __name__ == '__main__':
    sm = RosettaSystemModel()
    lblloader.load_image_meta(sm.asteroid.sample_image_meta_file, sm)
    re = RenderEngine(sm.view_width, sm.view_height)
    obj_idx = re.load_object(sm.asteroid.real_shape_model)

    DEBUG = True
    algo = CentroidAlgo(sm, re, obj_idx)
    algo.adjust_iteratively(sm.asteroid.sample_image_file, None)
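The centroid and pixel spreads above come directly from raw and central image moments: icx is proportional to m10/m00, icy to m01/m00, and the spreads follow from mu20/m00 and mu02/m00. A small self-contained check of those formulas on a synthetic blob:

import cv2
import numpy as np

# white 20x40 rectangle on a black 128x128 image
img = np.zeros((128, 128), dtype=np.uint8)
img[50:70, 30:70] = 255

m = cv2.moments(img, binaryImage=True)
cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']   # centroid, about (49.5, 59.5)
hr = np.sqrt(m['mu20'] / m['m00'])                  # horizontal spread (std-dev-like)
vr = np.sqrt(m['mu02'] / m['m00'])                  # vertical spread
print(cx, cy, hr, vr)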
Example No. 6
def export(sm, dst_path, src_path=None, src_imgs=None, trg_shape=(224, 224), crop=False, debug=False,
           img_prefix="", title=""):

    trg_w, trg_h = trg_shape
    assert (src_path is not None) + (src_imgs is not None) == 1, 'give exactly one of src_path or src_imgs'

    if debug:
        renderer = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        obj_idx = renderer.load_object(sm.asteroid.target_model_file,
                                       smooth=sm.asteroid.render_smooth_faces)
        algo = AlgorithmBase(sm, renderer, obj_idx)

    metadatafile = os.path.join(dst_path, 'dataset_all.txt')
    if not os.path.exists(metadatafile):
        with open(metadatafile, 'w') as f:
            f.write('\n'.join(['%s, camera centric coordinate frame used' % title,
                               'Image ID, ImageFile, Target Pose [X Y Z W P Q R], Sun Vector [X Y Z]', '', '']))

    files = list(os.listdir(src_path)) if src_imgs is None else src_imgs
    files = sorted(files)

    id = 0
    for i, fn in enumerate(files):
        if src_imgs is not None or re.search(r'(?<!far_)\d{4}\.png$', fn):
            c = 2 if src_imgs is None else 1
            tools.show_progress(len(files)//c, i//c)
            id += 1

            # read system state, write out as relative to s/c
            fname = os.path.basename(fn)
            if src_imgs is None:
                fn = os.path.join(src_path, fn)
            lbl_fn = re.sub(r'_%s(\d{4})' % img_prefix, r'_\1', fn[:-4]) + '.lbl'

            sm.load_state(lbl_fn)
            sm.swap_values_with_real_vals()

            if not crop:
                shutil.copy2(fn, os.path.join(dst_path, fname))
                if os.path.exists(fn[:-4] + '.d.exr'):
                    shutil.copy2(fn[:-4] + '.d.exr', os.path.join(dst_path, fname[:-4] + '.d.exr'))
                if os.path.exists(fn[:-4] + '.xyz.exr'):
                    shutil.copy2(fn[:-4] + '.xyz.exr', os.path.join(dst_path, fname[:-4] + '.xyz.exr'))
                if os.path.exists(fn[:-4] + '.s.exr'):
                    shutil.copy2(fn[:-4] + '.s.exr', os.path.join(dst_path, fname[:-4] + '.s.exr'))
                _write_metadata(metadatafile, id, fname, sm.get_system_scf())
                continue

            from visnav.algo.absnet import AbsoluteNavigationNN

            # read image, detect box, resize, adjust relative pose
            img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
            assert img is not None, 'image file %s not found' % fn

            # detect target, get bounds
            x, y, w, h = ImageProc.single_object_bounds(img, threshold=AbsoluteNavigationNN.DEF_LUMINOSITY_THRESHOLD,
                                                        crop_marg=AbsoluteNavigationNN.DEF_CROP_MARGIN,
                                                        min_px=AbsoluteNavigationNN.DEF_MIN_PIXELS, debug=debug)
            if x is None:
                continue

            # write image metadata
            system_scf = sm.get_cropped_system_scf(x, y, w, h)
            _write_metadata(metadatafile, id, fname, system_scf)

            others, (depth, coords, px_size), k = [], [False] * 3, 1
            if os.path.exists(fn[:-4] + '.d.exr'):
                depth = True
                others.append(cv2.imread(fn[:-4] + '.d.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.xyz.exr'):
                coords = True
                others.append(cv2.imread(fn[:-4] + '.xyz.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.s.exr'):
                px_size = True
                others.append(cv2.imread(fn[:-4] + '.s.exr', cv2.IMREAD_UNCHANGED))

            # crop & resize image, write it
            cropped = ImageProc.crop_and_zoom_image(img, x, y, w, h, None, (trg_w, trg_h), others=others)

            cv2.imwrite(os.path.join(dst_path, fname), cropped[0], [cv2.IMWRITE_PNG_COMPRESSION, 9])
            if depth:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.d.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if coords:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.xyz.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if px_size:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.s.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))

            if debug:
                sc, dq = sm.cropped_system_tf(x, y, w, h)

                sm.spacecraft_pos = tools.q_times_v(SystemModel.sc2gl_q.conj(), sc_ast_lf_r)
                sm.rotate_spacecraft(dq)
                #sm.set_cropped_system_scf(x, y, w, h, sc_ast_lf_r, sc_ast_lf_q)

                if False:
                    sm.load_state(lbl_fn)
                    sm.swap_values_with_real_vals()
                    imgd = cv2.resize(img, (trg_h, trg_w))

                imge = algo.render(center=False, depth=False, shadows=True)
                h, w = imge.shape
                imge = cv2.resize(imge[:, (w - h)//2:(w - h)//2+h], cropped[0].shape)
                cv2.imshow('equal?', np.hstack((
                    cropped[0],
                    np.ones((cropped[0].shape[0], 1), dtype=cropped[0].dtype) * 255,
                    imge,
                )))
                cv2.waitKey()

                if i > 60:
                    quit()
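The export() loop above selects source frames with re.search(r'(?<!far_)\d{4}\.png$', fn): file names ending in four digits plus .png, where the digits are not directly preceded by 'far_'. A quick illustration with made-up file names:

import re

pattern = r'(?<!far_)\d{4}\.png$'
for fn in ('img_0001.png', 'far_0001.png', 'img_0001.d.exr', 'img_12.png'):
    print(fn, bool(re.search(pattern, fn)))
# img_0001.png   True   (four digits right before .png)
# far_0001.png   False  (negative lookbehind rejects the far_ prefix)
# img_0001.d.exr False  (wrong extension)
# img_12.png     False  (fewer than four digits)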
Example No. 7
    # Rose - SURF:
    # * 10 deg, 128kb,
    # * 12 deg, 512kb,

    sm = RosettaSystemModel(hi_res_shape_model=True)  # rose
    # sm = DidymosSystemModel(hi_res_shape_model=True, use_narrow_cam=True)   # didy
    # sm = DidymosSystemModel(hi_res_shape_model=True, use_narrow_cam=False)  # didw

    # sm.view_width = sm.cam.width
    sm.view_width = 512
    feat = KeypointAlgo.ORB
    fdb_tol = math.radians(11)
    maxmem = 256 * 1024

    re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.real_shape_model,
                             smooth=sm.asteroid.render_smooth_faces)
    fdbgen = FeatureDatabaseGenerator(sm, re, obj_idx)

    if True:
        fdb = fdbgen.generate_fdb(feat,
                                  fdb_tol=fdb_tol,
                                  maxmem=maxmem,
                                  save_progress=True)
    else:
        fname = fdbgen.fdb_fname(feat, fdb_tol, maxmem)
        status, sc_ast_perms, light_perms, fdb = fdbgen.load_fdb(fname)
        print('status: %s' % (status, ))
        #fdbgen.estimate_mem_usage(12, 512, 0.25)
        #quit()
Example No. 8
    elif method == 'akaze':
        feats = {KeypointAlgo.AKAZE: method.upper()}
    elif method == 'sift':
        feats = {KeypointAlgo.SIFT: method.upper()}
    elif method == 'surf':
        feats = {KeypointAlgo.SURF: method.upper()}
    elif method == 'all':
        feats = {
            KeypointAlgo.SIFT: 'sift',
            KeypointAlgo.SURF: 'surf',
            KeypointAlgo.ORB: 'orb',
            KeypointAlgo.AKAZE: 'akaze',
        }

    sm = RosettaSystemModel()
    re = RenderEngine(sm.cam.width, sm.cam.height)
    obj_idx = re.load_object(sm.asteroid.target_model_file)
    d = KeypointAlgo(sm, re, obj_idx)

    for feat, method in feats.items():
        if False:
            kp, desc, detector = d.detect_features(img, feat, 0, nfeats=100)
            out = cv2.drawKeypoints(img, kp, img.copy(), (0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        else:
            d.FEATURE_FILTERING_RELATIVE_GRID_SIZE = 0.01
            d.FEATURE_FILTERING_FALLBACK_GRID_SIZE = 2
            ee = 0
            if len(sys.argv) <= 3 or sys.argv[3] == '2':
                d.FEATURE_FILTERING_SCHEME = d.FFS_SIMPLE_GRID
                ee = sm.pixel_extent(abs(sm.min_med_distance))
            elif sys.argv[3] == '1':
Example No. 9
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import

from visnav.algo import tools
from visnav.algo.model import Camera, SystemModel
from visnav.algo.odometry import VisualOdometry, Pose
from visnav.missions.didymos import DidymosSystemModel
from visnav.render.render import RenderEngine
from visnav.settings import *

if __name__ == '__main__':
    sm = DidymosSystemModel(use_narrow_cam=False,
                            target_primary=False,
                            hi_res_shape_model=False)
    sm.cam = Camera(1024, 1024, 20, 20)

    re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.002, 2)
    ast_v = np.array([0, 0, -0.3])
    #q = tools.angleaxis_to_q((math.radians(2), 0, 1, 0))
    #q = tools.angleaxis_to_q((math.radians(1), 1, 0, 0))
    #q = tools.rand_q(math.radians(2))
    #lowq_obj = sm.asteroid.load_noisy_shape_model(Asteroid.SM_NOISE_HIGH)
    obj = sm.asteroid.real_shape_model
    obj_idx = re.load_object(obj)
    # obj_idx = re.load_object(sm.asteroid.hires_target_model_file)
    t0 = datetime.now().timestamp()

    odo = VisualOdometry(
        sm,
        sm.cam.width,
        verbose=True,
Example No. 10
def est_refl_model(hapke=True, iters=1, init_noise=0.0, verbose=True):
    sm = RosettaSystemModel()
    imgsize = (512, 512)
    imgs = {
        'ROS_CAM1_20140831T104353': 3.2,  # 60, 3.2s
        'ROS_CAM1_20140831T140853': 3.2,  # 62, 3.2s
        'ROS_CAM1_20140831T103933': 3.2,  # 65, 3.2s
        'ROS_CAM1_20140831T022253': 3.2,  # 70, 3.2s
        'ROS_CAM1_20140821T100719': 2.8,  # 75, 2.8s
        'ROS_CAM1_20140821T200718': 2.0,  # 80, 2.0s
        'ROS_CAM1_20140822T113854': 2.0,  # 85, 2.0s
        'ROS_CAM1_20140823T021833': 2.0,  # 90, 2.0s
        'ROS_CAM1_20140819T120719': 2.0,  # 95, 2.0s
        'ROS_CAM1_20140824T021833': 2.8,  # 100, 2.8s
        'ROS_CAM1_20140824T020853': 2.8,  # 105, 2.8s
        'ROS_CAM1_20140824T103934': 2.8,  # 110, 2.8s
        'ROS_CAM1_20140818T230718': 2.0,  # 113, 2.0s
        'ROS_CAM1_20140824T220434': 2.8,  # 120, 2.8s
        'ROS_CAM1_20140828T020434': 2.8,  # 137, 2.8s
        'ROS_CAM1_20140827T140434': 3.2,  # 145, 3.2s
        'ROS_CAM1_20140827T141834': 3.2,  # 150, 3.2s
        'ROS_CAM1_20140827T061834': 3.2,  # 155, 3.2s
        'ROS_CAM1_20140827T021834': 3.2,  # 157, 3.2s
        'ROS_CAM1_20140826T221834': 2.8,  # 160, 2.8s
    }

    target_exposure = np.min(list(imgs.values()))
    for img, exposure in imgs.items():
        real = cv2.imread(
            os.path.join(sm.asteroid.image_db_path, img + '_P.png'),
            cv2.IMREAD_GRAYSCALE)
        real = ImageProc.adjust_gamma(real, 1 / 1.8)
        #dark_px_lim = np.percentile(real, 0.1)
        #dark_px = np.mean(real[real<=dark_px_lim])
        real = cv2.resize(real, imgsize)
        # remove dark pixel intensity and normalize based on exposure
        #real = real - dark_px
        #real *= (target_exposure / exposure)
        imgs[img] = real

    re = RenderEngine(*imgsize, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.hires_target_model_file, smooth=False)
    ab = AlgorithmBase(sm, re, obj_idx)

    model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
    defs = RenderEngine.REFLMOD_PARAMS[model]

    if hapke:
        # L, th, w, b (scattering anisotropy), c (scattering direction from forward to back), B0, hs
        #real_ini_x = [515, 16.42, 0.3057, 0.8746]
        sppf_n = 2
        real_ini_x = defs[:2] + defs[3:3 + sppf_n]
        scales = np.array((500, 20, 3e-1, 3e-1))[:2 + sppf_n]
    else:
        ll_poly = 5
        #real_ini_x = np.array(defs[:7])
        real_ini_x = np.array(
            (9.95120e-01, -6.64840e-03, 3.96267e-05, -2.16773e-06, 2.08297e-08,
             -5.48768e-11, 1))  # theta=20
        real_ini_x = np.hstack((real_ini_x[0:ll_poly + 1], (real_ini_x[-1], )))
        scales = np.array((3e-03, 2e-05, 1e-06, 1e-08, 5e-11, 1))
        scales = np.hstack((scales[0:ll_poly], (scales[-1], )))

    def set_params(x):
        if hapke:
            # optimize J, th, w, b, (c), B_SH0, hs
            xsc = list(np.array(x) * scales)
            vals = xsc[:2] + [defs[2]] + xsc[2:] + defs[len(xsc) + 1:]
        else:
            vals = [1] + list(np.array(x)[:-1] * scales[:-1]) + [0] * (
                5 - ll_poly) + [x[-1] * scales[-1], 0, 0, 0]
        RenderEngine.REFLMOD_PARAMS[model] = vals

    # debug 1: real vs synth, 2: err img, 3: both
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0
        for file, real in imgs.items():
            lblloader.load_image_meta(
                os.path.join(sm.asteroid.image_db_path, file + '.LBL'), sm)
            sm.swap_values_with_real_vals()
            synth2 = ab.render(shadows=True, reflection=model, gamma=1)
            err_img = (synth2.astype('float') - real)**2
            lim = np.percentile(err_img, 99)
            err_img[err_img > lim] = 0
            err += np.mean(err_img)
            if debug:
                if debug % 2:
                    cv2.imshow(
                        'real vs synthetic',
                        np.concatenate((real.astype('uint8'), 255 * np.ones(
                            (real.shape[0], 1), dtype='uint8'), synth2),
                                       axis=1))
                if debug > 1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img / np.max(err_img))
                cv2.waitKey()
        err /= len(imgs)
        if verbose:
            print('%s => %f' %
                  (', '.join(['%.4e' % i for i in np.array(x) * scales]), err))
        return err

    best_x = None
    best_err = float('inf')
    for i in range(iters):
        if hapke:
            ini_x = tuple(real_ini_x + init_noise *
                          np.random.normal(0, 1, (len(scales), )) * scales)
        else:
            ini_x = tuple(real_ini_x[1:-1] / real_ini_x[0] + init_noise *
                          np.random.normal(0, 1, (len(scales) - 1, )) *
                          scales[:-1]) + (real_ini_x[-1] * real_ini_x[0], )

        if verbose:
            print('\n\n\n==== i:%d ====\n' % i)
        res = minimize(
            costfun,
            tuple(ini_x / scales),
            args=(0, verbose),
            #method="BFGS", options={'maxiter': 10, 'eps': 1e-3, 'gtol': 1e-3})
            method="Nelder-Mead",
            options={
                'maxiter': 120,
                'xtol': 1e-4,
                'ftol': 1e-4
            })
        #method="COBYLA", options={'rhobeg': 1.0, 'maxiter': 200, 'disp': False, 'catol': 0.0002})
        if not verbose:
            print('%s => %f' %
                  (', '.join(['%.5e' % i
                              for i in np.array(res.x) * scales]), res.fun))

        if res.fun < best_err:
            best_err = res.fun
            best_x = res.x

    if verbose:
        costfun(best_x, 3, verbose=True)

    if hapke:
        x = tuple(best_x * scales)
    else:
        x = (1, ) + tuple(best_x * scales)
        if verbose:
            p = np.linspace(0, 160, 100)
            L = get_graph_L(20, p)
            plt.plot(p, L, p, Lfun(x[:-1], p))
            plt.show()

    return x
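est_refl_model() above fits the reflectance parameters by handing costfun to scipy.optimize.minimize with the Nelder-Mead simplex method. A toy run of the same call pattern is sketched below; note that recent SciPy releases spell the Nelder-Mead tolerance options xatol/fatol (the xtol/ftol spelling used above is the older one).

import numpy as np
from scipy.optimize import minimize


def toy_costfun(x):
    # quadratic bowl standing in for the image-difference cost above
    return float(np.sum((np.asarray(x) - np.array([1.0, -2.0])) ** 2))


res = minimize(toy_costfun, x0=[0.0, 0.0], method='Nelder-Mead',
               options={'maxiter': 200, 'xatol': 1e-6, 'fatol': 1e-6})
print(res.x, res.fun)   # approximately [1, -2] and ~0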
Example No. 11
class ApiServer:
    SERVER_READY_NOTICE = 'server started, waiting for connections'
    MAX_ORI_DIFF_ANGLE = 360  # in deg

    (
        FRAME_GLOBAL,
        FRAME_LOCAL,
    ) = range(2)

    def __init__(self,
                 mission,
                 hires=False,
                 addr='127.0.0.1',
                 port=50007,
                 result_rendering=True,
                 result_frame=FRAME_LOCAL):
        self._pid = os.getpid()

        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._addr = addr
        self._port = port
        self._sock.bind(('', port))
        self._sock.listen(1)
        self._sock.settimeout(5)

        self._result_rendering = result_rendering
        self._result_frame = result_frame
        self._hires = hires

        self._mission = mission
        self._sm = sm = get_system_model(mission, hires)
        self._target_d2 = '2' in mission
        self._use_nac = mission[-1] == 'n'
        self._autolevel = True  # not self._use_nac
        self._current_level = False
        self._level_lambda = 0.9

        if not self._target_d2:
            # so that D2 is always contained in the frustum
            sm.max_distance += 1.3
        self._renderer = RenderEngine(sm.cam.width,
                                      sm.cam.height,
                                      antialias_samples=16 if hires else 0)
        self._renderer.set_frustum(sm.cam.x_fov, sm.cam.y_fov,
                                   sm.min_altitude * .1, sm.max_distance)
        if isinstance(sm, DidymosSystemModel):
            self.asteroids = [
                DidymosPrimary(hi_res_shape_model=hires),
                DidymosSecondary(hi_res_shape_model=hires),
            ]
        else:
            self.asteroids = [sm.asteroid]

        self._obj_idxs = []
        self._wireframe_obj_idxs = [
            self._renderer.load_object(os.path.join(
                DATA_DIR, 'ryugu+tex-%s-100.obj' % ast),
                                       wireframe=True) for ast in ('d1', 'd2')
        ] if result_rendering else []

        self._logpath = os.path.join(LOG_DIR, 'api-server', self._mission)
        os.makedirs(self._logpath, exist_ok=True)

        self._onboard_renderer = RenderEngine(sm.view_width,
                                              sm.view_height,
                                              antialias_samples=0)
        self._target_obj_idx = self._onboard_renderer.load_object(
            sm.asteroid.target_model_file,
            smooth=sm.asteroid.render_smooth_faces)
        self._keypoint = KeypointAlgo(sm, self._onboard_renderer,
                                      self._target_obj_idx)
        self._centroid = CentroidAlgo(sm, self._onboard_renderer,
                                      self._target_obj_idx)
        self._odometry = {}

        # laser measurement range given in the DLEM-20 datasheet
        self._laser_min_range, self._laser_max_range, self._laser_nominal_max_range = 10, 2100, 5000
        self._laser_fail_prob = 0.05  # probability that a measurement fails, e.g. due to low asteroid albedo
        self._laser_false_prob = 0.01  # probability of a spurious measurement even when there is no target
        self._laser_err_sigma = 0.5  # 0.5 m 1-sigma accuracy given in the DLEM-20 datasheet

        # laser algo params
        self._laser_adj_loc_weight = 1
        self._laser_meas_err_weight = 0.3
        self._laser_max_adj_dist = 100  # in meters

    def _maybe_load_objects(self):
        if len(self._obj_idxs) == 0:
            for ast in self.asteroids:
                file = ast.hires_target_model_file if self._hires else ast.target_model_file
                cache_file = os.path.join(
                    CACHE_DIR,
                    os.path.basename(file).split('.')[0] + '.pickle')
                self._obj_idxs.append(
                    self._renderer.load_object(file,
                                               smooth=ast.render_smooth_faces,
                                               cache_file=cache_file))
            self._obj_idxs.append(
                self._renderer.load_object(self._sm.sc_model_file,
                                           smooth=False))

    @staticmethod
    def _parse_poses(params, offset):
        d1_v = np.array(params[offset][:3]) * 0.001
        d1_q = np.quaternion(*(params[offset][3:7])).normalized()
        d2_v = np.array(params[offset + 1][:3]) * 0.001
        d2_q = np.quaternion(*(params[offset + 1][3:7])).normalized()
        sc_v = np.array(params[offset + 2][:3]) * 0.001
        sc_q = np.quaternion(*(params[offset + 2][3:7])).normalized()
        return d1_v, d1_q, d2_v, d2_q, sc_v, sc_q

    def _laser_meas(self, params):
        time = params[0]
        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=1)

        d1, d2 = self.asteroids
        q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        rel_rot_q = np.array(
            [q * d1_q * d1.ast2sc_q.conj(), q * d2_q * d2.ast2sc_q.conj()])
        rel_pos_v = np.array(
            [tools.q_times_v(q, d1_v - sc_v),
             tools.q_times_v(q, d2_v - sc_v)])

        self._maybe_load_objects()
        dist = self._renderer.ray_intersect_dist(self._obj_idxs[0:2],
                                                 rel_pos_v, rel_rot_q)

        if dist is None:
            if np.random.uniform(0, 1) < self._laser_false_prob:
                noisy_dist = np.random.uniform(self._laser_min_range,
                                               self._laser_nominal_max_range)
            else:
                noisy_dist = None
        else:
            if np.random.uniform(0, 1) < self._laser_fail_prob:
                noisy_dist = None
            else:
                noisy_dist = dist * 1000 + np.random.normal(
                    0, self._laser_err_sigma)
                if noisy_dist < self._laser_min_range or noisy_dist > self._laser_max_range:
                    noisy_dist = None

        return json.dumps(noisy_dist)

    def _laser_algo(self, params):
        time = params[0]
        dist_meas = params[1]
        if not dist_meas or dist_meas < self._laser_min_range or dist_meas > self._laser_max_range:
            raise PositioningException(
                'invalid laser distance measurement: %s' % dist_meas)

        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=2)

        q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        d1, d2 = self.asteroids
        ast = d2 if self._target_d2 else d1
        ast_v = d2_v if self._target_d2 else d1_v
        ast_q = d2_q if self._target_d2 else d1_q

        rel_rot_q = q * ast_q * ast.ast2sc_q.conj()
        rel_pos_v = tools.q_times_v(q, ast_v - sc_v) * 1000
        max_r = ast.max_radius
        max_diam = 2 * max_r / 1000

        # set orthographic projection
        self._onboard_renderer.set_orth_frustum(max_diam, max_diam,
                                                -max_diam / 2, max_diam / 2)

        # render orthographic depth image
        _, zz = self._onboard_renderer.render(self._target_obj_idx, [0, 0, 0],
                                              rel_rot_q, [1, 0, 0],
                                              get_depth=True,
                                              shadows=False,
                                              textures=False)

        # restore regular perspective projection
        self._onboard_renderer.set_frustum(self._sm.cam.x_fov,
                                           self._sm.cam.y_fov,
                                           self._sm.min_altitude * .1,
                                           self._sm.max_distance)

        zz[zz > max_diam / 2 * 0.999] = float('nan')
        zz = zz * 1000 - rel_pos_v[2]
        xx, yy = np.meshgrid(
            np.linspace(-max_r, max_r, self._sm.view_width) - rel_pos_v[0],
            np.linspace(-max_r, max_r, self._sm.view_height) - rel_pos_v[1])

        x_expected = np.clip(
            (rel_pos_v[0] + max_r) / max_r / 2 * self._sm.view_width + 0.5, 0,
            self._sm.view_width - 1.001)
        y_expected = np.clip(
            (rel_pos_v[1] + max_r) / max_r / 2 * self._sm.view_height + 0.5, 0,
            self._sm.view_height - 1.001)
        dist_expected = tools.interp2(zz,
                                      x_expected,
                                      y_expected,
                                      discard_bg=True)

        # mse cost function balances between adjusted location and measurement error
        adj_dist_sqr = (zz - dist_meas)**2 + xx**2 + yy**2
        cost = self._laser_adj_loc_weight * adj_dist_sqr \
             + (self._laser_meas_err_weight - self._laser_adj_loc_weight) * (zz - dist_meas)**2

        j, i = np.unravel_index(np.nanargmin(cost), cost.shape)
        if np.isnan(zz[j, i]):
            raise PositioningException(
                'laser algo results in off asteroid pointing')
        if math.sqrt(adj_dist_sqr[j, i]) >= self._laser_max_adj_dist:
            raise PositioningException(
                'laser algo solution too far (%.0fm, limit=%.0fm), spurious measurement assumed'
                % (math.sqrt(adj_dist_sqr[j, i]), self._laser_max_adj_dist))

        dx, dy, dz = xx[0, i], yy[j, 0], zz[j, i] - dist_meas

        if self._result_frame == ApiServer.FRAME_GLOBAL:
            # return global ast-sc vector
            est_sc_ast_v = ast_v * 1000 - tools.q_times_v(
                q.conj(), rel_pos_v + np.array([dx, dy, dz]))
        else:
            # return local sc-ast vector
            est_sc_ast_v = tools.q_times_v(SystemModel.sc2gl_q,
                                           rel_pos_v + np.array([dx, dy, dz]))
        dist_expected = float(
            dist_expected) if not np.isnan(dist_expected) else -1.0
        return json.dumps([list(est_sc_ast_v), dist_expected])

    def _render(self, params):
        time = params[0]
        sun_distance = np.linalg.norm(np.array(params[1][:3]))  # in meters
        sun_ast_v = tools.normalize_v(np.array(params[1][:3]))
        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=2)

        d1, d2 = self.asteroids
        q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        rel_rot_q = np.array([
            q * d1_q * d1.ast2sc_q.conj(), q * d2_q * d2.ast2sc_q.conj(),
            np.quaternion(1, 0, 1, 0).normalized()
        ])  # the last quaternion is for the spacecraft itself
        rel_pos_v = np.array([
            tools.q_times_v(q, d1_v - sc_v),
            tools.q_times_v(q, d2_v - sc_v), [0, 0, 0]
        ])
        light_v = tools.q_times_v(q, sun_ast_v)

        self._maybe_load_objects()  # lazy load objects

        exp_range = (0.001, 3.5)
        for i in range(20):
            if self._autolevel and self._current_level:
                level = self._current_level
            else:
                level = 3 * 2.5 * 1.3e-3 if self._use_nac else 1.8 * 2.5 * 1.3e-3

            exp, gain = self._sm.cam.level_to_exp_gain(level, exp_range)

            img = TestLoop.render_navcam_image_static(self._sm,
                                                      self._renderer,
                                                      self._obj_idxs,
                                                      rel_pos_v,
                                                      rel_rot_q,
                                                      light_v,
                                                      sc_q,
                                                      sun_distance,
                                                      exposure=exp,
                                                      gain=gain,
                                                      auto_gain=False,
                                                      gamma=1.0,
                                                      use_shadows=True,
                                                      use_textures=True)
            if self._autolevel:
                v = np.percentile(img, 100 - 0.0003)
                level_trg = level * 170 / v
                print(
                    'autolevel (max_v=%.1f, e=%.3f, g=%.1f) current: %.3f, target: %.3f'
                    % (v, exp, gain, level, level_trg))

                self._current_level = level_trg if not self._current_level else \
                        (self._current_level*self._level_lambda + level_trg*(1-self._level_lambda))

                if v < 85 or (v == 255 and level > exp_range[0]):
                    level = level_trg if v < 85 else level * 70 / v
                    self._current_level = level
                    continue
            break

        if False:
            img = ImageProc.default_preprocess(img)

        date = datetime.fromtimestamp(time, pytz.utc)  # datetime.now()
        fname = os.path.join(self._logpath,
                             date.isoformat()[:-6].replace(':', '')) + '.png'
        cv2.imwrite(fname, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])

        return fname

    def _get_pose(self, params, algo_id=1):
        # load target navcam image
        fname = params[0]
        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)

        # asteroid position relative to the sun
        self._sm.asteroid.real_position = np.array(params[1][:3])

        d1_v, d1_q, d2_v, d2_q, sc_v, init_sc_q = self._parse_poses(params,
                                                                    offset=2)

        # set asteroid orientation
        d1, d2 = self.asteroids
        ast = d2 if self._target_d2 else d1
        init_ast_q = d2_q if self._target_d2 else d1_q
        self._sm.asteroid_q = init_ast_q * ast.ast2sc_q.conj()

        # set spacecraft orientation
        self._sm.spacecraft_q = init_sc_q

        # initial rel q
        init_rel_q = init_sc_q.conj() * init_ast_q

        # sc-asteroid relative location
        ast_v = d2_v if self._target_d2 else d1_v  # relative to barycenter
        init_sc_pos = tools.q_times_v(
            SystemModel.sc2gl_q.conj() * init_sc_q.conj(), ast_v - sc_v)
        self._sm.spacecraft_pos = init_sc_pos

        # run keypoint algo
        err = None
        rot_ok = True
        try:
            if algo_id == 1:
                self._keypoint.solve_pnp(
                    img,
                    fname[:-4],
                    KeypointAlgo.AKAZE,
                    verbose=1 if self._result_rendering else 0)
            elif algo_id == 2:
                self._centroid.adjust_iteratively(img, fname[:-4])
                rot_ok = False
            else:
                assert False, 'invalid algo_id=%s' % algo_id
        except PositioningException as e:
            err = e

        rel_gf_q = np.quaternion(*([np.nan] * 4))
        if err is None:
            sc_q = self._sm.spacecraft_q

            # resulting sc-ast relative orientation
            rel_lf_q = self._sm.asteroid_q * ast.ast2sc_q if rot_ok else np.quaternion(
                *([np.nan] * 4))
            rel_gf_q = sc_q.conj() * rel_lf_q

            # sc-ast vector in meters
            rel_lf_v = tools.q_times_v(
                SystemModel.sc2gl_q,
                np.array(self._sm.spacecraft_pos) * 1000)
            rel_gf_v = tools.q_times_v(sc_q, rel_lf_v)

            # collect to one result list
            if self._result_frame == ApiServer.FRAME_GLOBAL:
                result = [
                    list(rel_gf_v),
                    list(quaternion.as_float_array(rel_gf_q))
                ]
            else:
                result = [
                    list(rel_lf_v),
                    list(quaternion.as_float_array(rel_lf_q))
                ]

            diff_angle = math.degrees(
                tools.angle_between_q(init_rel_q, rel_gf_q))
            if diff_angle > ApiServer.MAX_ORI_DIFF_ANGLE:
                err = PositioningException(
                    'Result orientation differs too much from the initial one, diff %.1f°, max: %.1f°'
                    % (diff_angle, ApiServer.MAX_ORI_DIFF_ANGLE))

        # render a result image
        if self._result_rendering:
            self._render_result([fname] + [
                list(np.array(self._sm.spacecraft_pos) * 1000) +
                list(quaternion.as_float_array(rel_gf_q))
            ] + [
                list(np.array(init_sc_pos) * 1000) +
                list(quaternion.as_float_array(init_rel_q))
            ])

        if err is not None:
            raise err

        # send back in json format
        return json.dumps(result)

    def _odometry_track(self, params):
        VO_EST_CAM_POSE = False

        session = params[0]

        fname = params[1]
        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)

        orig_time = params[2]
        time = datetime.fromtimestamp(orig_time, pytz.utc)

        sun_ast_v = tools.normalize_v(np.array(params[3][:3]))
        d1_v, d1_q, d2_v, d2_q, sc_v, sc_q = self._parse_poses(params,
                                                               offset=4)
        ast_q = d2_q if self._target_d2 else d1_q
        ast_v = d2_v if self._target_d2 else d1_v

        cv2sc_q = SystemModel.cv2gl_q * SystemModel.sc2gl_q.conj()
        sc_ast_cvf_q = cv2sc_q * sc_q.conj() * ast_q * cv2sc_q.conj()
        sc_ast_cvf_v = tools.q_times_v(cv2sc_q * sc_q.conj(),
                                       ast_v - sc_v) * 1000

        if VO_EST_CAM_POSE:
            # still from global to old ast-sc frame
            sc_ast_cvf_q = cv2sc_q * ast_q.conj() * sc_q * cv2sc_q.conj()
            sc_ast_cvf_v = tools.q_times_v(cv2sc_q * ast_q.conj(),
                                           sc_v - ast_v) * 1000


            # sc_ast_cvf_v = tools.q_times_v(sc_ast_cvf_q * cv2sc_q, sc_v - ast_v)

        # TODO: modify so that uncertainties are given (or not)
        prior = Pose(sc_ast_cvf_v, sc_ast_cvf_q,
                     np.ones((3, )) * 0.1,
                     np.ones((3, )) * 0.01)

        # tracemalloc.start()
        # current0, peak0 = tracemalloc.get_traced_memory()
        # print("before: %.0fMB" % (current0/1024/1024))
        if session not in self._odometry:
            self._odometry[session] = VisualOdometry(
                self._sm.cam,
                self._sm.view_width * 2,
                verbose=1,
                use_scale_correction=False,
                est_cam_pose=VO_EST_CAM_POSE)
        post, bias_sds, scale_sd = self._odometry[session].process(
            img, time, prior, sc_q)
        # current, peak = tracemalloc.get_traced_memory()
        # print("transient: %.0fMB" % ((peak - current)/1024/1024))
        # tracemalloc.stop()

        if post is not None:
            est_sc_ast_scf_q = cv2sc_q.conj() * post.quat * cv2sc_q
            est_sc_ast_scf_v = tools.q_times_v(cv2sc_q.conj(), post.loc)

            if VO_EST_CAM_POSE:
                # still from old ast-sc frame to local sc-ast frame
                est_sc_ast_scf_q = cv2sc_q.conj() * post.quat.conj() * cv2sc_q
                # NOTE: we use prior orientation instead of posterior one as the error there grows over time
                est_sc_ast_scf_v = -tools.q_times_v(
                    cv2sc_q.conj() * prior.quat.conj(), post.loc)

            if self._result_frame == ApiServer.FRAME_GLOBAL:
                est_sc_ast_scf_q = sc_q * est_sc_ast_scf_q
                est_sc_ast_scf_v = tools.q_times_v(sc_q, est_sc_ast_scf_v)

            # TODO: (1) return in sc local frame
            #   - distance err sd
            #   - lateral err sd
            #   - orientation err sd (pitch & yaw)
            #   - orientation err sd (roll)
            #   - distance bias drift sd
            #   - lateral bias drift sd
            #   - orientation bias drift sd (pitch & yaw)
            #   - orientation bias drift sd (roll)
            #   - scale drift sd
            dist = np.linalg.norm(sc_ast_cvf_v)
            bias_sds = bias_sds * dist
            est_sc_ast_lf_v_s2 = post.loc_s2 * dist
            est_sc_ast_lf_so3_s2 = post.so3_s2

            result = [
                list(est_sc_ast_scf_v),
                list(quaternion.as_float_array(est_sc_ast_scf_q)),
                list(est_sc_ast_lf_v_s2) + list(est_sc_ast_lf_so3_s2) +
                list(bias_sds) + [scale_sd],
                orig_time,
            ]
        else:
            raise PositioningException('No tracking result')

        # send back in json format
        return json.dumps(result)

    def _render_result(self, params):
        fname = params[0]
        img = cv2.imread(fname, cv2.IMREAD_COLOR)

        if np.all(np.logical_not(np.isnan(params[1]))):
            rel_pos_v = np.array(params[1][:3]) * 0.001
            rel_rot_q = np.quaternion(*(params[1][3:7]))
            color = np.array((0, 1, 0)) * 0.6
        else:
            rel_pos_v = np.array(params[2][:3]) * 0.001
            rel_rot_q = np.quaternion(*(params[2][3:7]))
            color = np.array((0, 0, 1)) * 0.6

        # ast_v = np.array(params[1][:3])
        # ast_q = np.quaternion(*(params[1][3:7]))
        # sc_v = np.array(params[2][:3])
        # sc_q = np.quaternion(*(params[2][3:7]))
        #
        ast_idx = 1 if self._target_d2 else 0
        ast = self.asteroids[ast_idx]
        # q = SystemModel.sc2gl_q.conj() * sc_q.conj()
        # rel_rot_q = q * ast_q * ast.ast2sc_q.conj()
        # rel_pos_v = tools.q_times_v(q, ast_v - sc_v) * 0.001

        rel_rot_q = SystemModel.sc2gl_q.conj() * rel_rot_q * ast.ast2sc_q.conj(
        )
        #rel_pos_v = tools.q_times_v(SystemModel.sc2gl_q.conj(), rel_pos_v) * 0.001

        overlay = self._renderer.render_wireframe(
            self._wireframe_obj_idxs[ast_idx], rel_pos_v, rel_rot_q, color)
        overlay = cv2.resize(overlay, (img.shape[1], img.shape[0]))

        blend_coef = 0.6
        alpha = np.zeros(list(img.shape[:2]) + [1])
        alpha[np.any(overlay > 0, axis=2)] = blend_coef
        result = (overlay * alpha + img * (1 - alpha)).astype('uint8')

        fout = fname[:-4] + '-res.png'
        cv2.imwrite(fout, result, [cv2.IMWRITE_PNG_COMPRESSION, 9])

        return fout

    def _handle(self, call):
        if len(call) == 0:
            return None
        if call == 'quit':
            raise QuitException()
        elif call == 'ping':
            return 'pong'

        error = False
        rval = False

        idx = call.find(' ')
        mission, command = (call[:idx] if idx >= 0 else call).split('|')
        if mission != self._mission:
            assert False, 'wrong mission for this server instance, expected %s but got %s, command was %s' % (
                self._mission, mission, command)

        params = []
        try:
            params = json.loads(call[idx + 1:]) if idx >= 0 else []
        except JSONDecodeError as e:
            error = 'Invalid parameters: ' + str(e) + ' "' + call[idx +
                                                                  1:] + '"'

        if command == 'quit':
            raise QuitException()

        last_exception = None
        if not error:
            try:
                get_pose = re.match(r'^get_pose(\d+)$', command)
                if get_pose:
                    try:
                        rval = self._get_pose(params, algo_id=int(get_pose[1]))
                    except PositioningException as e:
                        error = 'algo failed: ' + str(e)
                elif command == 'odometry':
                    try:
                        rval = self._odometry_track(params)
                    except PositioningException as e:
                        error = str(e)
                elif command == 'render':
                    rval = self._render(params)
                elif command == 'laser_meas':
                    # return a laser measurement
                    rval = self._laser_meas(params)
                elif command == 'laser_algo':
                    # return new sc-target vector based on laser measurement
                    try:
                        rval = self._laser_algo(params)
                    except PositioningException as e:
                        error = 'algo failed: ' + str(e)
                else:
                    error = 'invalid command: ' + command
            except (ValueError, TypeError) as e:
                error = 'invalid args: ' + str(e)
                self.print(
                    str(e) +
                    ''.join(traceback.format_exception(*sys.exc_info())))
            except Exception as e:
                last_exception = ''.join(
                    traceback.format_exception(*sys.exc_info()))
                self.print('Exception: %s' % last_exception)

        if last_exception is not None:
            error = 'Exception encountered: %s' % last_exception

        out = ' '.join((('0' if error else '1'), ) +
                       ((error, ) if error else (rval, ) if rval else tuple()))
        return out

    def listen(self):
        # main loop here
        self.print('%s on port %d' % (self.SERVER_READY_NOTICE, self._port))
        while True:
            # outer loop accepting connections (max 1)
            try:
                conn, addr = self._sock.accept()
                try:
                    with conn:
                        while True:
                            # inner loop accepting multiple requests on same connection
                            req = self._receive(conn).strip(' \n\r\t')
                            if req != '':
                                for call in req.split('\n'):
                                    # in case multiple calls in one request
                                    out = self._handle(call.strip(' \n\r\t'))
                                    if out is not None:
                                        out = out.strip(' \n\r\t') + '\n'
                                        conn.sendall(out.encode('utf-8'))
                except ConnectionAbortedError:
                    self.print('client closed the connection')
            except socket.timeout:
                pass

    def print(self, msg, start=False, finish=False):
        prefix = '%s [%d]' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                              self._pid)
        if start:
            print(' '.join((prefix, msg)), end='', flush=True)
        elif finish:
            print(msg)
        else:
            print(' '.join((prefix, msg)))

    def _receive(self, conn):
        chunks = []
        buffer_size = 1024
        while True:
            try:
                chunk = conn.recv(buffer_size)
                chunks.append(chunk)
                if chunk == b'':
                    if len(chunks) > 1:
                        break
                    else:
                        raise ConnectionAbortedError()
                elif len(chunk) < buffer_size:
                    break
            except socket.timeout:
                pass

        return (b''.join(chunks)).decode('utf-8')

    def close(self):
        self._sock.close()
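The ApiServer above speaks a simple newline-delimited protocol over TCP: each request is '<mission>|<command> <JSON params>' and each reply line starts with '1' (success) or '0' (error) followed by the payload. A minimal client sketch under those assumptions; the mission string 'rose' and the command shown are placeholders, and the parameter layout must match the corresponding handler above.

import json
import socket


def api_call(command, params, mission='rose', addr='127.0.0.1', port=50007):
    """Send one request to the ApiServer and return (ok, payload)."""
    req = '%s|%s %s\n' % (mission, command, json.dumps(params))
    with socket.create_connection((addr, port)) as sock:
        sock.sendall(req.encode('utf-8'))
        reply = sock.recv(64 * 1024).decode('utf-8').strip()
    status, _, payload = reply.partition(' ')
    return status == '1', payload


# e.g. ok, payload = api_call('laser_meas', params)  # params structured as in _laser_meas above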
Example No. 12

if __name__ == '__main__':
    sm = RosettaSystemModel(rosetta_batch='mtp006')
    #img = 'ROS_CAM1_20140822T020718'   # 006 # default
    img = 'ROS_CAM1_20140819T210718'  # 006
    #img = 'ROS_CAM1_20150606T213002'   # 017

    lblloader.load_image_meta(
        os.path.join(sm.asteroid.image_db_path, img + '.LBL'), sm)
    sm.swap_values_with_real_vals()

    compare = True
    textures = True
    if False:
        re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=16)
        #obj_idx = re.load_object(sm.asteroid.target_model_file, smooth=False)
        obj_idx = re.load_object(sm.asteroid.hires_target_model_file,
                                 smooth=False)
        quit()
        #obj_idx = re.load_object(os.path.join(DATA_DIR, 'original-shapemodels/CSHP_DV_130_01_HIRES_00200.obj'), smooth=False)
        #obj_idx = re.load_object(os.path.join(DATA_DIR, 'original-shapemodels/dissolved_5deg_1.obj'), smooth=False)
        #obj_idx = re.load_object(os.path.join(DATA_DIR, 'original-shapemodels/67P_C-G_shape_model_MALMER_2015_11_20-in-km.obj'), smooth=False)
        textures = False
    else:
        re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        fname = sm.asteroid.constant_noise_shape_model[
            '']  # ''=>17k, 'lo'=>4k, 'hi'=>1k
        with open(fname, 'rb') as fh:
            noisy_model, sm_noise = pickle.load(fh)
        obj_idx = re.load_object(objloader.ShapeModel(data=noisy_model),
Example No. 13
def render_itokawa(show=False):
    cam = Camera(4096,
                 4096,
                 aperture=1.5,
                 focal_length=120.8,
                 sensor_size=[12.288] * 2,
                 px_saturation_e=30e3)
    shape_model_file = [
        r'C:\projects\sispo\data\models\itokawa_16k.obj',
        r'C:\projects\sispo\data\models\itokawa_f3145728.obj',
    ][0]

    RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = [
        553.38,  # J         0
        26,  # th_p      26
        0.31,  # w         0.31
        -0.35,  # b         -0.35
        0,  # c         0
        0.86,  # B_SH0     0.86
        0.00021,  # hs        0.00021
        0,  # B_CB0     0
        0.005,  # hc        0.005
        1,  # K         1
    ]

    re = RenderEngine(cam.width, cam.height,
                      antialias_samples=0)  #, enable_extra_data=True)
    re.set_frustum(cam.x_fov, cam.y_fov, 0.05, 12)
    obj_idx = re.load_object(shape_model_file)

    if 1:
        g_sc_q = tools.angleaxis_to_q(
            [1.847799, -0.929873, 0.266931, -0.253146])
        #x, y, z = -13.182141, -64.694813, 116.263134
        x, y, z = 93.818, -8.695, 1.263
        g_ast_q = get_ast_q(x, y, z)
        g_sol_ast_v = np.array([146226194732.0, -68812326932.0, -477863381.0
                                ]) * 1e-3
        g_sol_sc_v = np.array([146226188132.0, -68812322442.0, -477863711.0
                               ]) * 1e-3
        g_sc_ast_v = g_sol_ast_v - g_sol_sc_v
        l_ast_sc_v = tools.q_times_v(
            SystemModel.sc2gl_q.conj() * g_sc_q.conj(), g_sc_ast_v)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
        ) * g_ast_q * SystemModel.sc2gl_q
        l_light_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                    g_sol_ast_v / np.linalg.norm(g_sol_ast_v))
    else:
        l_ast_sc_v = np.array([0, 0, -7.990 * 1])
        l_ast_q = tools.angleaxis_to_q((math.radians(20), 0, 1, 0))
        l_light_v = np.array([1, 0, -1]) / math.sqrt(2)

    sc_mode = 0
    a, b, c = [0] * 3
    ds, dq = 0.135, tools.ypr_to_q(math.radians(1.154), 0,
                                   math.radians(-5.643))
    #    ds, dq = 0.535, quaternion.one     # itokawa centered

    while True:
        img = re.render(obj_idx,
                        tools.q_times_v(dq, l_ast_sc_v * ds),
                        l_ast_q,
                        l_light_v,
                        flux_density=1.0,
                        gamma=1.0,
                        get_depth=False,
                        shadows=True,
                        textures=True,
                        reflection=RenderEngine.REFLMOD_HAPKE)

        #        data = re.render_extra_data(obj_idx, tools.q_times_v(dq, l_ast_sc_v*ds), l_ast_q, l_light_v)
        #        import matplotlib.pyplot as plt
        #        plt.imshow(data[:, :, 0])

        k = output(img, show, maxval=0.90)

        if k is None or k == 27:
            break

        tmp = 1 if k in (ord('a'), ord('s'), ord('q')) else -1
        if k in (ord('a'), ord('d')):
            if sc_mode:
                dq = tools.ypr_to_q(math.radians(tmp * 0.033), 0, 0) * dq
            else:
                b += tmp
        if k in (ord('w'), ord('s')):
            if sc_mode:
                dq = tools.ypr_to_q(0, 0, math.radians(tmp * 0.033)) * dq
            else:
                a += tmp
        if k in (ord('q'), ord('e')):
            if sc_mode:
                ds *= 0.9**tmp
            else:
                c += tmp
        if k == ord('i'):
            y, p, r = tools.q_to_ypr(dq)
            print('+c: %.3f, %.3f, %.3f' % (x + a, y + b, z + c))
            print('ds, h, v: %.3f, %.3f, %.3f' %
                  (ds, math.degrees(y), math.degrees(r)))

        g_ast_q = get_ast_q(x + a, y + b, z + c)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
        ) * g_ast_q * SystemModel.sc2gl_q
Example No. 14
    def __init__(self):
        super(Window, self).__init__()
        sm = RosettaSystemModel(rosetta_batch='mtp006')
        sample_img = 'ROS_CAM1_20140822T020718'
        sm.asteroid.sample_image_file = os.path.join(sm.asteroid.image_db_path,
                                                     sample_img + '_P.png')
        sm.asteroid.sample_image_meta_file = os.path.join(
            sm.asteroid.image_db_path, sample_img + '.LBL')

        self.systemModel = sm
        self.glWidget = GLWidget(self.systemModel, parent=self)
        self.closing = []

        # so that algorithms can be run as a batch from a different thread
        def tsRunHandler(f):
            fun, args, kwargs = f[0], f[1] or [], f[2] or {}
            self.tsRunResult = fun(*args, **kwargs)

        self.tsRun.connect(tsRunHandler)
        self.tsRunResult = None, float('nan')

        self.sliders = dict((n, self.slider(p))
                            for n, p in self.systemModel.get_params(all=True))

        topLayout = QHBoxLayout()
        topLayout.addWidget(self.glWidget)
        topLayout.addWidget(self.sliders['x_off'])
        topLayout.addWidget(self.sliders['y_off'])
        topLayout.addWidget(self.sliders['z_off'])
        topLayout.addWidget(self.sliders['x_rot'])
        topLayout.addWidget(self.sliders['y_rot'])
        topLayout.addWidget(self.sliders['z_rot'])
        topLayout.addWidget(self.sliders['ast_x_rot'])
        topLayout.addWidget(self.sliders['ast_y_rot'])
        topLayout.addWidget(self.sliders['ast_z_rot'])
        topLayout.addWidget(self.sliders['time'])

        bottomLayout = QHBoxLayout()
        render_engine = RenderEngine(sm.view_width, sm.view_height)
        obj_idx = render_engine.load_object(
            os.path.join(DATA_DIR, '67p-4k.obj'))
        self.phasecorr = PhaseCorrelationAlgo(sm, render_engine, obj_idx)
        self.keypoint = KeypointAlgo(sm, render_engine, obj_idx)
        self.centroid = CentroidAlgo(sm, render_engine, obj_idx)
        self.mixed = MixedAlgo(self.centroid, self.keypoint)

        self.buttons = dict((m.lower(), self.optbutton(m, bottomLayout))
                            for m in (
                                'Simplex',
                                #                'Powell',
                                'COBYLA',
                                #                'CG',
                                #                'BFGS',
                                'Anneal',
                                'Brute',
                            ))

        self.infobtn = QPushButton('Info', self)
        self.infobtn.clicked.connect(lambda: self.printInfo())
        bottomLayout.addWidget(self.infobtn)

        #        self.zoombtn = QPushButton('+', self)
        #        self.zoombtn.clicked.connect(lambda: self.glWidget.setImageZoomAndResolution(
        #                im_xoff=300, im_yoff=180,
        #                im_width=512, im_height=512, im_scale=1))
        #        bottomLayout.addWidget(self.zoombtn)
        #
        #        self.defviewbtn = QPushButton('=', self)
        #        self.defviewbtn.clicked.connect(lambda: self.glWidget.setImageZoomAndResolution(
        #                im_xoff=0, im_yoff=0,
        #                im_width=1024, im_height=1024, im_scale=0.5))
        #        bottomLayout.addWidget(self.defviewbtn)

        def testfun1():
            try:
                self.centroid.adjust_iteratively(self.glWidget.image_file,
                                                 LOG_DIR)
            except PositioningException as e:
                print('algorithm failed: %s' % e)

        self.test1 = QPushButton('Ctrd', self)
        self.test1.clicked.connect(testfun1)
        bottomLayout.addWidget(self.test1)

        def testfun2():
            #self.glWidget.saveViewToFile('testimg.png')
            try:
                init_z = self.systemModel.z_off.value
                self.keypoint.solve_pnp(self.glWidget.image_file,
                                        LOG_DIR,
                                        init_z=init_z)
            except PositioningException as e:
                print('algorithm failed: %s' % e)

        self.test2 = QPushButton('KP', self)
        self.test2.clicked.connect(testfun2)
        bottomLayout.addWidget(self.test2)

        # test lunar lambert rendering
        ab = AlgorithmBase(sm, render_engine, obj_idx)

        def testfun3():
            def testfun3i():
                image = ab.render()
                cv2.imshow('lunar-lambert', image)

            try:
                _thread.start_new_thread(testfun3i, tuple(), dict())
            except Exception as e:
                print('failed: %s' % e)

        self.test3 = QPushButton('LL', self)
        self.test3.clicked.connect(testfun3)
        bottomLayout.addWidget(self.test3)

        mainLayout = QVBoxLayout()
        mainLayout.addLayout(topLayout)
        mainLayout.addLayout(bottomLayout)
        self.setLayout(mainLayout)
        self.setWindowTitle("Hello 67P/C-G")
        self.adjustSize()
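
# Minimal self-contained sketch of the button wiring pattern used above,
# assuming PyQt5; MiniWindow is a made-up class, not the visnav Window.
import sys
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget

class MiniWindow(QWidget):
    def __init__(self):
        super().__init__()
        btn = QPushButton('Run', self)
        btn.clicked.connect(self.run_algo)  # slot called on click
        layout = QVBoxLayout()
        layout.addWidget(btn)
        self.setLayout(layout)

    def run_algo(self):
        print('algorithm would run here')

if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = MiniWindow()
    w.show()
    sys.exit(app.exec_())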
Exemplo n.º 15
0
        scattering_coef=2e-10,  # affects strength of haze/veil when sun shines on the lens
        **common_kwargs
    )


if __name__ == '__main__':
    import sys

    if len(sys.argv) > 1 and sys.argv[1] == 'record':
        from visnav.algo.model import SystemModel
        from visnav.missions.didymos import DidymosSystemModel
        from visnav.render.render import RenderEngine
        from visnav.settings import *

        sm = DidymosSystemModel(use_narrow_cam=False, target_primary=False, hi_res_shape_model=True)
        re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, 2)
        obj = sm.asteroid.real_shape_model
        obj_idx = re.load_object(obj)

        light = np.array([1, 0, -0.5])
        light /= np.linalg.norm(light)
        cam_ast_v0 = np.array([0, 0, -sm.min_med_distance * 0.7])
        cam_ast_q0 = quaternion.one
        dq = tools.angleaxis_to_q((math.radians(1), 0, 1, 0))

        inputs = []
        for i in range(60):
            cam_ast_v = cam_ast_v0
            cam_ast_q = dq**i * cam_ast_q0
            image = re.render(obj_idx, cam_ast_v, cam_ast_q, light, gamma=1.8, get_depth=False)
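
# Sketch of the turntable idea used above, assuming the numpy-quaternion
# package: raising a 1 degree step quaternion to the i'th power rotates the
# target by roughly i degrees about the chosen axis.
import math
import numpy as np
import quaternion

axis = np.array([0.0, 1.0, 0.0])
dq = quaternion.from_rotation_vector(math.radians(1) * axis)  # 1 deg step

for i in (0, 30, 60):
    q = dq ** i
    angle = 2 * math.degrees(math.acos(min(1.0, abs(q.w))))
    print('frame %d -> rotation of about %.1f deg' % (i, angle))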
Exemplo n.º 16
0
def main(outfile='spl-test.log', img_path='67p-imgs', config=None):
    logger.info('Setting up renderer and loading the 3d model...')
    tconfig = config or {}
    config = {
        'verbose': 1,
        'feat': KeypointAlgo.AKAZE,
        'v16k': False,
        'view_width': 384
    }
    config.update(tconfig)
    v16k = config.pop('v16k')
    view_width = config.pop('view_width')

    sm = RosettaSystemModel(hi_res_shape_model=False,
                            skip_obj_load=True,
                            res_mult=512 / 1024,
                            view_width=view_width)

    re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.1 * sm.min_distance,
                   1.1 * sm.max_distance)
    obj_path = os.path.join(os.path.dirname(__file__), 'data',
                            '67p-16k.obj' if v16k else '67p-4k.obj')
    obj_idx = re.load_object(obj_path)

    spl = KeypointAlgo(sm, re, obj_idx)
    spl.RENDER_TEXTURES = False

    img_path = os.path.join(os.path.dirname(__file__), 'data', img_path)
    imgfiles = sorted([
        fname for fname in os.listdir(img_path)
        if fname[-4:].lower() in ('.jpg', '.png')
    ])

    with open(outfile, 'w') as file:
        file.write(' '.join(sys.argv) + '\n' + '\t'.join(log_columns()) + '\n')

    # loop through some test images
    for i, fname in enumerate(imgfiles):
        imgfile = os.path.join(img_path, fname)

        lblfile = imgfile[:-4] + '.lbl'
        if not os.path.exists(lblfile):
            lblfile = imgfile[:-6] + '.lbl'

        # load a noisy system state
        sm.load_state(lblfile, sc_ast_vertices=False)
        initial = {
            'time': sm.time.value,
            'ast_axis': sm.asteroid_axis,
            'sc_rot': sm.spacecraft_rot,
            'sc_pos': sm.spacecraft_pos,
            'ast_pos': sm.asteroid.real_position,
        }

        # get result and log result stats
        try:
            spl.solve_pnp(imgfile, None, scale_cam_img=True, **config)
            ok = True
        except PositioningException as e:
            ok = False
        rtime = spl.timer.elapsed

        # calculate results
        results = calculate_result(sm, spl, fname, ok, initial)

        # write log entry
        write_log_entry(outfile, i, rtime, 0.0, *results)
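
# log_columns() and write_log_entry() are project helpers not shown in this
# example; the hypothetical sketch below only illustrates the tab-separated
# format implied above and may differ from the real visnav versions.
def log_columns():
    return ('iter', 'runtime', 'sm_noise', 'results')  # placeholder names

def write_log_entry(outfile, *values):
    with open(outfile, 'a') as fh:  # append one row to the existing log file
        fh.write('\t'.join(str(v) for v in values) + '\n')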
Exemplo n.º 17
0
    def __init__(self,
                 mission,
                 hires=False,
                 addr='127.0.0.1',
                 port=50007,
                 result_rendering=True,
                 result_frame=FRAME_LOCAL):
        self._pid = os.getpid()

        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._addr = addr
        self._port = port
        self._sock.bind(('', port))
        self._sock.listen(1)
        self._sock.settimeout(5)

        self._result_rendering = result_rendering
        self._result_frame = result_frame
        self._hires = hires

        self._mission = mission
        self._sm = sm = get_system_model(mission, hires)
        self._target_d2 = '2' in mission
        self._use_nac = mission[-1] == 'n'
        self._autolevel = True  # not self._use_nac
        self._current_level = False
        self._level_lambda = 0.9

        if not self._target_d2:
            # so that D2 is always contained in the frustum
            sm.max_distance += 1.3
        self._renderer = RenderEngine(sm.cam.width,
                                      sm.cam.height,
                                      antialias_samples=16 if hires else 0)
        self._renderer.set_frustum(sm.cam.x_fov, sm.cam.y_fov,
                                   sm.min_altitude * .1, sm.max_distance)
        if isinstance(sm, DidymosSystemModel):
            self.asteroids = [
                DidymosPrimary(hi_res_shape_model=hires),
                DidymosSecondary(hi_res_shape_model=hires),
            ]
        else:
            self.asteroids = [sm.asteroid]

        self._obj_idxs = []
        self._wireframe_obj_idxs = [
            self._renderer.load_object(os.path.join(
                DATA_DIR, 'ryugu+tex-%s-100.obj' % ast),
                                       wireframe=True) for ast in ('d1', 'd2')
        ] if result_rendering else []

        self._logpath = os.path.join(LOG_DIR, 'api-server', self._mission)
        os.makedirs(self._logpath, exist_ok=True)

        self._onboard_renderer = RenderEngine(sm.view_width,
                                              sm.view_height,
                                              antialias_samples=0)
        self._target_obj_idx = self._onboard_renderer.load_object(
            sm.asteroid.target_model_file,
            smooth=sm.asteroid.render_smooth_faces)
        self._keypoint = KeypointAlgo(sm, self._onboard_renderer,
                                      self._target_obj_idx)
        self._centroid = CentroidAlgo(sm, self._onboard_renderer,
                                      self._target_obj_idx)
        self._odometry = {}

        # laser measurement range given in dlem-20 datasheet
        self._laser_min_range, self._laser_max_range, self._laser_nominal_max_range = 10, 2100, 5000
        self._laser_fail_prob = 0.05  # probability of measurement fail because e.g. asteroid low albedo
        self._laser_false_prob = 0.01  # probability for random measurement even if no target
        self._laser_err_sigma = 0.5  # 0.5 m 1-sigma accuracy given in the DLEM-20 datasheet

        # laser algo params
        self._laser_adj_loc_weight = 1
        self._laser_meas_err_weight = 0.3
        self._laser_max_adj_dist = 100  # in meters
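
# Illustrative guess (not the visnav API) at how the laser constants above
# could drive a simulated range measurement: occasional dropped returns,
# rare false returns, and Gaussian range noise.
import numpy as np

def simulate_laser(true_range, min_r=10, max_r=2100, nominal_max=5000,
                   fail_prob=0.05, false_prob=0.01, err_sigma=0.5,
                   rng=np.random.default_rng()):
    if true_range is None or not (min_r <= true_range <= max_r):
        # no target in range: usually no return, occasionally a false one
        return rng.uniform(min_r, nominal_max) if rng.random() < false_prob else None
    if rng.random() < fail_prob:
        return None  # dropped measurement, e.g. due to low albedo
    return true_range + rng.normal(0, err_sigma)

print(simulate_laser(150.0))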
Exemplo n.º 18
0
if __name__ == '__main__':
    size = (1024, 1024)  # (256, 256)
    imgs = [None]*6

    sm = RosettaSystemModel()
    #img = 'ROS_CAM1_20140823T021833'
    img = 'ROS_CAM1_20140822T020718'

    real = cv2.imread(os.path.join(sm.asteroid.image_db_path, img + '_P.png'), cv2.IMREAD_GRAYSCALE)
    imgs[0] = cv2.resize(real, size)

    lblloader.load_image_meta(os.path.join(sm.asteroid.image_db_path, img + '.LBL'), sm)
    sm.swap_values_with_real_vals()

    render_engine = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=16)
    obj_idx = render_engine.load_object(sm.asteroid.hires_target_model_file, smooth=sm.asteroid.render_smooth_faces)
    ab = AlgorithmBase(sm, render_engine, obj_idx)
    imgs[1] = cv2.resize(ab.render(shadows=True, reflection=RenderEngine.REFLMOD_HAPKE), size)
    imgs[2] = np.zeros_like(imgs[1])

    render_engine = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    obj_idx = render_engine.load_object(sm.asteroid.target_model_file, smooth=sm.asteroid.render_smooth_faces)
    ab = AlgorithmBase(sm, render_engine, obj_idx)

    def render(fname):
        with open(fname, 'rb') as fh:
            noisy_model, _loaded_sm_noise = pickle.load(fh)
        render_engine.load_object(objloader.ShapeModel(data=noisy_model), obj_idx, smooth=sm.asteroid.render_smooth_faces)
        return cv2.resize(ab.render(shadows=True, reflection=RenderEngine.REFLMOD_LUNAR_LAMBERT), size)
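
# Small standalone helper sketch (not part of visnav) for tiling same-sized
# grayscale renderings, such as the imgs list above, into a comparison mosaic.
import numpy as np

def make_mosaic(imgs, rows=2, cols=3):
    template = next(i for i in imgs if i is not None)
    imgs = [i if i is not None else np.zeros_like(template) for i in imgs]
    return np.vstack([np.hstack(imgs[r * cols:(r + 1) * cols])
                      for r in range(rows)])

# e.g. cv2.imwrite('comparison.png', make_mosaic([np.zeros((64, 64), 'uint8')] * 6))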
Exemplo n.º 19
0
def match_ll_with_hapke(img_n=20,
                        iters=1,
                        init_noise=0.0,
                        verbose=True,
                        hapke_params=None,
                        ini_params=None):
    m_ll = RenderEngine.REFLMOD_LUNAR_LAMBERT
    m_hapke = RenderEngine.REFLMOD_HAPKE

    if hapke_params is not None:
        RenderEngine.REFLMOD_PARAMS[m_hapke] = hapke_params

    re = RenderEngine(512, 512)
    re.set_frustum(5, 5, 25 * 0.5, 1250)
    obj = ShapeModel(fname=os.path.join(DATA_DIR, 'test-ball.obj'))
    obj_idx = re.load_object(obj)
    pos = [0, 0, -70 * 0.8 * 2]

    ll_poly = 5
    real_ini_x = np.array(RenderEngine.REFLMOD_PARAMS[m_ll]
                          [:7]) if ini_params is None else ini_params[:7]
    real_ini_x = np.hstack((real_ini_x[0:ll_poly + 1], (real_ini_x[-1], )))
    scales = np.array((3e-03, 2e-05, 1e-06, 1e-08, 5e-11, 1))
    scales = np.hstack((scales[0:ll_poly], (scales[-1], )))

    def set_params(x):
        vals = [1] + list(
            np.array(x)[:-1] *
            scales[:-1]) + [0] * (5 - ll_poly) + [x[-1] * scales[-1], 0, 0, 0]
        RenderEngine.REFLMOD_PARAMS[m_ll] = vals

    # debug 1: real vs synth, 2: err img, 3: both
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0

        for phase_angle in np.radians(np.linspace(0, 150, img_n)):
            light = tools.q_times_v(tools.ypr_to_q(phase_angle, 0, 0),
                                    np.array([0, 0, -1]))
            synth1 = re.render(obj_idx,
                               pos,
                               np.quaternion(1, 0, 0, 0),
                               tools.normalize_v(light),
                               get_depth=False,
                               reflection=m_hapke)
            synth2 = re.render(obj_idx,
                               pos,
                               np.quaternion(1, 0, 0, 0),
                               tools.normalize_v(light),
                               get_depth=False,
                               reflection=m_ll)

            err_img = (synth1.astype('float') - synth2.astype('float'))**2
            err += np.mean(err_img)
            if debug:
                if debug % 2:
                    cv2.imshow(
                        'hapke vs ll',
                        np.concatenate((synth1.astype('uint8'), 255 * np.ones(
                            (synth2.shape[0], 1), dtype='uint8'), synth2),
                                       axis=1))
                if debug > 1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img / np.max(err_img))
                cv2.waitKey()
        err /= img_n
        if verbose:
            print('%s => %f' %
                  (', '.join(['%.4e' % i for i in np.array(x) * scales]), err))
        return err

    best_x = None
    best_err = float('inf')
    for i in range(iters):
        ini_x = tuple(real_ini_x[1:-1] / real_ini_x[0] + init_noise *
                      np.random.normal(0, 1, (len(scales) - 1, )) *
                      scales[:-1]) + (real_ini_x[-1] * real_ini_x[0], )

        if verbose:
            print('\n\n\n==== i:%d ====\n' % i)
        res = minimize(
            costfun,
            tuple(ini_x / scales),
            args=(0, verbose),
            #method="BFGS", options={'maxiter': 10, 'eps': 1e-3, 'gtol': 1e-3})
            method="Nelder-Mead",
            options={
                'maxiter': 120,
                'xtol': 1e-4,
                'ftol': 1e-4
            })
        #method="COBYLA", options={'rhobeg': 1.0, 'maxiter': 200, 'disp': False, 'catol': 0.0002})
        if not verbose:
            print('%s => %f' %
                  (', '.join(['%.5e' % i
                              for i in np.array(res.x) * scales]), res.fun))

        if res.fun < best_err:
            best_err = res.fun
            best_x = res.x

    if verbose:
        costfun(best_x, 3, verbose=True)

    x = (1, ) + tuple(best_x * scales)
    if verbose:
        p = np.linspace(0, 160, 100)
        L = get_graph_L(20, p)
        plt.plot(p, L, p, Lfun(x[:-1], p))
        plt.show()

    return x
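
# Standalone toy sketch of the parameter scaling trick used in the fit above:
# Nelder-Mead works on O(1) variables and the cost function maps them back to
# physical units (quadratic toy cost, not the Hapke vs Lunar-Lambert error).
import numpy as np
from scipy.optimize import minimize

scales = np.array([3e-3, 2e-5, 1.0])
true_x = np.array([6e-3, 4e-5, 0.5])

def cost(x_scaled):
    x = x_scaled * scales  # back to physical units
    return float(np.sum((x - true_x) ** 2))

res = minimize(cost, np.ones(3), method='Nelder-Mead',
               options={'maxiter': 200, 'xatol': 1e-6, 'fatol': 1e-9})
print(res.x * scales)  # should be close to true_x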
Exemplo n.º 20
0
class RenderScene(RenderAbstractObject):
    STAR_DB_URL = 'https://drive.google.com/uc?authuser=0&id=1-_7KAMKc4Xio0RbpiVWmcPSNyuuN8Z2b&export=download'
    STAR_DB = Path(os.path.join(os.path.dirname(__file__), '..', 'data', 'deep_space_objects.sqlite'))

    def __init__(self, name, render_dir, stars=True, lens_effects=False, flux_only=False, normalize=False,
                 hapke_params=RenderObject.HAPKE_PARAMS, verbose=True, debug=False):

        super().__init__(name)
        self._samples = 1
        self._width = None
        self._height = None

        self._render_dir = str(render_dir)
        self._file_format = None
        self._color_depth = None
        self._use_preview = None

        self._cams = {}
        self._objs = {}
        self._sun_loc = None
        self._renderer = None

        self.object_scale = 1000   # objects given in km, locations expected in meters
        self.flux_only = flux_only
        self.normalize = normalize
        self.stars = stars
        self.lens_effects = lens_effects
        self.hapke_params = hapke_params
        self.verbose = verbose
        self.debug = debug

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    def is_dirty(self):
        return super().is_dirty() or np.any([i is None for i, o in self._objs.values()])

    def prepare(self):
        self._check_params()

        if self.is_dirty():
            if self._renderer is not None:
                del self._renderer

            self._renderer = RenderEngine(self._width, self._height, antialias_samples=self._samples)
            if self.verbose:
                print('loading objects to engine...', end='', flush=True)

            for name, (_, obj) in self._objs.items():
                idx = self._renderer.load_object(obj.model)
                self._objs[name][0] = idx

            self.clear_dirty()
            if self.verbose:
                print('done')
        if self.stars:
            if not os.path.exists(RenderScene.STAR_DB):
                if self.verbose:
                    print('downloading star catalog...', end='', flush=True)
                RenderController.download_file(RenderScene.STAR_DB_URL, RenderScene.STAR_DB)
                if self.verbose:
                    print('done')

    def render(self, name_suffix):
        self.prepare()
        for i, o in self._objs.values():
            o.prepare(self)
        for c in self._cams.values():
            c.prepare(self)

        sun_sc_v = np.mean(np.array([o.loc - self._sun_loc for _, o in self._objs.values()]).reshape((-1, 3)), axis=0)
        sun_distance = np.linalg.norm(sun_sc_v)
        obj_idxs = [i for i, o in self._objs.values()]

        for cam_name, c in self._cams.items():
            rel_pos_v = {}
            rel_rot_q = {}
            for i, o in self._objs.values():
                rel_pos_v[i] = tools.q_times_v(c.q.conj(), o.loc - c.loc)
                rel_rot_q[i] = c.q.conj() * o.q

            # make sure correct order, correct scale
            rel_pos_v = [rel_pos_v[i]/self.object_scale for i in obj_idxs]
            rel_rot_q = [rel_rot_q[i] for i in obj_idxs]
            light_v = tools.q_times_v(c.q.conj(), tools.normalize_v(sun_sc_v))

            self._renderer.set_frustum(c.model.x_fov, c.model.y_fov, c.frustum_near, c.frustum_far)
            flux = TestLoop.render_navcam_image_static(None, self._renderer, obj_idxs, rel_pos_v, rel_rot_q,
                                                       light_v, c.q, sun_distance, cam=c.model, auto_gain=False,
                                                       use_shadows=True, use_textures=True, fluxes_only=True,
                                                       stars=self.stars, lens_effects=self.lens_effects,
                                                       reflmod_params=self.hapke_params, star_db=RenderScene.STAR_DB)

            image = flux if self.flux_only else c.model.sense(flux, exposure=c.exposure, gain=c.gain)

            if self.normalize:
                image /= np.max(image)

            if self.debug:
                sc = 1536/image.shape[0]
                img = cv2.resize(image, None, fx=sc, fy=sc) / (np.max(image) if self.flux_only else 1)
                cv2.imshow('result', img)
                cv2.waitKey()

            # save image
            self._save_img(image, cam_name, name_suffix)

    def _check_params(self):
        assert self._sun_loc is not None, 'Sun location not set for scene %s' % self.name
        assert self._width is not None, 'Common camera resolution width not set for scene %s' % self.name
        assert self._height is not None, 'Common camera resolution height not set for scene %s' % self.name
        assert self._file_format is not None, 'Output file format not set for scene %s' % self.name
        assert self._color_depth is not None, 'Output color depth not set for scene %s' % self.name
        assert self._use_preview is not None, 'Output preview setting not set for scene %s' % self.name
        assert len(self._cams) > 0, 'Scene %s does not have any cameras' % self.name
        assert len(self._objs) > 0, 'Scene %s does not have any objects' % self.name

    def _save_img(self, image, cam_name, name_suffix):
        file_ext = '.exr' if self._file_format == RenderController.FORMAT_EXR else '.png'
        filename = os.path.join(self._render_dir, self.name + "_" + cam_name + "_" + name_suffix + file_ext)

        if self._file_format == RenderController.FORMAT_PNG:
            maxval = 2 ** self._color_depth - 1  # e.g. 255 for 8-bit output
            image = np.clip(image * maxval, 0, maxval).astype('uint' + str(self._color_depth))
            cv2.imwrite(filename, image)
        else:
            cv2.imwrite(filename, image.astype(np.float32), (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))

    def set_samples(self, samples):
        supported = (1, 4, 9, 16)
        assert samples in supported, '%s samples are not supported, only the following are: %s' % (samples, supported)
        if self._samples != samples:
            self.set_dirty()
            self._samples = samples

    def set_resolution(self, res):
        if (self._width, self._height) != tuple(res):
            self._width, self._height = res
            self.set_dirty()
            for c in self._cams.values():
                c.set_dirty()

    def set_output_format(self, file_format, color_depth, use_preview):
        self._file_format = file_format
        self._color_depth = color_depth
        self._use_preview = use_preview

    def link_camera(self, cam: RenderCamera):
        self._cams[cam.name] = cam

    def link_object(self, obj: RenderObject):
        self._objs[obj.name] = [None, obj]

    def set_sun_location(self, loc):
        """
        :param loc: sun location in meters in the same frame (e.g. asteroid/comet centric) used for camera and object locations
        """
        self._sun_loc = np.array(loc)
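
# Quick standalone check of the PNG quantization step in _save_img above:
# a float image is scaled to the maximum value of the target bit depth and
# clipped before casting (helper name is illustrative only).
import numpy as np

def to_uint(image, color_depth=8):
    maxval = 2 ** color_depth - 1  # 255 for 8 bit, 65535 for 16 bit
    return np.clip(image * maxval, 0, maxval).astype('uint%d' % color_depth)

print(to_uint(np.array([[0.0, 0.5, 1.2]]), 8))  # [[  0 127 255]], values above 1 clip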
Exemplo n.º 21
0
if __name__ == '__main__':
    """
    This code probes the accuracy of the system model's "real" values.
    """

    from visnav.iotools import lblloader

    sm = RosettaSystemModel(rosetta_batch='mtp025')
    #img = 'ROS_CAM1_20140823T021833'
    #img = 'ROS_CAM1_20140822T020718'
    img = 'ROS_CAM1_20160208T070515'

    lblloader.load_image_meta(
        os.path.join(sm.asteroid.image_db_path, img + '.LBL'), sm)

    re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.target_model_file, smooth=False)

    algo = KeypointAlgo(sm, re, obj_idx)
    model = RenderEngine.REFLMOD_LUNAR_LAMBERT
    size = (1024, 1024)  # (256, 256)

    orig_real = cv2.imread(
        os.path.join(sm.asteroid.image_db_path, img + '_P.png'),
        cv2.IMREAD_GRAYSCALE)
    real = cv2.resize(orig_real, size)
    bar = 255 * np.ones((real.shape[0], 1), dtype='uint8')

    sm.reset_to_real_vals()
    orig_pose = cv2.resize(algo.render(shadows=True, reflection=model), size)
Exemplo n.º 22
0
if __name__ == '__main__':
    if len(sys.argv) < 3 or sys.argv[2] not in ('hi', 'med', 'lo'):
        print('USAGE: python %s <mission> <sm-quality: hi|med|lo>' %
              (sys.argv[0], ))
        quit()

    mission = sys.argv[1]
    sm_quality = sys.argv[2]
    noise = {'hi': '', 'med': 'lo', 'lo': 'hi'}[sm_quality]

    sm = get_system_model(mission)
    with open(sm.asteroid.constant_noise_shape_model[noise], 'rb') as fh:
        noisy_model, sm_noise = pickle.load(fh)

    renderer = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    obj_idx = renderer.load_object(objloader.ShapeModel(data=noisy_model),
                                   smooth=sm.asteroid.render_smooth_faces)
    algo = AlgorithmBase(sm, renderer, obj_idx)
    cache_path = os.path.join(CACHE_DIR, mission)

    # i = 0
    for fn in os.listdir(cache_path):
        m = re.match('^(' + mission + r'_(\d+))\.lbl$', fn)
        if m and float(m[2]) < 2000:
            base_path = os.path.join(cache_path, m[1])
            sm.load_state(base_path + '.lbl')

            # save state in a more user-friendly way
            sm.export_state(base_path + '_meta.csv')