Example #1
    def render_navcam_image_static(sm, renderer, obj_idxs, rel_pos_v, rel_rot_q, light_v, use_textures=False):
        model = RenderEngine.REFLMOD_HAPKE
        RenderEngine.REFLMOD_PARAMS[model] = sm.asteroid.reflmod_params[model]
        img, depth = renderer.render(obj_idxs, rel_pos_v, rel_rot_q, light_v,
                                     get_depth=True, shadows=True, textures=use_textures, reflection=model)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        # apply the same gamma correction as the available Rosetta navcam images have
        img = ImageProc.adjust_gamma(img, 1.8)

        # coef=2 gives reasonably many stars, star brightness was tuned without gamma correction
        img = ImageProc.add_stars(img.astype('float'), mask=depth>=sm.max_distance-0.1, coef=2.5)

        # ratio seems too low, but the blurring matches actual Rosetta navcam images
        img = ImageProc.apply_point_spread_fn(img, ratio=0.2)

        # add background noise
        img = ImageProc.add_ccd_noise(img, mean=7, sd=2)
        img = np.clip(img, 0, 255).astype('uint8')

        if False:  # debug: preview the generated image
            cv2.imshow('test', img)
            cv2.waitKey()
            quit()

        return img
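ImageProc.adjust_gamma, ImageProc.add_stars and the other helpers above are project code. As a point of reference, a common LUT-based gamma correction, which ImageProc.adjust_gamma presumably resembles (an assumption, not the project's exact implementation), looks like this:

    import numpy as np

    def adjust_gamma_sketch(img, gamma):
        # hypothetical stand-in for ImageProc.adjust_gamma: map each 8-bit
        # intensity through out = 255 * (in / 255) ** (1 / gamma)
        inv_gamma = 1.0 / gamma
        table = ((np.arange(256) / 255.0) ** inv_gamma * 255).astype('uint8')
        return table[img]

Under that convention, gamma=1.8 brightens midtones, mimicking the gamma already present in the archived navcam images.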
Example #2
    def loadTargetImage(self, src, remove_bg=True):
        tmp = cv2.imread(src, cv2.IMREAD_GRAYSCALE)
        if tmp is None:
            raise Exception("Can't load image from file %s" % (src,))

        cam = self.systemModel.cam
        if tmp.shape != (cam.height, cam.width):
            # visit fails to generate 1024-pixel-high images
            tmp = cv2.resize(tmp,
                             None,
                             fx=cam.width / tmp.shape[1],
                             fy=cam.height / tmp.shape[0],
                             interpolation=cv2.INTER_CUBIC)

        if BATCH_MODE and self.add_image_noise and self._noise_image:
            tmp = ImageProc.add_noise_to_image(tmp, self._noise_image)

        self.image_file = src
        if remove_bg:
            self.full_image, h, th = ImageProc.process_target_image(tmp)
            self.image_bg_threshold = th
            self.parent().centroid.bg_threshold = th
        else:
            self.full_image = tmp
            self.image_bg_threshold = None
            self.parent().centroid.bg_threshold = None

        self.setImageZoomAndResolution(im_scale=self.im_def_scale)
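A standalone sketch of the load-and-resize step above, using an explicit target size instead of the fx/fy scale factors (the file name and camera resolution are made up):

    import cv2

    cam_w, cam_h = 1024, 1024  # assumed camera resolution
    img = cv2.imread('target.png', cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise IOError("Can't load image from file target.png")
    if img.shape != (cam_h, cam_w):
        # cv2.resize takes dsize as (width, height)
        img = cv2.resize(img, (cam_w, cam_h), interpolation=cv2.INTER_CUBIC)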
Example #3
    def render(self, obj_idxs, rel_pos_v, rel_rot_q, light_v, get_depth=False, shadows=True, textures=True,
               gamma=1.0, reflection=REFLMOD_LUNAR_LAMBERT):

        obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
        rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
        rel_rot_q = np.array(rel_rot_q).reshape((-1,))
        light_v = np.array(light_v)
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[0], "obj_idxs, rel_pos_v and rel_rot_q dimensions don't match"

        if shadows:
            self._render_shadowmap(obj_idxs, rel_rot_q, light_v)

        self._fbo.use()
        self._ctx.enable(moderngl.DEPTH_TEST)
        self._ctx.enable(moderngl.CULL_FACE)
        self._ctx.front_face = 'ccw'  # cull back faces
        self._ctx.clear(0, 0, 0, float('inf'))
        if shadows:
            self._shadow_map.use(RenderEngine._LOC_SHADOW_MAP)
            self._prog['shadow_map'].value = RenderEngine._LOC_SHADOW_MAP

        for i, obj_idx in enumerate(obj_idxs):
            self._set_params(obj_idx, rel_pos_v[i], rel_rot_q[i], light_v, shadows, textures, reflection)
            self._objs[obj_idx].render()

        if self._samples > 0:
            self._ctx.copy_framebuffer(self._fbo2, self._fbo)
            fbo = self._fbo2
            dbo = self._dbo2
        else:
            fbo = self._fbo
            dbo = self._dbo

        data = np.frombuffer(fbo.read(components=3, alignment=1), dtype='u1').reshape((self._height, self._width, 3))
        data = np.flipud(data)

        if get_depth:
            a = -(self._frustum_far - self._frustum_near) / (2.0 * self._frustum_far * self._frustum_near)
            b = (self._frustum_far + self._frustum_near) / (2.0 * self._frustum_far * self._frustum_near)
            depth = np.frombuffer(dbo.read(alignment=1), dtype='f4').reshape((self._height, self._width))
            depth = np.divide(1.0, (2.0 * a) * depth - (a - b))  # 1/((2*X-1)*a+b)
            depth = np.flipud(depth)

        if gamma != 1.0:
            data = ImageProc.adjust_gamma(data, gamma)

        return (data, depth) if get_depth else data
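The depth post-processing above inverts the perspective projection to recover eye-space distance. A self-contained check of the formula, with made-up near/far planes:

    import numpy as np

    near, far = 0.1, 1000.0  # assumed frustum planes
    a = -(far - near) / (2.0 * far * near)
    b = (far + near) / (2.0 * far * near)
    # window-space depth X in [0, 1] maps to eye distance 1/((2*X - 1)*a + b)
    X = np.array([0.0, 0.5, 1.0])
    print(1.0 / ((2.0 * X - 1.0) * a + b))  # -> [0.1, ~0.2, 1000.0]

X=0 recovers the near plane and X=1 the far plane, confirming the constants.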
Example #4
    def adjust_iteratively(self, sce_img, outfile=None, **kwargs):
        self.debug_filebase = outfile
        self._bg_threshold = kwargs.get('bg_threshold', self._bg_threshold)
        sce_img = self.maybe_load_scene_image(sce_img)

        if DEBUG:
            cv2.imshow('target img', sce_img)

        self.system_model.spacecraft_pos = (0, 0, -self.system_model.min_med_distance)
        for i in range(self.MAX_ITERATIONS):
            ox, oy, oz = self.system_model.spacecraft_pos
            od = math.sqrt(ox**2 + oy**2 + oz**2)
            
            if not DEBUG:
                self.adjust(sce_img)
            else:
                try:
                    self.adjust(sce_img)
                except PositioningException as e:
                    print(str(e))
                    break
                finally:
                    cv2.imshow('rendered img', self._ref_img)
                    cv2.waitKey()
            
            nx, ny, nz = self.system_model.spacecraft_pos
            ch = math.sqrt((nx-ox)**2 + (ny-oy)**2 + (nz-oz)**2)
            if DEBUG:
                print('i%d: d0=%.2f, ch=%.2f, rel_ch=%.2f%%'%(i, od, ch, ch/od*100))
            if ch/od < self.ITERATION_TOL:
                break
        
        if self.CHECK_RESULT_VALIDITY:
            result_quality = ImageProc.norm_xcorr(sce_img, self._ref_img)
            if result_quality < self.MIN_RESULT_XCORR:
                raise PositioningException('Result failed quality test with score: %.3f'%(result_quality,))
        
        if BATCH_MODE and self.debug_filebase:
            img = self.render(shadows=self.RENDER_SHADOWS)
            cv2.imwrite(self.debug_filebase+'r.png', img)

        if DEBUG:
            cv2.waitKey()
            cv2.destroyAllWindows()
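A minimal sketch of the loop's stopping rule: iterate until the relative change in spacecraft position drops below ITERATION_TOL (the positions and tolerance here are made up):

    import math

    ITERATION_TOL = 0.01  # assumed 1 % tolerance
    old_pos = (0.0, 0.0, -10.0)
    new_pos = (0.05, 0.0, -10.0)
    od = math.sqrt(sum(c**2 for c in old_pos))
    ch = math.sqrt(sum((n - o)**2 for n, o in zip(new_pos, old_pos)))
    print(ch / od < ITERATION_TOL)  # True: a 0.5 % change, so iteration stops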
Example #5
    def setImageZoomAndResolution(self,
                                  im_xoff=0,
                                  im_yoff=0,
                                  im_width=None,
                                  im_height=None,
                                  im_scale=1):

        self.im_xoff = im_xoff
        self.im_yoff = im_yoff
        self.im_width = im_width or self.systemModel.cam.width
        self.im_height = im_height or self.systemModel.cam.height
        self.im_scale = im_scale

        self.image = ImageProc.crop_and_zoom_image(self.full_image,
                                                   self.im_xoff, self.im_yoff,
                                                   self.im_width,
                                                   self.im_height,
                                                   self.im_scale)
        self._image_h = self.image.shape[0]
        self._image_w = self.image.shape[1]

        if self._show_target_image:
            # form _gl_image that is used for rendering
            # black => 0 alpha, non-black => 0.5 alpha (128)
            im = self.image.copy()
            alpha = np.zeros(im.shape, im.dtype)
            #im[im > 0] = 255
            alpha[im > 0] = 128
            self._gl_image = np.flipud(cv2.merge(
                (im, im, im, alpha))).tobytes()

        self.updateFrustum()

        # WORK-AROUND: for some reason the new frustum isn't used unless the window is resized
        s = self.parent().size()
        self.parent().resize(s.width() + 1, s.height())
        self.parent().resize(s.width(), s.height())
        self.update()
        QCoreApplication.processEvents()
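A standalone sketch of the grayscale-to-RGBA conversion above, where black pixels get alpha 0 and everything else alpha 128 (about 0.5):

    import numpy as np
    import cv2

    im = np.zeros((4, 4), dtype='uint8')
    im[1:3, 1:3] = 200                      # a small non-black patch
    alpha = np.zeros(im.shape, im.dtype)
    alpha[im > 0] = 128
    gl_image = np.flipud(cv2.merge((im, im, im, alpha))).tobytes()
    print(len(gl_image))                    # 4 * 4 * 4 = 64 bytes of RGBA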
Example #6
    def remove_background(self, img):
        res_img, h, th = ImageProc.process_target_image(img)
        return res_img, th
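ImageProc.process_target_image is project code; a minimal stand-in, assuming it thresholds the background away and returns the threshold it used (this reimplementation is a guess, not the project's actual method):

    import cv2

    def remove_background_sketch(img):
        # hypothetical stand-in: Otsu picks a threshold, THRESH_TOZERO
        # zeroes everything below it and keeps the rest unchanged
        th, res = cv2.threshold(img, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)
        return res, th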
Example #7
    def solve_pnp(self,
                  orig_sce_img,
                  outfile,
                  feat=ORB,
                  use_feature_db=False,
                  adjust_sc_rot=False,
                  add_noise=False,
                  scale_cam_img=False,
                  vary_scale=False,
                  match_mask_params=None,
                  processing=True,
                  win=35,
                  sigma_col=5,
                  sigma_space=10,
                  sigma_coeff=100,
                  **kwargs):

        # set max mem usable by reference features; scene features use the rest of MAX_WORK_MEM
        ref_max_mem = KeypointAlgo.FDB_MAX_MEM if use_feature_db else KeypointAlgo.MAX_WORK_MEM / 2
        sm = self.system_model
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        self._render_z = -sm.min_med_distance
        init_z = kwargs.get('init_z', self._render_z)
        ref_img_sc = (min(1, self._render_z / init_z)
                      * (sm.view_width if scale_cam_img else self._cam.width)
                      / sm.view_width)
        self.extra_values = None

        if outfile is not None:
            self.debug_filebase = outfile + (self.DEBUG_IMG_POSTFIX
                                             if isinstance(orig_sce_img, str)
                                             else '')

        if self.est_real_ast_orient:
            # so that the rotation of 67P can be tracked
            sm.reset_to_real_vals()

        if use_feature_db and self._fdb_helper is None:
            from algo.fdbgen import FeatureDatabaseGenerator
            self._fdb_helper = FeatureDatabaseGenerator(
                self.system_model, self.render_engine, self.obj_idx)

        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)

        if add_noise:
            self._shape_model_rng = np.max(
                np.ptp(sm.asteroid.real_shape_model.vertices, axis=0))

        self.timer = Stopwatch()
        self.timer.start()

        if use_feature_db:
            if KeypointAlgo.FDB_REAL:
                # find correct set of keypoints & descriptors from features db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._query_fdb(feat)
            else:
                # calculate on-the-fly exactly the same features that would be returned from a feature db
                ref_desc, ref_kp_3d, ref_kp, ref_img = self._fake_fdb(feat)
        else:
            # render model image
            ref_img, depth_result = self.render_ref_img(ref_img_sc)

            if False:
                # normalize ref_img to match sce_img
                ref_img = ImageProc.equalize_brightness(ref_img,
                                                        orig_sce_img,
                                                        percentile=99.999,
                                                        image_gamma=1.8)
            if processing:
                #print("\033[0;32mINFO\033[00m: Bilateral Filter Applied")
                ref_img = ImageProc.bilateral_filtering(
                    ref_img, win, sigma_col, sigma_space, sigma_coeff)
                orig_sce_img = ImageProc.bilateral_filtering(
                    orig_sce_img, win, sigma_col, sigma_space, sigma_coeff)
            if False:  # disabled: undo the 1.8 gamma on both images before feature detection
                gamma = 1.0 / 1.8
                ref_img = ImageProc.adjust_gamma(ref_img, gamma)
                orig_sce_img = ImageProc.adjust_gamma(orig_sce_img, gamma)

            # get keypoints and descriptors
            ee = sm.pixel_extent(abs(self._render_z))
            ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(
                ref_img,
                feat,
                maxmem=ref_max_mem,
                max_feats=KeypointAlgo.MAX_FEATURES,
                for_ref=True,
                expected_pixel_extent=ee)

        if BATCH_MODE and self.debug_filebase:
            # save start situation in log archive
            self.timer.stop()
            img1 = cv2.resize(orig_sce_img, (sm.view_width, sm.view_height))
            img2 = cv2.resize(ref_img, (sm.view_width, sm.view_height))
            cv2.imwrite(self.debug_filebase + 'a.png',
                        np.concatenate((img1, img2), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((img1, img2), axis=1))
            self.timer.start()

        # AKAZE, SIFT, SURF are truly scale invariant; couldn't get ORB to work as well
        vary_scale = vary_scale if feat == self.ORB else False

        if len(ref_kp) < KeypointAlgo.MIN_FEATURES:
            raise PositioningException(
                'Too few (%d) reference features found' % (len(ref_kp), ))

        ok = False
        for i in range(self.MAX_SCENE_SCALE_STEPS):
            try:
                # resize scene image if necessary
                sce_img_sc = ((sm.view_width if scale_cam_img else self._cam.width)
                              / self._cam.width / self.SCENE_SCALE_STEP**i)
                if np.isclose(sce_img_sc, 1):
                    sce_img = orig_sce_img
                else:
                    sce_img = cv2.resize(orig_sce_img,
                                         None,
                                         fx=sce_img_sc,
                                         fy=sce_img_sc,
                                         interpolation=cv2.INTER_CUBIC)

                # detect features in scene image
                sce_max_mem = KeypointAlgo.MAX_WORK_MEM - (
                    KeypointAlgo.BYTES_PER_FEATURE[feat] + 12) * len(ref_desc)
                ee = (sm.pixel_extent(abs(match_mask_params[2]))
                      if match_mask_params is not None else 0)
                sce_kp, sce_desc, self._latest_detector = KeypointAlgo.detect_features(
                    sce_img,
                    feat,
                    maxmem=sce_max_mem,
                    max_feats=KeypointAlgo.MAX_FEATURES,
                    expected_pixel_extent=ee)
                if len(sce_kp) < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'Too few (%d) scene features found' % (len(sce_kp), ))

                # match descriptors
                try:
                    mask = None
                    if match_mask_params is not None:
                        mask = KeypointAlgo.calc_match_mask(
                            sm, sce_kp, ref_kp, self._render_z, sce_img_sc,
                            ref_img_sc, *match_mask_params)
                    matches = KeypointAlgo.match_features(
                        sce_desc,
                        ref_desc,
                        self._latest_detector.defaultNorm(),
                        mask=mask,
                        method='brute')
                    error = None
                except PositioningException as e:
                    matches = []
                    error = e

                # debug by drawing matches
                if not BATCH_MODE or DEBUG:
                    print('matches: %s/%s' %
                          (len(matches), min(len(sce_kp), len(ref_kp))),
                          flush=True,
                          end=", ")
                self._draw_matches(sce_img,
                                   sce_img_sc,
                                   sce_kp,
                                   ref_img,
                                   ref_img_sc,
                                   ref_kp,
                                   matches,
                                   pause=False,
                                   show=DEBUG)

                if error is not None:
                    raise error

                # select matched scene feature image coordinates
                sce_kp_2d = np.array([
                    tuple(np.divide(sce_kp[m.queryIdx].pt, sce_img_sc))
                    for m in matches
                ], dtype='float')

                # prepare reference feature 3d coordinates (for only matched features)
                if use_feature_db:
                    ref_kp_3d = ref_kp_3d[[m.trainIdx for m in matches], :]
                    if add_noise:
                        # add noise to noiseless 3d ref points from fdb
                        self.timer.stop()
                        ref_kp_3d, self.sm_noise, _ = tools.points_with_noise(
                            ref_kp_3d,
                            only_z=True,
                            noise_lv=SHAPE_MODEL_NOISE_LV[add_noise],
                            max_rng=self._shape_model_rng)
                        self.timer.start()
                else:
                    # get feature 3d points using 3d model
                    ref_kp_3d = KeypointAlgo.inverse_project(
                        sm, [ref_kp[m.trainIdx].pt for m in matches],
                        depth_result, self._render_z, ref_img_sc)

                if KeypointAlgo.DISCARD_OFF_OBJECT_FEATURES:
                    I = np.where(np.logical_not(np.isnan(ref_kp_3d[:, 0])))[0]
                    if len(I) < self.MIN_FEATURES:
                        raise PositioningException('Too few matches found')
                    sce_kp_2d = sce_kp_2d[I, :]
                    ref_kp_3d = ref_kp_3d[I, :]
                    matches = [matches[i] for i in I]

                # finally solve pnp with ransac
                rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(
                    sm, sce_kp_2d, ref_kp_3d, self._ransac_err)

                # debug by drawing inlier matches
                self._draw_matches(sce_img,
                                   sce_img_sc,
                                   sce_kp,
                                   ref_img,
                                   ref_img_sc,
                                   ref_kp, [matches[i[0]] for i in inliers],
                                   label='c) inliers',
                                   pause=self._pause)

                inlier_count = self.count_inliers(sce_kp, ref_kp, matches,
                                                  inliers)
                if DEBUG:
                    print('inliers: %s/%s, ' % (inlier_count, len(matches)),
                          end='',
                          flush=True)
                if inlier_count < KeypointAlgo.MIN_FEATURES:
                    raise PositioningException(
                        'RANSAC algorithm was left with too few inliers')

                # don't try again if enough inliers were found
                ok = True
                break

            except PositioningException as e:
                if not vary_scale:
                    raise e
                # maybe try again using a scaled-down scene image

        if not ok:
            raise PositioningException(
                'Not enough inliers even after scaling the scene image down x%.1f'
                % (1 / sce_img_sc))
        elif vary_scale:
            print('success at x%.1f' % (1 / sce_img_sc))

        self.timer.stop()

        # set model params to solved pose & pos
        self._set_sc_from_ast_rot_and_trans(rvec,
                                            tvec,
                                            self.latest_discretization_err_q,
                                            rotate_sc=adjust_sc_rot)

        # debugging
        if not BATCH_MODE or DEBUG:
            rp_err = KeypointAlgo.reprojection_error(self._cam, sce_kp_2d,
                                                     ref_kp_3d, inliers, rvec,
                                                     tvec)
            sh_err = sm.calc_shift_err()

            print(
                'repr-err: %.2f, rel-rot-err: %.2f°, dist-err: %.2f%%, lat-err: %.2f%%, shift-err: %.1fm'
                % (
                    rp_err,
                    math.degrees(sm.rel_rot_err()),
                    sm.dist_pos_err() * 100,
                    sm.lat_pos_err() * 100,
                    sh_err * 1000,
                ),
                flush=True)

        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive
            res_img = self.render(shadows=self.RENDER_SHADOWS)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase + 'd.png',
                        np.concatenate((sce_img, res_img), axis=1))
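The function culminates in KeypointAlgo.solve_pnp_ransac; a self-contained sketch of that step using OpenCV's solvePnPRansac directly (the camera matrix and points are made up):

    import numpy as np
    import cv2

    pts_3d = np.random.uniform(-1, 1, (20, 3)).astype('float32')
    cam_mx = np.array([[1000, 0, 512],
                       [0, 1000, 512],
                       [0, 0, 1]], dtype='float32')
    # project the 3d points with a known pose to get matching 2d points
    rvec0, tvec0 = np.zeros(3), np.array([0.0, 0.0, 5.0])
    pts_2d, _ = cv2.projectPoints(pts_3d, rvec0, tvec0, cam_mx, None)
    ok, rvec, tvec, inliers = cv2.solvePnPRansac(
        pts_3d, pts_2d.astype('float32'), cam_mx, None, reprojectionError=8.0)
    print(ok, len(inliers))  # recovers the pose with all points as inliers

With noiseless correspondences every point ends up an inlier; in the real pipeline the RANSAC reprojection error (self._ransac_err) decides which matches survive.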
Example #8
    if False:  # debug: preview 36 rotation steps of the model
        for i in range(36):
            image = re.render(obj_idx, [0, 0, -sm.min_med_distance*3], q**i, np.array([1., 0., 0.]), get_depth=False)
            cv2.imshow('image', image)
            cv2.waitKey()

    elif True:
        RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = DidymosPrimary.HAPKE_PARAMS
        RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_LUNAR_LAMBERT] = DidymosPrimary.LUNAR_LAMBERT_PARAMS
        imgs = ()
        i = 1
        th = math.radians(100)
        #for i in range(4, 7):
        for th in np.linspace(math.radians(90), 0, 4):
            imgs_j = ()
            for j, hapke in enumerate((True, False)):
                model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
                if hapke and j == 0:
                    RenderEngine.REFLMOD_PARAMS[model][9] = 0
                if hapke and j == 1:  # NOTE: never true here, since hapke is True only when j == 0
                    RenderEngine.REFLMOD_PARAMS[model][9] = 1
                light = tools.q_times_v(tools.ypr_to_q(th, 0, 0), np.array([0, 0, -1]))
                image = re.render(obj_idx, pos, q**i, tools.normalize_v(light), get_depth=False, reflection=model)
                image = ImageProc.adjust_gamma(image, 1.8)
                imgs_j += (image,)
            imgs += (np.vstack(imgs_j),)

        #cv2.imshow('depth', np.clip((sm.min_med_distance+sm.asteroid.mean_radius - depth)/5, 0, 1))
        cv2.imshow('images', np.hstack(imgs))
        cv2.waitKey()
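tools.ypr_to_q and tools.q_times_v are project helpers; a standalone sketch of the light-direction sweep, rotating [0, 0, -1] by th about one axis with a plain rotation matrix (which axis the project's yaw maps to is an assumption here):

    import math
    import numpy as np

    def rot_x(v, th):
        # rotate vector v about the x-axis by angle th (radians)
        c, s = math.cos(th), math.sin(th)
        return np.array([[1, 0, 0], [0, c, -s], [0, s, c]]).dot(v)

    for th in np.linspace(math.radians(90), 0, 4):
        print(rot_x(np.array([0.0, 0.0, -1.0]), th))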
Example #9
def est_refl_model(hapke=True, iters=1, init_noise=0.0, verbose=True):
    sm = RosettaSystemModel()
    imgsize = (512, 512)
    imgs = {
        'ROS_CAM1_20140831T104353': 3.2,   # 60, 3.2s
        'ROS_CAM1_20140831T140853': 3.2,   # 62, 3.2s
        'ROS_CAM1_20140831T103933': 3.2,   # 65, 3.2s
        'ROS_CAM1_20140831T022253': 3.2,   # 70, 3.2s
        'ROS_CAM1_20140821T100719': 2.8,   # 75, 2.8s
        'ROS_CAM1_20140821T200718': 2.0,   # 80, 2.0s
        'ROS_CAM1_20140822T113854': 2.0,   # 85, 2.0s
        'ROS_CAM1_20140823T021833': 2.0,   # 90, 2.0s
        'ROS_CAM1_20140819T120719': 2.0,   # 95, 2.0s
        'ROS_CAM1_20140824T021833': 2.8,   # 100, 2.8s
        'ROS_CAM1_20140824T020853': 2.8,   # 105, 2.8s
        'ROS_CAM1_20140824T103934': 2.8,   # 110, 2.8s
        'ROS_CAM1_20140818T230718': 2.0,   # 113, 2.0s
        'ROS_CAM1_20140824T220434': 2.8,   # 120, 2.8s
        'ROS_CAM1_20140828T020434': 2.8,   # 137, 2.8s
        'ROS_CAM1_20140827T140434': 3.2,   # 145, 3.2s
        'ROS_CAM1_20140827T141834': 3.2,   # 150, 3.2s
        'ROS_CAM1_20140827T061834': 3.2,   # 155, 3.2s
        'ROS_CAM1_20140827T021834': 3.2,   # 157, 3.2s
        'ROS_CAM1_20140826T221834': 2.8,   # 160, 2.8s
    }

    target_exposure = np.min(list(imgs.values()))
    for img, exposure in imgs.items():
        real = cv2.imread(os.path.join(sm.asteroid.image_db_path, img + '_P.png'), cv2.IMREAD_GRAYSCALE)
        real = ImageProc.adjust_gamma(real, 1/1.8)
        #dark_px_lim = np.percentile(real, 0.1)
        #dark_px = np.mean(real[real<=dark_px_lim])
        real = cv2.resize(real, imgsize)
        # remove dark pixel intensity and normalize based on exposure
        #real = real - dark_px
        #real *= (target_exposure / exposure)
        imgs[img] = real

    re = RenderEngine(*imgsize, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.hires_target_model_file, smooth=False)
    ab = AlgorithmBase(sm, re, obj_idx)

    model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
    defs = RenderEngine.REFLMOD_PARAMS[model]

    if hapke:
        # L, th, w, b (scattering anisotropy), c (scattering direction from forward to back), B0, hs
        #real_ini_x = [515, 16.42, 0.3057, 0.8746]
        sppf_n = 2
        real_ini_x = defs[:2] + defs[3:3+sppf_n]
        scales = np.array((500, 20, 3e-1, 3e-1))[:2+sppf_n]
    else:
        ll_poly = 5
        #real_ini_x = np.array(defs[:7])
        real_ini_x = np.array((9.95120e-01, -6.64840e-03, 3.96267e-05, -2.16773e-06, 2.08297e-08, -5.48768e-11, 1))  # theta=20
        real_ini_x = np.hstack((real_ini_x[0:ll_poly+1], (real_ini_x[-1],)))
        scales = np.array((3e-03, 2e-05, 1e-06, 1e-08, 5e-11, 1))
        scales = np.hstack((scales[0:ll_poly], (scales[-1],)))

    def set_params(x):
        if hapke:
            # optimize J, th, w, b, (c), B_SH0, hs
            xsc = list(np.array(x) * scales)
            vals = xsc[:2] + [defs[2]] + xsc[2:] + defs[len(xsc)+1:]
        else:
            vals = [1] + list(np.array(x)[:-1] * scales[:-1]) + [0]*(5-ll_poly) + [x[-1]*scales[-1], 0, 0, 0]
        RenderEngine.REFLMOD_PARAMS[model] = vals

    # debug 1: real vs synth, 2: err img, 3: both
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0
        for file, real in imgs.items():
            lblloader.load_image_meta(os.path.join(sm.asteroid.image_db_path, file + '.LBL'), sm)
            sm.swap_values_with_real_vals()
            synth2 = ab.render(shadows=True, reflection=model, gamma=1)
            err_img = (synth2.astype('float') - real)**2
            lim = np.percentile(err_img, 99)
            err_img[err_img > lim] = 0
            err += np.mean(err_img)
            if debug:
                if debug%2:
                    cv2.imshow('real vs synthetic', np.concatenate((real.astype('uint8'), 255*np.ones((real.shape[0], 1), dtype='uint8'), synth2), axis=1))
                if debug>1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img/np.max(err_img))
                cv2.waitKey()
        err /= len(imgs)
        if verbose:
            print('%s => %f' % (', '.join(['%.4e' % i for i in np.array(x)*scales]), err))
        return err

    best_x = None
    best_err = float('inf')
    for i in range(iters):
        if hapke:
            ini_x = tuple(real_ini_x + init_noise*np.random.normal(0, 1, (len(scales),))*scales)
        else:
            ini_x = tuple(real_ini_x[1:-1]/real_ini_x[0] + init_noise*np.random.normal(0, 1, (len(scales)-1,))*scales[:-1]) + (real_ini_x[-1]*real_ini_x[0],)

        if verbose:
            print('\n\n\n==== i:%d ====\n'%i)
        res = minimize(costfun, tuple(ini_x/scales), args=(0, verbose),
                       #method="BFGS", options={'maxiter': 10, 'eps': 1e-3, 'gtol': 1e-3})
                       # xatol/fatol are the scipy >= 1.0 names of the Nelder-Mead tolerances
                       method="Nelder-Mead", options={'maxiter': 120, 'xatol': 1e-4, 'fatol': 1e-4})
                       #method="COBYLA", options={'rhobeg': 1.0, 'maxiter': 200, 'disp': False, 'catol': 0.0002})
        if not verbose:
            print('%s => %f' % (', '.join(['%.5e' % i for i in np.array(res.x)*scales]), res.fun))

        if res.fun < best_err:
            best_err = res.fun
            best_x = res.x

    if verbose:
        costfun(best_x, 3, verbose=True)

    if hapke:
        x = tuple(best_x * scales)
    else:
        x = (1,) + tuple(best_x * scales)
        if verbose:
            p = np.linspace(0, 160, 100)
            L = get_graph_L(20, p)
            plt.plot(p, L, p, Lfun(x[:-1], p))
            plt.show()

    return x
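The optimizer call above scales the parameters to order one before handing them to Nelder-Mead and scales them back inside the cost function; a minimal self-contained sketch of the same pattern (the scales and target values are made up):

    import numpy as np
    from scipy.optimize import minimize

    scales = np.array([500.0, 20.0, 0.3])       # bring parameters to O(1)
    target = np.array([515.0, 16.4, 0.31])      # made-up optimum

    def costfun(x):
        vals = np.array(x) * scales             # back to physical units
        return float(np.sum((vals - target)**2))

    res = minimize(costfun, np.ones(3), method='Nelder-Mead',
                   options={'maxiter': 200, 'xatol': 1e-6, 'fatol': 1e-6})
    print(res.x * scales, res.fun)              # ends up near the target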