Example #1
def capture(video):
    cap = cv2.VideoCapture(video)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        img = cv2.resize(frame, (W, H))
        frame = Frame(img, K)
        frames.append(frame)

        if len(frames) < 2:
            continue

        f1 = frames[-1]
        f2 = frames[-2]
        idx1, idx2, Rt = match_frames(f1, f2, K)

        f1.pose = np.dot(Rt, f2.pose)
        print(f1.pose)

        points4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
        points4d /= points4d[:, 3:]

        display_frame(img, f1.kps[idx1], f2.kps[idx2])

    cap.release()
    cv2.destroyAllWindows()
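
Most of the snippets on this page call a triangulate() helper that the scraped sources do not include. Below is a minimal sketch of a linear (DLT) triangulation consistent with how it is called here, assuming 4x4 world-to-camera poses and Nx2 normalized keypoints; each project may ship its own variant:

import numpy as np

def triangulate(pose1, pose2, pts1, pts2):
    # Linear (DLT) triangulation: solve A x = 0 per correspondence via SVD.
    # pose1/pose2: 4x4 world-to-camera matrices; pts1/pts2: Nx2 normalized coords.
    ret = np.zeros((pts1.shape[0], 4))
    for i, (p1, p2) in enumerate(zip(pts1, pts2)):
        A = np.zeros((4, 4))
        A[0] = p1[0] * pose1[2] - pose1[0]
        A[1] = p1[1] * pose1[2] - pose1[1]
        A[2] = p2[0] * pose2[2] - pose2[0]
        A[3] = p2[1] * pose2[2] - pose2[1]
        _, _, vt = np.linalg.svd(A)
        ret[i] = vt[3]  # null vector of A = homogeneous 3-D point
    return ret

The result is one homogeneous 4-vector per match, which is why the callers divide by pts4d[:, 3:] afterwards.
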
Example #2
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(img, k)
    frames.append(frame)
    if len(frames) <= 1:
        return

    idx1, idx2, Rt = match_frames(frames[-1], frames[-2])
    frames[-1].pose = np.dot(Rt, frames[-2].pose)

    # homogeneous 3-D coords
    pts4d = triangulate(frames[-1].pose, frames[-2].pose, frames[-1].pts[idx1],
                        frames[-2].pts[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(p)
        pt.add_observation(frames[-1], idx1[i])
        pt.add_observation(frames[-2], idx2[i])

    for pt1, pt2 in zip(frames[-1].pts[idx1], frames[-2].pts[idx2]):
        u1, v1 = denormalize(k, pt1)
        u2, v2 = denormalize(k, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
    disp.paint(img)
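
denormalize() (used in almost every display loop above and below) is likewise assumed by these snippets. A plausible sketch, assuming K (lowercase k in this example) is the 3x3 intrinsic matrix and that cv2.circle/cv2.line need integer pixel coordinates:

import numpy as np

def denormalize(K, pt):
    # Map a normalized image coordinate back to pixel coordinates:
    # reproject through K, divide out the homogeneous scale, round to ints.
    ret = np.dot(K, np.array([pt[0], pt[1], 1.0]))
    ret /= ret[2]
    return int(round(ret[0])), int(round(ret[1]))
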
Example #3
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]
    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera; keep pts4d unfiltered so indices stay aligned with idx1/idx2
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    #display.paint(img)
    mapp.display()
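
The Frame and Point classes come from each project's own map module, and their constructor signatures vary across these examples. A minimal sketch consistent with how they are used here; extract_features() is a hypothetical stand-in for the ORB keypoint/descriptor extraction these projects perform:

import numpy as np

class Frame:
    def __init__(self, mapp, img, K):
        self.K = K
        self.pose = np.eye(4)                        # world-to-camera, identity until estimated
        self.kps, self.des = extract_features(img)   # hypothetical: keypoints + descriptors
        self.pts = [None] * len(self.kps)            # map point observed at each keypoint, if any
        self.id = len(mapp.frames)
        mapp.frames.append(self)

class Point:
    # A 3-D point in the world, observed from multiple frames.
    def __init__(self, mapp, loc):
        self.pt = loc
        self.frames = []
        self.idxs = []
        self.id = len(mapp.points)
        mapp.points.append(self)

    def add_observation(self, frame, idx):
        frame.pts[idx] = self        # link the frame's keypoint slot to this point
        self.frames.append(frame)
        self.idxs.append(idx)
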
Example #4
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(img, K)
    frames.append(frame)
    if len(frames) <= 1:
        return

    idx1, idx2, Rt = match_frames(frames[-1], frames[-2])
    frames[-1].pose = np.dot(Rt, frames[-2].pose)

    pts4d = triangulate(frames[-1].pose, frames[-2].pose, frames[-1].pts[idx1],
                        frames[-2].pts[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera
    good_pts4d &= pts4d[:, 2] > 0

    #print(f'{len(matches)} matches')

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(p)
        pt.add_observation(frames[-1], idx1[i])
        pt.add_observation(frames[-2], idx2[i])

    for p1, p2 in zip(frames[-1].pts[idx1], frames[-2].pts[idx2]):
        u1, v1 = denormalize(K, p1)
        u2, v2 = denormalize(K, p2)
        cv2.circle(img, (u1, v1), radius=3, color=(0, 255, 0))
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
    screen.paint(img)
Example #5
File: slam.py Project: jsBrique/pyslam-1
    def track_reference_frame(self, f_ref, f_cur, name=''):
        if f_ref is None:
            return
        print('>>>> tracking reference %d ...' % (f_ref.id))
        # find keypoint matches between f_cur and kf_ref
        print('matching keypoints with ', Frame.feature_matcher.type.name)
        self.timer_match.start()
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref)
        self.timer_match.refresh()
        self.num_matched_kps = idxs_cur.shape[0]
        print("# keypoints matched: %d " % self.num_matched_kps)
        if kUseEssentialMatrixFitting:
            # estimate camera orientation and inlier matches by fitting an essential matrix (see the limitations above)
            idxs_ref, idxs_cur = self.estimate_pose_by_fitting_ess_mat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        if kUseDynamicDesDistanceTh:
            self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        # propagate map point matches from kf_ref to f_cur  (do not override idxs_ref, idxs_cur)
        num_found_map_pts_inter_frame, idx_ref_prop, idx_cur_prop = propagate_map_point_matches(
            f_ref,
            f_cur,
            idxs_ref,
            idxs_cur,
            max_descriptor_distance=self.descriptor_distance_sigma)
        print("# matched map points in prev frame: %d " %
              num_found_map_pts_inter_frame)

        if kDebugDrawMatches and True:
            img_matches = draw_feature_matches(f_ref.img,
                                               f_cur.img,
                                               f_ref.kps[idx_ref_prop],
                                               f_cur.kps[idx_cur_prop],
                                               f_ref.sizes[idx_ref_prop],
                                               f_cur.sizes[idx_cur_prop],
                                               horizontal=False)
            cv2.imshow('tracking frame (no projection) - matches', img_matches)
            cv2.waitKey(1)

        # store tracking info (for possible reuse)
        self.idxs_ref = idxs_ref
        self.idxs_cur = idxs_cur

        # f_cur pose optimization using last matches with kf_ref:
        # here we use the first guess of f_cur pose and the propagated map point matches from f_ref (matched keypoints)
        self.pose_optimization(f_cur, name)
        # update matched map points; discard outliers detected in last pose optimization
        num_matched_points = f_cur.clean_outlier_map_points()
        print('      # num_matched_map_points: %d' %
              (self.num_matched_map_points))
        #print('     # matched points: %d' % (num_matched_points) )
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
            f_cur.remove_frame_views(idxs_cur)
            f_cur.reset_points()
            Printer.red(
                'failure in tracking reference %d, # matched map points: %d' %
                (f_ref.id, self.num_matched_map_points))
            self.pose_is_ok = False
Example #6
File: slam.py Project: AmulyaH/Visual-SLAM
def process_frame(img):
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return
  print("\n*** frame %d ***" % (frame.id,))

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)
  f1.pose = np.dot(Rt, f2.pose)

  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None:
      f2.pts[idx].add_observation(f1, idx1[i])

  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # reject pts without enough "parallax" (this right?)
  pts_tri_local = triangulate(Rt, np.eye(4), f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts_tri_local[:, 3]) > 0.005

  # homogeneous 3-D coords
  pts_tri_local /= pts_tri_local[:, 3:]

  # locally in front of camera: reject points behind it
  good_pts4d &= pts_tri_local[:, 2] > 0

  # project into world
  pts4d = np.dot(np.linalg.inv(f1.pose), pts_tri_local.T).T

  print("Adding:   %d points" % np.sum(good_pts4d))

  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p, img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  for pt1, pt2 in zip(f1.kps[idx1], f2.kps[idx2]):
    u1, v1 = denormalize(K, pt1)
    u2, v2 = denormalize(K, pt2)
    cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
    cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))

  # 2-D display
  if disp is not None:
    disp.paint(img)

  # optimize the map
  if frame.id >= 4:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  mapp.display()
Example #7
File: slam.py Project: herozxb/xSlam
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    print("\n*** frame %d ***" % (frame.id, ))

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    #print("=============================", idx1, idx2, Rt,f1.pts,f2.pts)
    f1.pose = np.dot(Rt, f2.pose)

    for i, idx in enumerate(idx2):
        if f2.pts[idx] is not None:
            f2.pts[idx].add_observation(f1, idx1[i])

    # homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera and points already matched in the map
    unmatched_points = np.array([f1.pts[i] is None for i in idx1])
    print("Adding:  %d points" % np.sum(unmatched_points))
    good_pts4d &= (pts4d[:, 2] > 0) & unmatched_points

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.kps[idx1], f2.kps[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    # 2-D display
    if disp is not None:
        disp.paint(img)

    # optimize the map
    if frame.id >= 4:
        mapp.optimize()

    # 3-D display
    mapp.display()
Example #8
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(img, mapp, K)

    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without good parallax: check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005

    # homogeneous 3-D coords
    pts4d /= pts4d[:, 3:]

    # reject pts behind the camera
    good_pts4d &= pts4d[:, 2] > 0

    #print(sum(good_pts4d), len(good_pts4d))

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    #print(f1.pose)
    #print(pts4d)

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        #u1, v1 = map(lambda x: int(round(x)), pt1)
        #u2, v2 = map(lambda x: int(round(x)), pt2)

        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)

        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    #print(img.shape)

    # 2-D display
    if disp is not None: disp.paint(img)

    #3-D display
    mapp.display()
Example #9
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)

    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)

    # triangulation
    # triangulatePoints(3x4 projection matrix of camera 1, 3x4 projection matrix of camera 2,
    #                   feature points in image 1, feature points in image 2)
    # returns a 4xN array reconstructed in homogeneous coordinates
    # look up DLT (direct linear transform)
    f1.pose = np.dot(Rt, f2.pose)

    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005

    # divide out the homogeneous w component
    # homogeneous 3-D coords
    pts4d /= pts4d[:, 3:]

    # reject pts behind the camera; keep pts4d unfiltered so indices stay aligned with idx1/idx2
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    # denormalize for display
    # map the normalized points back to image coordinates
    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)

        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    mapp.display()

    cv2.imshow("image", img)
    cv2.waitKey(1)
Example #10
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]
    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    for i, idx in enumerate(idx2):
        if f2.pts[idx] is not None:
            f2.pts[idx].add_observation(f1, idx1[i])
    # homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera and points already matched in the map
    unmatched_points = np.array([f1.pts[i] is None for i in idx1])
    print("Adding: {} points".format(np.sum(unmatched_points)))
    good_pts4d &= (pts4d[:, 2] > 0) & unmatched_points
    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        # print(idx1[i])
        # print(f1.pts[idx1[i]])
        # print(f1.pts)
        u, v = int(round(f1.kpus[idx1[i], 0])), int(round(f1.kpus[idx1[i], 1]))
        pt = Point(mapp, p, img[v, u])
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.kps[idx1], f2.kps[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
    #   cv2.imshow("result", img)
    #   cv2.waitKey(1)
    if frame.id >= 4:
        mapp.optimize()
    # 3-D
    mapp.display()
Example #11
File: main.py Project: dragonSwords98/SLAM
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    # homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without enough "parallax": check w before normalizing (afterwards w == 1)
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]

    # reject points behind the camera
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    # 2-D display
    if disp is not None:
        disp.paint(img)

    # 3-D display
    mapp.display()
Example #12
def process_frame(img):
  start_time = time.time()
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)

  if frame.id < 5:
    # get initial positions from fundamental matrix
    f1.pose = np.dot(Rt, f2.pose)
  else:
    # kinematic model
    velocity = np.dot(f2.pose, np.linalg.inv(mapp.frames[-3].pose))
    f1.pose = np.dot(velocity, f2.pose)

  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None:
      f2.pts[idx].add_observation(f1, idx1[i])

  # pose optimization
  #print(f1.pose)
  pose_opt = mapp.optimize(local_window=1, fix_points=True)
  print("Pose:     %f" % pose_opt)
  #print(f1.pose)

  # search by projection
  sbp_pts_count = 0
  if len(mapp.points) > 0:
    map_points = np.array([p.homogeneous() for p in mapp.points])
    projs = np.dot(np.dot(K, f1.pose[:3]), map_points.T).T
    projs = projs[:, 0:2] / projs[:, 2:]
    good_pts = (projs[:, 0] > 0) & (projs[:, 0] < W) & \
               (projs[:, 1] > 0) & (projs[:, 1] < H)
    for i, p in enumerate(mapp.points):
      if not good_pts[i]:
        continue
      q = f1.kd.query_ball_point(projs[i], 5)
      for m_idx in q:
        if f1.pts[m_idx] is None:
          # if any descriptors within 32
          for o in p.orb():
            o_dist = hamming_distance(o, f1.des[m_idx])
            if o_dist < 32.0:
              p.add_observation(f1, m_idx)
              sbp_pts_count += 1
              break

  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # reject pts without enough "parallax" (this right?)
  pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts4d[:, 3]) > 0.005

  # homogeneous 3-D coords
  pts4d /= pts4d[:, 3:]

  # locally in front of camera
  # NOTE: This check is broken and maybe unneeded
  #pts_tri_local = np.dot(f1.pose, pts4d.T).T
  #good_pts4d &= pts_tri_local[:, 2] > 0

  print("Adding:   %d new points, %d search by projection" % (np.sum(good_pts4d), sbp_pts_count))

  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p[0:3], img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  for i1, i2 in zip(idx1, idx2):
    pt1 = f1.kps[i1]
    pt2 = f2.kps[i2]
    u1, v1 = denormalize(K, pt1)
    u2, v2 = denormalize(K, pt2)
    if f1.pts[i1] is not None:
      if len(f1.pts[i1].frames) >= 5:
        cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
      else:
        cv2.circle(img, (u1, v1), color=(0,128,0), radius=3)
    else:
      cv2.circle(img, (u1, v1), color=(0,0,0), radius=3)
    cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))

  # 2-D display
  if disp is not None:
    disp.paint(img)

  # optimize the map
  if frame.id >= 4 and frame.id%5 == 0:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  mapp.display()
  print("Map:      %d points, %d frames" % (len(mapp.points), len(mapp.frames)))
  print("Time:     %.2f ms" % ((time.time()-start_time)*1000.0))
Example #13
    def track(self, img, frame_id, pose=None, verts=None):
        # check image size is coherent with camera params
        print("img.shape ", img.shape)
        print("cam ", self.H, " x ", self.W)
        assert img.shape[0:2] == (self.H, self.W)

        self.timer_main_track.start()

        # build current frame
        self.timer_frame.start()
        f_cur = Frame(self.map, img, self.K, self.Kinv, self.D, des=verts)
        self.timer_frame.refresh()

        if self.stage == SLAMStage.NO_IMAGES_YET:
            # push first frame into the initializer
            self.intializer.init(f_cur)
            self.stage = SLAMStage.NOT_INITIALIZED
            return  # EXIT (jump to second frame)

        if self.stage == SLAMStage.NOT_INITIALIZED:
            # try to initialize
            initializer_output, is_ok = self.intializer.initialize(f_cur, img)
            if is_ok:
                f_ref = self.intializer.f_ref
                # add the two initialized frames in the map
                self.map.add_frame(f_ref)  # add first frame in map and update its id
                self.map.add_frame(f_cur)  # add second frame in map and update its id
                # add points in map
                new_pts_count, _ = self.map.add_points(
                    initializer_output.points4d, None, f_cur, f_ref,
                    initializer_output.idx_cur, initializer_output.idx_ref,
                    img)
                Printer.green("map: initialized %d new points" %
                              (new_pts_count))
                self.stage = SLAMStage.OK
            return  # EXIT (jump to next frame)

        f_ref = self.map.frames[-1]  # get previous frame in map
        self.map.add_frame(f_cur)  # add f_cur to map

        # update (velocity) motion model (kinematic without damping)
        self.velocity = np.dot(f_ref.pose,
                               np.linalg.inv(self.map.frames[-2].pose))
        predicted_pose = np.dot(self.velocity, f_ref.pose)

        if kUseMotionModel is True:
            print('using motion model')
            # set initial guess for current pose optimization
            f_cur.pose = predicted_pose.copy()
            #f_cur.pose = f_ref.pose.copy()  # get the last pose as an initial guess for optimization

            if kUseSearchFrameByProjection:
                # search frame by projection: match map points observed in f_ref with keypoints of f_cur
                print('search frame by projection...')
                idx_ref, idx_cur, num_found_map_pts = search_frame_by_projection(
                    f_ref, f_cur)
                print("# found map points in prev frame: %d " %
                      num_found_map_pts)
            else:
                self.timer_match.start()
                # find keypoint matches between f_cur and f_ref
                idx_cur, idx_ref = match_frames(f_cur, f_ref)
                self.num_matched_kps = idx_cur.shape[0]
                print('# keypoint matches: ', self.num_matched_kps)
                self.timer_match.refresh()

        else:
            print('estimating pose by fitting essential mat')

            self.timer_match.start()
            # find keypoint matches between f_cur and f_ref
            idx_cur, idx_ref = match_frames(f_cur, f_ref)
            self.num_matched_kps = idx_cur.shape[0]
            print('# keypoint matches: ', self.num_matched_kps)
            self.timer_match.refresh()

            # N.B.: please, in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat()
            self.timer_pose_est.start()
            # estimate inter frame camera motion by using found keypoint matches
            Mrc = self.estimate_pose_ess_mat(f_ref.kpsn[idx_ref],
                                             f_cur.kpsn[idx_cur])
            Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))
            f_cur.pose = np.dot(Mcr, f_ref.pose)
            self.timer_pose_est.refresh()

            # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
            mask_index = (self.mask_match.ravel() == 1)
            self.num_inliers = sum(mask_index)
            print('# inliers: ', self.num_inliers)
            idx_ref = idx_ref[mask_index]
            idx_cur = idx_cur[mask_index]

            # if too many outliers reset estimated pose
            if self.num_inliers < kNumMinInliersEssentialMat:
                f_cur.pose = f_ref.pose.copy()  # reset estimated pose to previous frame
                Printer.red('Essential mat: not enough inliers!')

            # set initial guess for current pose optimization:
            # keep the estimated rotation and override translation with ref frame translation (we do not have a proper scale for the translation)
            f_cur.pose[:, 3] = f_ref.pose[:, 3].copy()
            #f_cur.pose[:,3] = predicted_pose[:,3].copy()  # or use motion model for translation

        # populate f_cur with map points by propagating map point matches of f_ref:
        # we use map points observed in f_ref and keypoint matches between f_ref and f_cur
        num_found_map_pts_inter_frame = 0
        if not kUseSearchFrameByProjection:
            for i, idx in enumerate(idx_ref):  # iterate over keypoint matches
                # if we have a map point P for the i-th matched keypoint in f_ref,
                # then P is automatically matched to the i-th matched keypoint in f_cur
                if f_ref.points[idx] is not None:
                    f_ref.points[idx].add_observation(f_cur, idx_cur[i])
                    num_found_map_pts_inter_frame += 1
            print("# matched map points in prev frame: %d " %
                  num_found_map_pts_inter_frame)

        # f_cur pose optimization 1  (here we use first available information coming from first guess of f_cur pose and map points of f_ref matched keypoints )
        self.timer_pose_opt.start()
        pose_opt_error, pose_is_ok, self.num_vo_map_points = optimizer_g2o.poseOptimization(
            f_cur, verbose=False)
        print("pose opt err1: %f,  ok: %d" % (pose_opt_error, int(pose_is_ok)))
        # discard outliers detected in pose optimization (in current frame)
        #f_cur.reset_outlier_map_points()

        if pose_is_ok is True:
            # discard outliers detected in f_cur pose optimization (in current frame)
            f_cur.reset_outlier_map_points()
        else:
            # if current pose optimization failed, reset f_cur pose to f_ref pose
            f_cur.pose = f_ref.pose.copy()

        self.timer_pose_opt.pause()

        # TODO: add recover in case of f_cur pose optimization failure

        # now, having a better estimate of f_cur pose, we can find more map point matches:
        # find matches between {local map points} (points in the built local map) and {unmatched keypoints of f_cur}
        if pose_is_ok is True and not self.map.local_map.is_empty():
            self.timer_seach_map.start()
            #num_found_map_pts = search_local_frames_by_projection(self.map, f_cur)
            num_found_map_pts = search_map_by_projection(
                self.map.local_map.points, f_cur)  # use the built local map
            print("# matched map points in local map: %d " % num_found_map_pts)
            self.timer_seach_map.refresh()

            # f_cur pose optimization 2 with all the matched map points
            self.timer_pose_opt.resume()
            pose_opt_error, pose_is_ok, self.num_vo_map_points = optimizer_g2o.poseOptimization(
                f_cur, verbose=False)
            print("pose opt err2: %f,  ok: %d" %
                  (pose_opt_error, int(pose_is_ok)))
            print("# valid matched map points: %d " % self.num_vo_map_points)
            # discard outliers detected in pose optimization (in current frame)
            if pose_is_ok is True:
                f_cur.reset_outlier_map_points()
            self.timer_pose_opt.refresh()

        if kUseSearchFrameByProjection:
            print("search for triangulation with epipolar lines...")
            idx_ref, idx_cur, self.num_matched_kps, _ = search_frame_for_triangulation(
                f_ref, f_cur, img)
            print("# matched keypoints in prev frame: %d " %
                  self.num_matched_kps)

        # if pose is ok, then we try to triangulate the matched keypoints (between f_cur and f_ref) that do not have a corresponding map point
        if pose_is_ok is True and len(idx_ref) > 0:
            self.timer_triangulation.start()

            # TODO: this triangulation should be done from keyframes!
            good_pts4d = np.array([
                f_cur.points[i] is None for i in idx_cur
            ])  # matched keypoints of f_cur without a corresponding map point
            # do triangulation in global frame
            pts4d = triangulate_points(f_cur.pose, f_ref.pose,
                                       f_cur.kpsn[idx_cur],
                                       f_ref.kpsn[idx_ref], good_pts4d)
            good_pts4d &= np.abs(pts4d[:, 3]) != 0
            #pts4d /= pts4d[:, 3:]
            pts4d[good_pts4d] /= pts4d[good_pts4d, 3:]  # get homogeneous 3-D coords

            new_pts_count, _ = self.map.add_points(pts4d,
                                                   good_pts4d,
                                                   f_cur,
                                                   f_ref,
                                                   idx_cur,
                                                   idx_ref,
                                                   img,
                                                   check_parallax=True)
            print("# added map points: %d " % (new_pts_count))
            self.timer_triangulation.refresh()

        # local optimization
        self.time_local_opt.start()
        err = self.map.locally_optimize(local_window=kLocalWindow)
        print("local optimization error:   %f" % err)
        self.time_local_opt.refresh()

        # large window optimization of the map
        # TODO: move this to a separate thread with local mapping
        if kUseLargeWindowBA is True and f_cur.id >= parameters.kEveryNumFramesLargeWindowBA and f_cur.id % parameters.kEveryNumFramesLargeWindowBA == 0:
            err = self.map.optimize(
                local_window=parameters.kLargeWindow)  # verbose=True)
            Printer.blue("large window optimization error:   %f" % err)

        print("map: %d points, %d frames" %
              (len(self.map.points), len(self.map.frames)))
        #self.updateHistory()

        self.timer_main_track.refresh()
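
Examples #13, #16 and #18 lean on small pose helpers (poseRt, inv_T) that are not shown. Plausible sketches, assuming 4x4 rigid-body transforms of the form [R t; 0 1]:

import numpy as np

def poseRt(R, t):
    # Assemble a 4x4 rigid-body transform from a 3x3 rotation and a 3-vector translation.
    ret = np.eye(4)
    ret[:3, :3] = R
    ret[:3, 3] = t
    return ret

def inv_T(T):
    # Invert a rigid-body transform in closed form: inv([R t; 0 1]) = [R^T  -R^T t; 0 1].
    ret = np.eye(4)
    ret[:3, :3] = T[:3, :3].T
    ret[:3, 3] = -np.dot(T[:3, :3].T, T[:3, 3])
    return ret
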
Example #14
File: slam.py Project: mmajewsk/twitchslam
 def track(self):
     return match_frames(self.frame_1, self.frame_2)
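
Every example on this page revolves around match_frames(), which pairs keypoints between two frames and, in most variants, also returns a relative pose Rt. A minimal sketch of one way to implement it, assuming ORB descriptors in f.des, normalized Nx2 keypoints in f.kps, and OpenCV's essential-matrix RANSAC; the real implementations differ per project (twitchslam, for instance, fits the model with skimage's ransac instead):

import cv2
import numpy as np

def match_frames(f1, f2):
    # Brute-force Hamming matching with Lowe's ratio test, then an
    # essential-matrix fit to keep geometrically consistent matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    idx1, idx2 = [], []
    for pair in bf.knnMatch(f1.des, f2.des, k=2):
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
            idx1.append(pair[0].queryIdx)
            idx2.append(pair[0].trainIdx)
    idx1, idx2 = np.array(idx1), np.array(idx2)

    # keypoints are normalized, so the camera matrix is the identity
    # and the RANSAC threshold is in normalized units
    E, mask = cv2.findEssentialMat(f1.kps[idx1], f2.kps[idx2], np.eye(3),
                                   method=cv2.RANSAC, threshold=0.005)
    _, R, t, _ = cv2.recoverPose(E, f1.kps[idx1], f2.kps[idx2], np.eye(3))

    inliers = mask.ravel() == 1
    Rt = np.eye(4)                 # 4x4 relative pose (direction convention varies per project)
    Rt[:3, :3] = R
    Rt[:3, 3] = t.ravel()
    return idx1[inliers], idx2[inliers], Rt
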
Example #15
def process_frame(img, pose=None):
  start_time = time.time()
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)

  # add new observations if the point is already observed in the previous frame
  # TODO: consider tradeoff doing this before/after search by projection
  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None and f1.pts[idx1[i]] is None:
      f2.pts[idx].add_observation(f1, idx1[i])

  if frame.id < 5:
    # get initial positions from fundamental matrix
    f1.pose = np.dot(Rt, f2.pose)
  else:
    # kinematic model
    velocity = np.dot(f2.pose, np.linalg.inv(mapp.frames[-3].pose))
    f1.pose = np.dot(velocity, f2.pose)

  # pose optimization
  if pose is None:
    #print(f1.pose)
    pose_opt = mapp.optimize(local_window=1, fix_points=True)
    print("Pose:     %f" % pose_opt)
    #print(f1.pose)
  else:
    # have ground truth for pose
    f1.pose = pose

  # search by projection
  sbp_pts_count = 0
  if len(mapp.points) > 0:
    map_points = np.array([p.homogeneous() for p in mapp.points])
    projs = np.dot(np.dot(K, f1.pose[:3]), map_points.T).T
    projs = projs[:, 0:2] / projs[:, 2:]
    good_pts = (projs[:, 0] > 0) & (projs[:, 0] < W) & \
               (projs[:, 1] > 0) & (projs[:, 1] < H)
    for i, p in enumerate(mapp.points):
      if not good_pts[i]:
        continue
      q = f1.kd.query_ball_point(projs[i], 5)
      for m_idx in q:
        if f1.pts[m_idx] is None:
          # if any descriptors within 32
          for o in p.orb():
            o_dist = hamming_distance(o, f1.des[m_idx])
            if o_dist < 32.0:
              p.add_observation(f1, m_idx)
              sbp_pts_count += 1
              break

  # triangulate the points we don't have matches for
  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # do triangulation in local frame
  lpose = np.dot(f1.pose, np.linalg.inv(f2.pose))
  pts_local = triangulate(lpose, np.eye(4), f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts_local[:, 3]) > 0.01
  pts_local /= pts_local[:, 3:]       # homogeneous 3-D coords
  good_pts4d &= pts_local[:, 2] > 0   # locally in front of camera
  pts4d = np.dot(np.linalg.inv(f2.pose), pts_local.T).T

  print("Adding:   %d new points, %d search by projection" % (np.sum(good_pts4d), sbp_pts_count))

  # adding new points to the map from pairwise matches
  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p[0:3], img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  # 2-D display
  if disp2d is not None:
    # paint annotations on the image
    for i1, i2 in zip(idx1, idx2):
      u1, v1 = int(round(f1.kpus[i1][0])), int(round(f1.kpus[i1][1]))
      u2, v2 = int(round(f2.kpus[i2][0])), int(round(f2.kpus[i2][1]))
      if f1.pts[i1] is not None:
        if len(f1.pts[i1].frames) >= 5:
          cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
        else:
          cv2.circle(img, (u1, v1), color=(0,128,0), radius=3)
      else:
        cv2.circle(img, (u1, v1), color=(0,0,0), radius=3)
      cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))
    disp2d.paint(img)

  # optimize the map
  if frame.id >= 4 and frame.id%5 == 0:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  if disp3d is not None:
    disp3d.paint(mapp)

  print("Map:      %d points, %d frames" % (len(mapp.points), len(mapp.frames)))
  print("Time:     %.2f ms" % ((time.time()-start_time)*1000.0))
  print(np.linalg.inv(f1.pose))
Example #16
    def initialize(self, f_cur, img_cur):

        if self.num_failures > kNumOfFailuresAfterWichNumMinTriangulatedPointsIsHalved:
            self.num_min_triangulated_points = 0.5 * Parameters.kInitializerNumMinTriangulatedPoints
            self.num_failures = 0
            Printer.orange(
                'Initializer: halved min num triangulated features to ',
                self.num_min_triangulated_points)

        # prepare the output
        out = InitializerOutput()
        is_ok = False

        #print('num frames: ', len(self.frames))

        # if too many frames have passed, move the current idx_f_ref forward
        # this is just one very simple policy that can be used
        if self.f_ref is not None:
            if f_cur.id - self.f_ref.id > kMaxIdDistBetweenIntializingFrames:
                self.f_ref = self.frames[-1]  # take last frame in the buffer
                #self.idx_f_ref = len(self.frames)-1  # take last frame in the buffer
                #self.idx_f_ref = self.frames.index(self.f_ref)  # since we are using a deque, the code of the previous commented line is not valid anymore
                #print('*** idx_f_ref:',self.idx_f_ref)
        #self.f_ref = self.frames[self.idx_f_ref]
        f_ref = self.f_ref
        #print('ref fid: ',self.f_ref.id,', curr fid: ', f_cur.id, ', idxs_ref: ', self.idxs_ref)

        # append current frame
        self.frames.append(f_cur)

        # if the current frames do not have enough features, exit
        if len(f_ref.kps) < self.num_min_features or len(
                f_cur.kps) < self.num_min_features:
            Printer.red('Initializer: ko - not enough features!')
            self.num_failures += 1
            return out, is_ok

        # find keypoint matches
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref,
                                          kFeatureMatchRatioTestInitializer)

        #print('|------------')
        #print('deque ids: ', [f.id for f in self.frames])
        #print('initializing frames ', f_cur.id, ', ', f_ref.id)
        #print("# keypoint matches: ", len(idxs_cur))

        Trc = self.estimatePose(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur])
        Tcr = inv_T(Trc)  # Tcr w.r.t. ref frame
        f_ref.update_pose(np.eye(4))
        f_cur.update_pose(Tcr)

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        #print('# keypoint inliers: ', self.num_inliers )
        idx_cur_inliers = idxs_cur[mask_idxs]
        idx_ref_inliers = idxs_ref[mask_idxs]

        # create a temp map for initializing
        map = Map()
        f_ref.reset_points()
        f_cur.reset_points()

        #map.add_frame(f_ref)
        #map.add_frame(f_cur)

        kf_ref = KeyFrame(f_ref)
        kf_cur = KeyFrame(f_cur, img_cur)
        map.add_keyframe(kf_ref)
        map.add_keyframe(kf_cur)

        pts3d, mask_pts3d = triangulate_normalized_points(
            kf_cur.Tcw, kf_ref.Tcw, kf_cur.kpsn[idx_cur_inliers],
            kf_ref.kpsn[idx_ref_inliers])

        new_pts_count, mask_points, _ = map.add_points(
            pts3d,
            mask_pts3d,
            kf_cur,
            kf_ref,
            idx_cur_inliers,
            idx_ref_inliers,
            img_cur,
            do_check=True,
            cos_max_parallax=Parameters.kCosMaxParallaxInitializer)
        #print("# triangulated points: ", new_pts_count)

        if new_pts_count > self.num_min_triangulated_points:
            err = map.optimize(verbose=False,
                               rounds=20,
                               use_robust_kernel=True)
            print("init optimization error^2: %f" % err)

            num_map_points = len(map.points)
            print("# map points:   %d" % num_map_points)
            is_ok = num_map_points > self.num_min_triangulated_points

            out.pts = pts3d[mask_points]
            out.kf_cur = kf_cur
            out.idxs_cur = idx_cur_inliers[mask_points]
            out.kf_ref = kf_ref
            out.idxs_ref = idx_ref_inliers[mask_points]

            # set scene median depth to equal desired_median_depth
            desired_median_depth = Parameters.kInitializerDesiredMedianDepth
            median_depth = kf_cur.compute_points_median_depth(out.pts)
            depth_scale = desired_median_depth / median_depth
            print('forcing current median depth ', median_depth, ' to ',
                  desired_median_depth)

            out.pts[:, :3] = out.pts[:, :3] * depth_scale  # scale points
            tcw = kf_cur.tcw * depth_scale  # scale initial baseline
            kf_cur.update_translation(tcw)

        map.delete()

        if is_ok:
            Printer.green('Initializer: ok!')
        else:
            self.num_failures += 1
            #Printer.red('Initializer: ko!')
        print('|------------')
        return out, is_ok
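
The depth-scale step at the end of this initializer pins down the arbitrary monocular scale by forcing the scene's median depth to a target value. A sketch of what compute_points_median_depth might compute, assuming Nx4 homogeneous world points and Tcw the camera's world-to-camera transform (hypothetical; pyslam's actual method may differ in details):

import numpy as np

def compute_points_median_depth(Tcw, pts4d):
    # Depth of each point in the camera frame is the z-component of R*p + t;
    # the median gives a robust scale reference for the whole scene.
    Rcw, tcw = Tcw[:3, :3], Tcw[:3, 3]
    depths = np.dot(Rcw[2], pts4d[:, :3].T) + tcw[2]
    return np.median(depths)
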
Example #17
    def process_frame(self, img, pose=None):
        start_time = time.time()
        assert img.shape[0:2] == (self.H, self.W)
        frame = Frame(self.mapp, img, self.K)

        if frame.id == 0:
            return

        f1 = self.mapp.frames[-1]
        f2 = self.mapp.frames[-2]

        idx1, idx2, Rt = match_frames(f1, f2)

        # add new observations if the point is already observed in the previous frame
        # TODO: consider tradeoff doing this before/after search by projection
        for i, idx in enumerate(idx2):
            if f2.pts[idx] is not None and f1.pts[idx1[i]] is None:
                f2.pts[idx].add_observation(f1, idx1[i])

        if frame.id < 5 or True:
            # get initial positions from fundamental matrix
            f1.pose = np.dot(Rt, f2.pose)
        else:
            # kinematic model (not used)
            velocity = np.dot(f2.pose,
                              np.linalg.inv(self.mapp.frames[-3].pose))
            f1.pose = np.dot(velocity, f2.pose)

        # pose optimization
        if pose is None:
            #print(f1.pose)
            pose_opt = self.mapp.optimize(local_window=1, fix_points=True)
            print("Pose:     %f" % pose_opt)
            #print(f1.pose)
        else:
            # have ground truth for pose
            f1.pose = pose

        sbp_pts_count = 0

        # search by projection
        if len(self.mapp.points) > 0:
            # project *all* the map points into the current frame
            map_points = np.array([p.homogeneous() for p in self.mapp.points])
            projs = np.dot(np.dot(self.K, f1.pose[:3]), map_points.T).T
            projs = projs[:, 0:2] / projs[:, 2:]

            # only the points that fit in the frame
            good_pts = (projs[:, 0] > 0) & (projs[:, 0] < self.W) & \
                       (projs[:, 1] > 0) & (projs[:, 1] < self.H)

            for i, p in enumerate(self.mapp.points):
                if not good_pts[i]:
                    # point not visible in frame
                    continue
                if f1 in p.frames:
                    # we already matched this map point to this frame
                    # TODO: understand this better
                    continue
                for m_idx in f1.kd.query_ball_point(projs[i], 2):
                    # if point unmatched
                    if f1.pts[m_idx] is None:
                        b_dist = p.orb_distance(f1.des[m_idx])
                        # if any descriptors within 64
                        if b_dist < 64.0:
                            p.add_observation(f1, m_idx)
                            sbp_pts_count += 1
                            break

        # triangulate the points we don't have matches for
        good_pts4d = np.array([f1.pts[i] is None for i in idx1])

        # do triangulation in global frame
        pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
        good_pts4d &= np.abs(pts4d[:, 3]) != 0
        pts4d /= pts4d[:, 3:]  # homogeneous 3-D coords

        # adding new points to the map from pairwise matches
        new_pts_count = 0
        for i, p in enumerate(pts4d):
            if not good_pts4d[i]:
                continue

            # check parallax is large enough
            # TODO: learn what parallax means
            """
            r1 = np.dot(f1.pose[:3, :3], add_ones(f1.kps[idx1[i]]))
            r2 = np.dot(f2.pose[:3, :3], add_ones(f2.kps[idx2[i]]))
            parallax = r1.dot(r2) / (np.linalg.norm(r1) * np.linalg.norm(r2))
            if parallax >= 0.9998:
                continue
            """

            # check points are in front of both cameras
            pl1 = np.dot(f1.pose, p)
            pl2 = np.dot(f2.pose, p)
            if pl1[2] < 0 or pl2[2] < 0:
                continue

            # reproject
            pp1 = np.dot(self.K, pl1[:3])
            pp2 = np.dot(self.K, pl2[:3])

            # check reprojection error
            pp1 = (pp1[0:2] / pp1[2]) - f1.kpus[idx1[i]]
            pp2 = (pp2[0:2] / pp2[2]) - f2.kpus[idx2[i]]
            pp1 = np.sum(pp1**2)
            pp2 = np.sum(pp2**2)
            if pp1 > 2 or pp2 > 2:
                continue

            # add the point
            color = img[int(round(f1.kpus[idx1[i], 1])),
                        int(round(f1.kpus[idx1[i], 0]))]
            pt = Point(self.mapp, p[0:3], color)
            pt.add_observation(f2, idx2[i])
            pt.add_observation(f1, idx1[i])
            new_pts_count += 1

        print("Adding:   %d new points, %d search by projection" %
              (new_pts_count, sbp_pts_count))

        # optimize the map
        if frame.id >= 4 and frame.id % 5 == 0:
            err = self.mapp.optimize()  # optionally: verbose=True
            print("Optimize: %f units of error" % err)

        print("Map:      %d points, %d frames" %
              (len(self.mapp.points), len(self.mapp.frames)))
        print("Time:     %.2f ms" % ((time.time() - start_time) * 1000.0))
        print(np.linalg.inv(f1.pose))
Example #18
    def initialize(self, f_cur, img_cur):

        # prepare the output
        out = InitializerOutput()
        is_ok = False

        # if too many frames have passed, move the current id_ref forward
        if (len(self.frames) - 1) - self.id_ref >= kMaxIdDistBetweenFrames:
            self.id_ref = len(self.frames) - 1  # take last frame in the array
        self.f_ref = self.frames[self.id_ref]
        f_ref = self.f_ref

        # append current frame
        self.frames.append(f_cur)

        # if the current frames do not have enough features, exit
        if len(f_ref.kps) < kNumMinFeatures or len(
                f_cur.kps) < kNumMinFeatures:
            Printer.red('Initializer: not enough features!')
            return out, is_ok

        # find image point matches
        idx_cur, idx_ref = match_frames(f_cur, f_ref)

        print('├────────')
        print('initializing frames ', f_cur.id, ', ', f_ref.id)
        Mrc = self.estimatePose(f_ref.kpsn[idx_ref], f_cur.kpsn[idx_cur])
        f_cur.pose = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))  # [Rcr, tcr] w.r.t. ref frame

        # remove outliers
        mask_index = [i for i, v in enumerate(self.mask_match) if v > 0]
        print('num inliers: ', len(mask_index))
        idx_cur_inliers = idx_cur[mask_index]
        idx_ref_inliers = idx_ref[mask_index]

        # create a temp map for initializing
        map = Map()
        map.add_frame(f_ref)
        map.add_frame(f_cur)

        points4d = self.triangulatePoints(f_cur.pose, f_ref.pose,
                                          f_cur.kpsn[idx_cur_inliers],
                                          f_ref.kpsn[idx_ref_inliers])
        #pts4d = triangulate(f_cur.pose, f_ref.pose, f_cur.kpsn[idx_cur], f_ref.kpsn[idx_ref])

        new_pts_count, mask_points = map.add_points(points4d,
                                                    None,
                                                    f_cur,
                                                    f_ref,
                                                    idx_cur_inliers,
                                                    idx_ref_inliers,
                                                    img_cur,
                                                    check_parallax=True)
        print("triangulated:      %d new points from %d matches" %
              (new_pts_count, len(idx_cur)))
        err = map.optimize(verbose=False)
        print("pose opt err:   %f" % err)

        # reset points in frames
        f_cur.reset_points()
        f_ref.reset_points()

        num_map_points = len(map.points)
        #print("# map points:   %d" % num_map_points)
        is_ok = num_map_points > kNumMinTriangulatedPoints

        out.points4d = points4d[mask_points]
        out.f_cur = f_cur
        out.idx_cur = idx_cur_inliers[mask_points]
        out.f_ref = f_ref
        out.idx_ref = idx_ref_inliers[mask_points]

        # set median depth to 'desired_median_depth'
        desired_median_depth = parameters.kInitializerDesiredMedianDepth
        median_depth = f_cur.compute_points_median_depth(out.points4d)
        depth_scale = desired_median_depth / median_depth
        print('median depth: ', median_depth)

        out.points4d = out.points4d * depth_scale  # scale points
        f_cur.pose[:3, 3] = f_cur.pose[:3, 3] * depth_scale  # scale initial baseline

        print('├────────')
        Printer.green('Initializer: ok!')
        return out, is_ok