Example #1
    def __init__(self, fx, cx, cy):
        self.last_frame = None
        self.R = None
        self.t = None
        self.px_ref = None
        self.px_cur = None
        self.focal = fx
        self.pp = (cx, cy)
        self.detector = cv2.FastFeatureDetector_create(threshold=25,
                                                       nonmaxSuppression=True)
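A hedged sketch of what the stored focal/pp are typically for in a monocular visual-odometry class like this one: FAST keypoints fill px_ref/px_cur, and the two matched point arrays then feed OpenCV's essential-matrix recovery. The helper below and its (N, 2) float32 inputs are assumptions, not part of the original class.

import cv2

def pose_from_matched_points(px_ref, px_cur, fx, cx, cy):
    # Five-point essential matrix from matched pixel coordinates
    E, mask = cv2.findEssentialMat(px_cur, px_ref, focal=fx, pp=(cx, cy),
                                   method=cv2.RANSAC, prob=0.999, threshold=1.0)
    # Decompose E into the relative rotation R and unit-scale translation t
    _, R, t, mask = cv2.recoverPose(E, px_cur, px_ref, focal=fx, pp=(cx, cy))
    return R, t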
Example #2
    def __init__(self,
                 parts,
                 images,
                 topMatches=20,
                 drawMatches=True,
                 iteration=None):
        super().__init__(parts, images, topMatches, drawMatches, iteration)
        self.bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
        self.fast = cv.FastFeatureDetector_create()
        self.brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
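A minimal sketch of how the three objects above are normally chained, assuming img1 and img2 are grayscale numpy images (the helper name is made up):

import cv2 as cv

def match_fast_brief(img1, img2, top_matches=20):
    fast = cv.FastFeatureDetector_create()
    brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
    # Detect with FAST, describe with BRIEF (binary descriptors)
    kp1, des1 = brief.compute(img1, fast.detect(img1, None))
    kp2, des2 = brief.compute(img2, fast.detect(img2, None))
    # Hamming-distance brute force with cross-checking, strongest first
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    return kp1, kp2, matches[:top_matches]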
Example #3
def main():
    # Initiate FAST object with default values
    fast = cv.FastFeatureDetector_create(threshold=THRESHOLD)
    print("Processing...")
    timer = utils.Timer()
    timer.start()
    # Create frames from video
    # reader = imageio.get_reader(f'imageio:videos/explosions.mp4')
    # for i, img in enumerate(reader):
    # For creating video
    if not os.path.exists(VIDEOS_DIR):
        os.mkdir(VIDEOS_DIR)
    writer = imageio.get_writer(OUT_VIDEO_FILENAME, fps=FPS)
    print(f"Getting keypoints from video {VIDEO_FILENAME}...")
    # Reading video
    cap = cv.VideoCapture(VIDEO_FILENAME)
    counter = 0
    total_frames = DURATION * FPS
    step_size = np.ceil(total_frames / 100).astype(int)
    bar = Bar("Processing...", max=100, suffix='%(percent)d%%')
    bar.check_tty = False
    particles = []
    h = None
    w = None
    while cap.isOpened():
        ret, frame = cap.read()
        # Stop before using the frame if the video has ended
        if not ret:
            break
        if not h:
            h, w, _ = frame.shape  # numpy shape is (rows, cols, channels)
        kp = fast.detect(frame, None)
        # Get list of keypoints per frame
        keypoints = process_keypoints(kp)
        # Apply effect to keypoints (create particles per frame)
        delta_time = 1 / FPS
        particles = particle_effects(keypoints, particles, delta_time, w, h)
        # Render the particles into an image for the output video
        img_arr = render(particles)
        # Append rendered image into video
        writer.append_data(img_arr)
        # Write rendered image into image file
        if counter < 90:
            img = Image.fromarray(img_arr)
            output_img_filename = f"output/{counter}.jpg"
            img.save(output_img_filename, quality=MAX_QUALITY)
        counter += 1
        if counter % step_size == 0:
            bar.next()
    cap.release()
    bar.finish()
    print("Writing video")
    writer.close()
    timer.stop()
    print(f"Total time spent {timer}")
Example #4
def featureDetection(image1):

    fast = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
    #fast = cv2.FastFeatureDetector_create()
    # FAST returns a list of KeyPoints; convert to a numpy array of (x, y) coordinates
    kp = fast.detect(image1, None)
    kp = np.array([k.pt for k in kp], dtype=np.float32)
    #kp = np.asarray(kp ,dtype=np.float32)
    #img2 = cv2.drawKeypoints(image1, kp, None, color=(0,255,0))
    #kp = cv2.goodFeaturesToTrack(image1, mask = None, **feature_params)
    return kp
Example #5
def features(lp, lc):
    fast = cv2.FastFeatureDetector_create(threshold=50)
    orb = cv2.ORB_create()  # Feature detection using ORB

    kp_left_prev = fast.detect(lp, None)
    kp_left_cur = fast.detect(lc, None)

    kp_left_prev, des_left_prev = orb.compute(lp, kp_left_prev)
    kp_left_cur, des_left_cur = orb.compute(lc, kp_left_cur)

    return kp_left_prev, kp_left_cur, des_left_prev, des_left_cur
Example #6
def right_prev(right_image_prev):

    fast = cv2.FastFeatureDetector_create(
        threshold=50)  #, nonmaxSuppression=True)
    orb = cv2.ORB_create()  # Feature detection using ORB

    kp_right_prev = fast.detect(right_image_prev, None)
    kp_right_prev, des_right_prev = orb.compute(right_image_prev,
                                                kp_right_prev)

    return kp_right_prev, des_right_prev
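The ORB descriptors returned by features() are binary, so a Hamming-distance brute-force matcher is the natural follow-up; a short sketch, assuming lp and lc are the previous and current left grayscale frames:

kp_prev, kp_cur, des_prev, des_cur = features(lp, lc)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Sort by descriptor distance so the strongest correspondences come first
matches = sorted(bf.match(des_prev, des_cur), key=lambda m: m.distance)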
Example #7
def read_img(path):
    img = cv2.imread(path)
    img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale image
    edges = cv2.Canny(gray,50,150)

    #**********hough_channel*********
    hough_channel = np.zeros(gray.shape, np.uint8)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 10)
    try:
        for rho, theta in lines[:, 0]:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*(a))
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*(a))
            cv2.line(hough_channel,(x1,y1),(x2,y2),(255),1)
    except Exception:
        print('No lines were detected!')

    # ******** Sobel edge detection ************
    sobelX = cv2.Sobel(gray, cv2.CV_64F, 1, 0)  # gradient in the x direction
    sobelY = cv2.Sobel(gray, cv2.CV_64F, 0, 1)  # gradient in the y direction
    sobelX = np.uint8(np.absolute(sobelX))  # absolute value of the x gradient
    sobelY = np.uint8(np.absolute(sobelY))  # absolute value of the y gradient

    # ******** FAST corner extraction *******
    fast_channel = np.zeros(gray.shape, np.uint8)
    # Create the FAST corner detector
    fast = cv2.FastFeatureDetector_create(threshold=2, nonmaxSuppression=True,
                                          type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    kp = fast.detect(img, None)  # detect keypoints
    fast_channel = cv2.drawKeypoints(gray, kp, fast_channel, color=(255))
    fast_channel = cv2.cvtColor(fast_channel, cv2.COLOR_BGR2GRAY)

    #***********H,s两个通道*******
    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    H, S, V = cv2.split(HSV)

    # ******* HSV filtering ***********
    Lower = np.array([0, 0, 0])
    Upper = np.array([255,33, 255])
    mask = cv2.inRange(HSV, Lower, Upper)
    hsv_channel = cv2.bitwise_and(img, img, mask=mask)
    hsv_channel = cv2.cvtColor(hsv_channel,cv2.COLOR_BGR2GRAY)
    mergedByNp = np.dstack([gray,H,sobelX,sobelY])
    x=np.expand_dims(mergedByNp,axis=0)
    # Converting RGB to BGR for VGG
    # x = x[:, :, :, ::-1]
    print("the shape of x:", x.shape)
    return x, mergedByNp, img
Example #8
def get_surf_des(filename):
    f = cv2.imread(filename)
    # Despite the function name, this describes FAST keypoints with FREAK
    # (a binary descriptor), not SURF
    freak = cv2.xfeatures2d.FREAK_create()
    fast = cv2.FastFeatureDetector_create()
    kp = fast.detect(f, None)

    kp, des = freak.compute(f, kp)
    print(des)
    des = numpy.float32(des)
    return kp, des
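A hedged guess at why des is cast to float32 above: FLANN's KD-tree matcher only accepts floating-point descriptors, so binary FREAK descriptors are sometimes converted before matching. A sketch under that assumption, with f1 and f2 as image filenames:

kp1, des1 = get_surf_des(f1)
kp2, des2 = get_surf_des(f2)
flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5),  # FLANN_INDEX_KDTREE
                              dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)
# Lowe's ratio test to keep only distinctive matches
good = [m for m, n in matches if m.distance < 0.7 * n.distance]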
Example #9
def sift6():
    img = cv2.imread('rectangle.png', 0)

    fast = cv2.FastFeatureDetector_create()
    kp = fast.detect(img, None)  # detect keypoints
    img = cv2.drawKeypoints(img, kp, img, color=(255, 255, 0))  # draw onto img
    print("Threshold: ", fast.getThreshold())  # print the detection threshold
    print("nonmaxSuppression: ", fast.getNonmaxSuppression())  # whether non-max suppression is on
    print("Total Keypoints with nonmaxSuppression: ", len(kp))  # number of keypoints
    cv2.imshow('fast', img)
    cv2.waitKey(0)
Example #10
def detect_features(frame):
    fast = cv2.FastFeatureDetector_create()
    fast.setThreshold(20)

    #Using 'fast' algorithm for feature detection
    kp = fast.detect(frame, None)

    #  Converting from Keypoint structure to Point structure
    pts = np.asarray([[[np.float32(p.pt[0]),
                        np.float32(p.pt[1])]] for p in kp])
    return pts
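The (N, 1, 2) float32 layout produced above is exactly what cv2.calcOpticalFlowPyrLK expects, so a typical follow-up is KLT tracking between two frames; a sketch assuming prev_gray and curr_gray are consecutive grayscale frames:

p0 = detect_features(prev_gray)
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, p0, None,
                                       winSize=(21, 21), maxLevel=3)
# Keep only the points that were tracked successfully (status == 1)
good_new = p1[st.flatten() == 1]
good_old = p0[st.flatten() == 1]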
Example #11
def FastFindFeatures(img):
    print('Starting Fast Detection')
    time1 = time.time()
    # setup FAST alg
    fast = cv2.FastFeatureDetector_create()
    # disable nonmaxSuppression
    # fast.setNonmaxSuppression(0)
    kp = fast.detect(img, None)
    print('Fast Processing Time: %ss' % (time.time() - time1))
    img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))
    cv2.imwrite('fastShapes.png', img2)
Example #12
def gotobehavior(robot, loc, path=None, th=35, tracking=False):
    ctrl = PIctrl()
    # Set the reference point, i.e. the object position
    ctrl.ref_point = loc
    #robot_pos, rangle = get_pose(robot)
    i = 0
    t0 = time.time()
    runKLT = False
    detector = cv2.FastFeatureDetector_create(threshold=25,
                                              nonmaxSuppression=True)
    while True:
        i += 1
        ## Calc robot current pose
        rangle = robot.pose.rotation.angle_z.radians
        robot_pos = to_nppose(robot.pose.position.x_y_z, rangle)
        ctrl.pos_update(robot_pos)
        ctrl.robot = robot
        ## Update the reference object for the controller
        ctrl.ref_point = loc
        ## Run the PID controller
        t1 = time.time()
        dt = t1 - t0
        val = ctrl.update(dt)
        t0 = t1
        ## Get the wheel velocities from the linear and angular velocity
        vr, vl = calc_wheel_velo(val)
        ## Send the velocity commands to the robot
        robot.drive_wheels(vl, vr)
        if path is not None:
            img = add_obj_path(path, robot_pos)
            cv2.imshow('path', img)  #[:,:,::-1])
        if tracking:
            if not runKLT:
                old_image = robot.world.latest_image
                old_image = np.array(old_image.raw_image.convert("L"))
                p0 = detector.detect(old_image)
                p0 = np.array([x.pt for x in p0], dtype=np.float32)
                runKLT = True
            if runKLT:
                new_image = robot.world.latest_image
                new_image = np.array(new_image.raw_image.convert("L"))
                p0, p1, vel = featureTracking(old_image, new_image, p0)
                if p0 is not None:
                    p0 = p1
                    old_image = new_image
                    print(i, 'KLT vel', vel)
                else:
                    break
        err = robot_pos - loc
        print(i, err)
        if np.linalg.norm(err[:2]) < th:
            break
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
Example #13
def fast_corner_detection(img):
    # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html
    fast = cv2.FastFeatureDetector_create()
    img = get_grayscaled_image(img)

    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0), outImage=img)
    return img2, [
        tuple((int(keypoint.pt[0]), int(keypoint.pt[1]))) for keypoint in kp
    ]
Example #14
def fd_Fast(_image):
    imag = cv2.cvtColor(_image, cv2.COLOR_BGR2HSV)
    # Grayscale must come from the original BGR image, not the HSV one
    gray = cv2.cvtColor(_image, cv2.COLOR_BGR2GRAY)
    fast = cv2.FastFeatureDetector_create()

    # find and draw the keypoints
    kp = fast.detect(gray, None)
    imag2 = cv2.drawKeypoints(gray, kp, None, color=(255, 0, 0))
    cv2.normalize(imag2, imag2)

    return imag2.flatten()
Example #15
    def __init__(self, config):
        self.config = config

        # Indicate if this is the first image message.
        self.is_first_img = True

        # ID for the next new feature.
        self.next_feature_id = 0

        # Feature detector
        self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)

        # IMU message buffer.
        self.imu_msg_buffer = []

        # Previous and current images
        self.cam0_prev_img_msg = None
        self.cam0_curr_img_msg = None
        self.cam1_curr_img_msg = None

        # Pyramids for previous and current image
        self.prev_cam0_pyramid = None
        self.curr_cam0_pyramid = None
        self.curr_cam1_pyramid = None

        # Features in the previous and current image.
        # list of lists of FeatureMetaData
        self.prev_features = [[] for _ in range(self.config.grid_num)]  # Don't use [[]] * N
        self.curr_features = [[] for _ in range(self.config.grid_num)]

        # Number of features after each outlier removal step.
        # keys: before_tracking, after_tracking, after_matching, after_ransac
        self.num_features = defaultdict(int)

        # load config
        # Camera calibration parameters
        self.cam0_resolution = config.cam0_resolution   # vec2
        self.cam0_intrinsics = config.cam0_intrinsics   # vec4
        self.cam0_distortion_model = config.cam0_distortion_model     # string
        self.cam0_distortion_coeffs = config.cam0_distortion_coeffs   # vec4

        self.cam1_resolution = config.cam1_resolution   # vec2
        self.cam1_intrinsics = config.cam1_intrinsics   # vec4
        self.cam1_distortion_model = config.cam1_distortion_model     # string
        self.cam1_distortion_coeffs = config.cam1_distortion_coeffs   # vec4

        # Take a vector from cam0 frame to the IMU frame.
        self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
        self.R_cam0_imu = self.T_cam0_imu[:3, :3]
        self.t_cam0_imu = self.T_cam0_imu[:3, 3]
        # Take a vector from cam1 frame to the IMU frame.
        self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
        self.R_cam1_imu = self.T_cam1_imu[:3, :3]
        self.t_cam1_imu = self.T_cam1_imu[:3, 3]
Example #16
def feature_detection(featureType, im):
    
    if featureType=="SURF":
        minHessian = 100
        detector = cv2.xfeatures2d.SURF_create(hessianThreshold=minHessian)
        return detector.detect(im)
        
    elif featureType=="FAST":
        # Initiate FAST object with default values
        fast = cv2.FastFeatureDetector_create()
        return fast.detect(im,None)
Example #17
def daisy_descriptors(image, mask):
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_img = cv2.resize(gray_img, (256, 256), interpolation=cv2.INTER_AREA)
    if mask is not None:
        mask = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA)
        mask = (mask != 0).astype(np.uint8) * 255

    detector = cv2.FastFeatureDetector_create()
    keypoints = detector.detect(gray_img, mask)

    return cv2.xfeatures2d.DAISY_create().compute(gray_img, keypoints)[1]
Example #18
def fast_feature_detection(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fast = cv2.FastFeatureDetector_create(15)
    kp = fast.detect(img, None)
    img = cv2.drawKeypoints(gray,
                            kp,
                            None,
                            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #flags is optional, try without and see what happens
    #img=cv2.drawKeypoints(gray,kp, None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return img
Example #19
def savekeyPointsOnImage(image, imname, thr, w, h):
    fast = cv2.FastFeatureDetector_create(thr)
    kp = fast.detect(image, None)
    img = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    # Mark each keypoint with a small circle and crosshair
    for k in kp:
        x, y = k.pt
        cv2.circle(img, (int(x), int(y)), 2, (50, 50, 50), thickness=1, lineType=8, shift=0)
        cv2.line(img, (int(x) - 2, int(y)), (int(x) + 2, int(y)), (0, 252, 248), 1)
        cv2.line(img, (int(x), int(y) + 2), (int(x), int(y) - 2), (0, 252, 248), 1)
    cv2.imwrite(imname, img)
Example #20
def run():
    img = cv2.imread("../../dataset/Hands/Hand_0000083.jpg", 0)
    fast = cv2.FastFeatureDetector_create()

    # for finding and drawing keypoints
    kp = fast.detect(img, None)

    img2 = cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))

    cv2.imwrite('out/fast_nms_t100000.jpg', img2)

    print("Total keypoints with nonmaxSuppression: ", len(kp))
Example #21
def fast(img):
    """
    Uses the FAST algorithm to track objects
    :param img: Image to track objects
    :return: image with points drawn, Key points
    """
    fast_t = cv2.FastFeatureDetector_create()

    points = fast_t.detect(img, None)
    kimg = cv2.drawKeypoints(img, points, None, color=(0, 255, 0))

    return kimg, points
Example #22
    def __init__(
        self,
        file_path,
        focal_length=718.8560,
        pp=(607.1928, 185.2157),
        lk_params=dict(winSize=(21, 21),
                       criteria=(cv2.TERM_CRITERIA_EPS
                                 | cv2.TERM_CRITERIA_COUNT, 30, 0.01)),
        detector=cv2.FastFeatureDetector_create(threshold=25,
                                                nonmaxSuppression=True),
        camera_extrinsic_rotation=np.identity(3)):
        """
        Arguments:
            file_path {str} -- File path that leads to image sequences or video file
        
        Keyword Arguments:
            focal_length {float} -- Focal length of camera used in image sequence (default: {718.8560})
            pp {tuple} -- Principal point of camera in image sequence (default: {(607.1928, 185.2157)})
            lk_params {dict} -- Parameters for Lucas Kanade optical flow (default: {dict(winSize  = (21,21), criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))})
            detector {cv2.FeatureDetector} -- Most types of OpenCV feature detectors (default: {cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)})
        
        Raises:
            ValueError -- Raised when either file path is incorrect, or img_file_path is not configured correctly
        """

        self.file_path = file_path
        self.detector = detector
        self.lk_params = lk_params
        self.focal = focal_length
        self.extrinsic_rotation = camera_extrinsic_rotation
        self.pp = pp
        self.R = np.identity(3)
        self.t = np.zeros(3)
        self.id = 0
        self.n_features = 0
        self.old_frame = None
        self.current_frame = None
        self.good_old = None
        self.good_new = None
        self.p0 = None
        self.p1 = None
        self.essential_matrix_point_mask = None

        self.frame_paths = glob.glob(os.path.join(file_path, "*.png"))
        self.frame_paths.sort()
        self.n_frames = len(self.frame_paths)

        self.vid_cap = None
        if self.n_frames == 0:  # may be a video file
            self.vid_cap = cv2.VideoCapture(file_path)
            self.n_frames = self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        self.process_frame()
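A hypothetical usage sketch (the class name VisualOdometry is an assumption; the excerpt only shows its __init__). Per the docstring, any OpenCV feature detector can be swapped in for the default FAST detector:

vo = VisualOdometry('path/to/image_sequence')  # directory of *.png frames
# ...or with a different detector, e.g. Shi-Tomasi corners via GFTT:
vo = VisualOdometry('path/to/image_sequence',
                    detector=cv2.GFTTDetector_create(maxCorners=3000))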
Example #23
def fast(image, outline=False, non_max_suppression=True, verbose=False, **kwargs):
    if outline:
        image = longest_edge(image, verbose=verbose, **kwargs)
    fast = cv2.FastFeatureDetector_create()
    # setNonmaxSuppression(0) turns suppression off, so only call it when
    # the caller asked for suppression to be disabled
    if non_max_suppression is not True:
        fast.setNonmaxSuppression(0)
    kp = fast.detect(image, None)
    img2 = cv2.drawKeypoints(image, kp, None, color=(127, 0, 0))
    if verbose:
        display(img2, 'FAST Corners')
    return img2
Example #24
    def __init__(self):
        self.has_seen_first_frame = False

        # Initiate STAR detector
        self.orb = cv2.ORB_create()

        # Initiate FAST object with default values
        self.fast = cv2.FastFeatureDetector_create(nonmaxSuppression=True,
                                                   threshold=100)

        # create BFMatcher object
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
Example #25
    def fast_corners(self, image):
        fast = cv2.FastFeatureDetector_create()
        # Applying Gaussian Blurring
        blur = cv2.GaussianBlur(image, (5, 5), 0)

        # Detect keypoints with non max suppression
        keypoints = fast.detect(blur, None)
        # Disable nonmaxSuppression
        # fast.setNonmaxSuppression(False)
        # Detect keypoints without non max suppression
        # keypoints_without_nonmax = fast.detect(gray, None)
        return keypoints
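A small sketch expanding the commented-out idea above: comparing keypoint counts with and without non-max suppression (image is assumed to be a grayscale numpy array):

fast = cv2.FastFeatureDetector_create()
kp_nms = fast.detect(image, None)   # suppression on (the default)
fast.setNonmaxSuppression(False)
kp_raw = fast.detect(image, None)   # suppression off: many clustered corners
print(len(kp_nms), 'keypoints with NMS vs', len(kp_raw), 'without')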
Example #26
    def __init__(self, image_size, keypoint_image_border_size,
                 max_keypoint_count, ldescriptor_length,
                 use_scale_orientation):
        super(VGG, self).__init__(
            image_size=image_size,
            keypoint_image_border_size=keypoint_image_border_size,
            max_keypoint_count=max_keypoint_count,
            ldescriptor_length=ldescriptor_length)

        self.feature_detector = cv2.FastFeatureDetector_create()
        self.descriptor_extractor = cv2.xfeatures2d.VGG_create(
            use_scale_orientation=use_scale_orientation)
Example #27
    def __init__(self, cam):
        self.cam = cam
        self.last_frame = None
        self.new_frame = None
        self.frame_stage = 0
        self.detector = cv2.FastFeatureDetector_create(2, True)  # threshold=2, nonmaxSuppression=True
        self.points = None
        self.prev_points = None
        self.cur_R = None
        self.cur_t = None
        self.optical_flow = None
        self.delta = None
Example #28
def OnFAST():
    img = cv2.imread(sys.argv[1], 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fast = cv2.FastFeatureDetector_create()
    kp = fast.detect(gray, None)

    xs = np.array([p.pt[0] for p in kp])
    ys = np.array([p.pt[1] for p in kp])

    c = ImgCanvas(img)
    c.Point(xs, ys, color=RandColors(ncolor=len(xs)), size=9, linewidth=3)
    c.Show()
Example #29
def fast(img):
    # Initiate FAST object with default values
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fast = cv2.FastFeatureDetector_create()

    # find and draw the keypoints
    kp = fast.detect(img, None)
    # get all the kp coordinates
    pts = [kp[idx].pt for idx in range(len(kp))]
    print(pts)
    return cv2.drawKeypoints(gray, kp, img, color=(255, 0, 0))
Example #30
def get_corners_Shi_Tomasi(image1):
    if len(image1.shape) == 2:
        im1_gray = image1.copy()
    else:
        im1_gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)

    # Note: this FAST detector is created but never used below; the corners
    # actually come from Shi-Tomasi (cv2.goodFeaturesToTrack)
    detector = cv2.FastFeatureDetector_create()

    corners = cv2.goodFeaturesToTrack(im1_gray, 25, 0.01, 10)
    corners = corners.reshape(corners.shape[0], corners.shape[2]).astype(int)

    return corners