Example #1
    def __init__(self):
        self.image_pub = rospy.Publisher("image_topic_2", Image)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/usb_cam/image_raw", Image,
                                          self.callback)
        #self.image_sub = rospy.Subscriber("/ocam/image_raw",Image,self.callback)
        #self.image_sub = rospy.Subscriber("/usb2_cam/image_rect_color",Image,self.callback)
        self.CRT = CMT.CMT()

        self.CRT.estimate_scale = True  # CMT expects boolean flags here
        self.CRT.estimate_rotation = True
        self.pause_time = 10
        ########################### First region

        im0 = cv2.imread('./frame_cap.jpg',
                         flags=cv2.IMREAD_COLOR)  #flags=cv2.IMREAD_GRAYSCALE)
        #im0 = cv2.imread('/home/sergio/catikin_ws_user/src/cmt/scripts/frame.jpg', flags=cv2.IMREAD_COLOR)
        #im0 = cv2.imread('/home/sergio/catikin_ws_user/src/cmt/scripts/frame.jpg',flags=cv2.IMREAD_GRAYSCALE)
        cv2.imshow('im0', im0)
        #im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        im_gray0 = im0
        im_draw = np.copy(im0)
        print('Select the first region')
        tl = (84, 55)
        br = (557, 307)
        #tl=(35,35)
        #bl=(605,325)
        (tl, br) = util.get_rect(im_draw)
        print('using %s, %s as init bb' % (tl, br))
        self.CRT.initialise(im_gray0, tl, br)
        self.frame = 1
Example #2
    def __init__(self):
        self.image_pub = rospy.Publisher("image_topic_2", Image)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/usb_cam/image_raw", Image,
                                          self.callback)
        #self.image_sub = rospy.Subscriber("/usb2_cam/image_rect_color",Image,self.callback)
        # One CMT tracker per region
        self.CRT = CMT.CMT()
        self.CNT = CMT.CMT()
        self.CST = CMT.CMT()

        self.CRT.estimate_scale = True  # CMT expects boolean flags here
        self.CRT.estimate_rotation = True
        self.CNT.estimate_scale = True
        self.CNT.estimate_rotation = True
        self.CST.estimate_scale = True
        self.CST.estimate_rotation = True
        self.pause_time = 10
        ########################### First region

        # cv2.imread does not expand '~'; expand the user path explicitly (requires `import os`)
        im0 = cv2.imread(os.path.expanduser('~/catikin_ws_c/src/cmt/scripts/imas.jpg'),
                         flags=cv2.IMREAD_COLOR)
        if im0 is not None:
            print(im0.shape)
        im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        im_draw = np.copy(im0)
        print('Select the first region')
        (tl, br) = util.get_rect(im_draw)
        print('using %s, %s as init bb' % (tl, br))
        self.CRT.initialise(im_gray0, tl, br)
        ########################### Second region
        print('Select the second region')
        (tl, br) = util.get_rect(im_draw)
        print('using %s, %s as init bb' % (tl, br))
        self.CNT.initialise(im_gray0, tl, br)
        ############################ Third region
        print('Select the third region')
        (tl, br) = util.get_rect(im_draw)
        print('using %s, %s as init bb' % (tl, br))
        self.CST.initialise(im_gray0, tl, br)
        self.frame = 1
Example #3
    def _prompt_for_target(self):
        """Prompt user for target bounding box.

        Launches an image viewer on which the user draws a bounding box.

        Returns:
            (int, int, int, int): bbox in (x, y, w, h) format.
        """

        tl, br = util.get_rect(self.current_image)
        return evaluation.BboxFormats.convert_bbox_format(
            tl + br, evaluation.BboxFormats.TLBR, evaluation.BboxFormats.CCWH)
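For illustration, here is a minimal sketch of the TLBR-to-CCWH conversion that the call above presumably performs, assuming CCWH means (center_x, center_y, width, height); the function name tlbr_to_ccwh is hypothetical, and the project itself relies on evaluation.BboxFormats.convert_bbox_format.

def tlbr_to_ccwh(bbox):
    """Convert (x1, y1, x2, y2) to (center_x, center_y, width, height).

    Illustrative sketch only; not part of the project code.
    """
    x1, y1, x2, y2 = bbox
    w = x2 - x1
    h = y2 - y1
    return (x1 + w / 2.0, y1 + h / 2.0, w, h)

# Example with the rectangle used in Example #1:
print(tlbr_to_ccwh((84, 55, 557, 307)))  # -> (320.5, 181.0, 473, 252)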
Example #4
class Car(vehicle.Vehicle):
    vertices = util.get_rect(6, 4)
    height = 4.5

    accel_force = -100.0
    turn_force = 3
    idle_turn_factor = .3
    max_speed = 40
    min_speed = -max_speed * .3
    max_angular_velocity = 4

    seat_offset = 1

    wheel_offset = 2

    side_friction = 3
    friction_speed = 20
    stop_speed = .1

    a_damping = .4

    timer = 0

    def __init__(self, app):
        vehicle.Vehicle.__init__(self, app)

        wheel_tex = 'wheel.jpg'

        self.wheel_rots = set()

        with self.canvas:
            self.rot = Rotate(0, 0, 1, 0)
            Translate(0, 1.5, 0)

            self.app.graphic_data.draw_mesh('models/car',
                                            self.canvas,
                                            texture='metal.jpg')

            axel_width = 2.5

            PushMatrix()
            Translate(2, 0, 0)

            PushMatrix()
            Translate(0, 0, -axel_width)

            self.fl_rot = Rotate(0, 0, 1, 0)
            self.wheel_rots.add(Rotate(0, 0, 0, 1))
            self.app.graphic_data.draw_mesh('models/wheel',
                                            self.canvas,
                                            wheel_tex)
            PopMatrix()

            PushMatrix()
            Translate(0, 0, axel_width)
            self.fr_rot = Rotate(0, 0, 1, 0)
            self.wheel_rots.add(Rotate(0, 0, 0, 1))
            self.app.graphic_data.draw_mesh('models/wheel',
                                            self.canvas,
                                            wheel_tex)
            PopMatrix()
            PopMatrix()

            PushMatrix()
            Translate(-2, 0, 0)
            self.wheel_rots.add(Rotate(0, 0, 0, 1))

            PushMatrix()
            Translate(0, 0, -axel_width)
            self.app.graphic_data.draw_mesh('models/wheel',
                                            self.canvas,
                                            wheel_tex)
            PopMatrix()

            PushMatrix()
            Translate(0, 0, axel_width)
            self.app.graphic_data.draw_mesh('models/wheel',
                                            self.canvas,
                                            wheel_tex)
            PopMatrix()
            PopMatrix()

    def move(self, input_vector, dt):
        self.input_vector = list(input_vector)
        accel = self.throttle_down - self.throttle_up

        self.local_vel = pymunk.Vec2d(self.collision.body.velocity).rotated(-self.collision.body.angle)

        if self.local_vel.x:
            mag = self.local_vel.x
            if not accel:
                mag *= self.idle_turn_factor
            self.input_vector[0] *= mag
        else:
            self.input_vector[0] = 0

        self.collision.body.apply_impulse_at_local_point((accel * self.accel_force * dt,
                                                          self.input_vector[0] * self.turn_force * dt),
                                                         (self.wheel_offset, 0))

    def update(self, dt):
        accel = (self.throttle_down - self.throttle_up)

        target_angle = self.input_vector[0] * accel

        da = accel * 1000 * dt
        for r in self.wheel_rots:
            r.angle += da

        da = (target_angle - self.fr_rot.angle) * .2

        self.fr_rot.angle += da
        self.fl_rot.angle += da

        self.rot.angle = self.collision.body.angle * -57.3  # pymunk radians -> degrees, sign flipped (57.3 ≈ 180 / pi)

        if not self.collision.standing:
            self.collision.dy -= 10 * dt
            self.collision.move_y(dt)

        vehicle.Vehicle.update(self, dt)
Example #5
            raise Exception('Unable to parse bounding box')
        if len(values) != 4:
            raise Exception('Bounding box must have exactly 4 elements')
        bbox = np.array(values)

        # Convert to point representation, adding singleton dimension
        bbox = util.bb2pts(bbox[None, :])

        # Squeeze
        bbox = bbox[0, :]

        tl = bbox[:2]
        br = bbox[2:4]
    else:
        # Get rectangle input from user
        (tl, br) = util.get_rect(im_draw)

    print 'using', tl, br, 'as init bb'

    CMT.initialise(im_gray0, tl, br)

    frame = 1
    while True:
        # Read image
        status, im = cap.read()
        if not status:
            break
        im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_draw = np.copy(im)

        tic = time.time()
Example #6
def main(estimate_scale=True,
         estimate_rotation=True,
         input_box=None,
         input_pic=None,
         output=None):
    CMT.estimate_scale = estimate_scale
    CMT.estimate_rotation = estimate_rotation
    pause_time = 10
    skip = None
    preview = None
    quiet = False

    if input_pic is not None:
        print input_pic
        input_dir = input_pic
    elif args.inputpath is not None:
        print args.inputpath
        input_dir = args.inputpath
    else:
        input_dir = None

    if output is not None:
        output_dir = output
    elif args.output is not None:
        output_dir = args.output
    else:
        output_dir = None

    if input_box is not None:
        init_box = input_box
    elif args.bbox is not None:
        init_box = args.bbox
    else:
        init_box = None

    if output_dir is not None:
        quiet = True
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            raise Exception(output_dir + ' exists, but is not a directory')

    # Clean up
    cv2.destroyAllWindows()

    if input_dir is not None:

        # If a path to a file was given, assume it is a single video file
        if os.path.isfile(input_dir):
            cap = cv2.VideoCapture(input_dir)

            #Skip first frames if required
            if skip is not None:
                cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, skip)

        # Otherwise assume it is a format string for reading images
        else:
            cap = util.FileVideoCapture(input_dir)

            #Skip first frames if required
            if skip is not None:
                cap.frame = 1 + skip

        # By default do not show preview in both cases
        if preview is None:
            preview = False

    else:
        # If no input path was specified, open camera device
        cap = cv2.VideoCapture(0)
        if preview is None:
            preview = True

    # Check if videocapture is working
    if not cap.isOpened():
        print 'Unable to open video input.'
        sys.exit(1)

    while preview:
        status, im = cap.read()
        cv2.imshow('Preview', im)
        k = cv2.waitKey(10)
        if not k == -1:
            break

    # Read first frame
    status, im0 = cap.read()
    im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
    im_draw = np.copy(im0)

    if init_box is not None:
        # Try to disassemble user specified bounding box
        values = init_box.split(',')
        try:
            values = [int(v) for v in values]

        except ValueError:
            raise Exception('Unable to parse bounding box')
        if len(values) != 4:
            raise Exception('Bounding box must have exactly 4 elements')
        bbox = np.array(values)

        # Convert to point representation, adding singleton dimension
        bbox = util.bb2pts(bbox[None, :])  #bbox[None, :]=array([[a, b, c,d]])

        # Squeeze
        bbox = bbox[0, :]

        tl = bbox[:2]
        br = bbox[2:4]

    else:
        # Get rectangle input from user
        (tl, br) = util.get_rect(im_draw)

    print 'using', tl, br, 'as init bb'

    #CMT.initialise(im0, tl, br)
    CMT.initialise(im_gray0, tl, br)
    #util.draw_keypoints(selected_keypoints, im_gray0,(255, 0, 0))

    frame = 1
    while True:
        # Read image
        status, im = cap.read()
        if not status:
            break
        im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_draw = np.copy(im)

        tic = time.time()
        CMT.process_frame(im)
        #CMT.process_frame(im_gray)
        toc = time.time()

        # Display results

        # Draw updated estimate
        if CMT.has_result:

            cv2.line(im_draw, CMT.tl, CMT.tr, (255, 0, 0), 2)
            cv2.line(im_draw, CMT.tr, CMT.br, (255, 0, 0), 2)
            cv2.line(im_draw, CMT.br, CMT.bl, (255, 0, 0), 2)
            cv2.line(im_draw, CMT.bl, CMT.tl, (255, 0, 0), 2)
            # cv2.imshow("im_draw1",im_draw)
            # cv2.waitKey()

        #util.draw_keypoints(CMT.tracked_keypoints, im_draw, (255, 255, 255))
        # this is from simplescale
        #util.draw_keypoints(CMT.votes[:, :2], im_draw)  # blue
        #util.draw_keypoints(CMT.outliers[:, :2], im_draw, (0, 0, 255))

        if output_dir is not None:

            # Original image
            # cv2.imwrite('{0}/input_{1:08d}.png'.format(output_dir, frame), im)
            # Output image
            cv2.imwrite('{0}/output_{1:08d}.png'.format(output_dir, frame),
                        im_draw)
            '''
			# Keypoints
			with open('{0}/keypoints_{1:08d}.csv'.format(output_dir, frame), 'w') as f:
				f.write('x y\n')
				np.savetxt(f, CMT.tracked_keypoints[:, :2], fmt='%.2f')

			# Outlier
			with open('{0}/outliers_{1:08d}.csv'.format(output_dir, frame), 'w') as f:
				f.write('x y\n')
				np.savetxt(f, CMT.outliers, fmt='%.2f')

			# Votes
			with open('{0}/votes_{1:08d}.csv'.format(output_dir, frame), 'w') as f:
				f.write('x y\n')
				np.savetxt(f, CMT.votes, fmt='%.2f')
			'''
            # Bounding box
            # with open('{0}/bbox_{1:08d}.csv'.format(output_dir, frame), 'w') as f:
            with open('{0}/bbox.txt'.format(output_dir, frame), 'a') as f:
                # f.write('x y\n')
                # Duplicate entry tl is not a mistake, as it is used as a drawing instruction
                # np.savetxt(f, np.array((CMT.tl, CMT.tr, CMT.br, CMT.bl, CMT.tl)), fmt='%.2f')
                L = []
                for i in [CMT.tl, CMT.tr, CMT.br, CMT.bl]:
                    #st = '(' + str(int(i[0])) + ' ' + str(int(i[1])) + ')'
                    st = '(' + str(float(i[0])) + ' ' + str(float(i[1])) + ')'
                    L.append(st)
                bb = ','.join(L)
                f.write(bb + '\n')

        if not quiet:
            cv2.imshow('main', im_draw)

            # Check key input
            k = cv2.waitKey(pause_time)
            key = chr(k & 255)
            if key == 'q':
                break
            if key == 'd':
                import ipdb
                ipdb.set_trace()

        # Remember image
        im_prev = im_gray

        # Advance frame number
        frame += 1
Example #7
File: run.py Project: eljeilany/CMT
			raise Exception('Unable to parse bounding box')
		if len(values) != 4:
			raise Exception('Bounding box must have exactly 4 elements')
		bbox = np.array(values)

		# Convert to point representation, adding singleton dimension
		bbox = util.bb2pts(bbox[None, :])

		# Squeeze
		bbox = bbox[0, :]

		tl = bbox[:2]
		br = bbox[2:4]
	else:
		# Get rectangle input from user
		(tl, br) = util.get_rect(im_draw)

	print 'using', tl, br, 'as init bb'


	CMT.initialise(im_gray0, tl, br)

	frame = 1
	while True:
		# Read image
		status, im = cap.read()
		if not status:
			break
		im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
		im_draw = np.copy(im)
Example #8
    def callback(self, data):
        global frame
        global started
        #global cv_image
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            print(self.bridge.imgmsg_to_cv2(data, "bgr8"))
        except CvBridgeError as e:
            print(e)

        # Clean up

        preview = 'preview'
        # If no input path was specified, open camera device
        #cap = cv2.VideoCapture(0)
        if preview is None:
            preview = True

            # Check if videocapture is working
        #if not cap.isOpened():
        #    print("Unable to open video input.")
        #    sys.exit(1)

        #while preview:
        #status, im = cap.read()
        status = True

        im = cv_image

        if started == 0:
            cv2.imshow('Preview', im)
            #cv2.imshow('Preview', cv_image)
            k = cv2.waitKey(10)
            if not k == -1:
                #break
                im0 = cv_image
                im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
                #im_gray0 = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
                im_draw = np.copy(cv_image)

                (tl, br) = util.get_rect(im_draw)
                print("using", tl, br, "as init bb")
                CMT.initialise(im_gray0, tl, br)
                frame = 1
                started = 1
                cv2.destroyAllWindows()

            # Read first frame
        #status, im0 = cap.read()

        if started == 1:
            # Read image
            #status, im = cap.read()
            #im=cv_image
            if not status:
                cv2.destroyAllWindows()
                sys.exit(1)
            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            #im_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
            im_draw = np.copy(im)
            #im_draw = np.copy(cv_image)

            tic = time.time()
            CMT.process_frame(im_gray)
            toc = time.time()

            # Display results

            # Draw updated estimate
            if CMT.has_result:

                cv2.line(im_draw, CMT.tl, CMT.tr, (255, 0, 0), 4)
                cv2.line(im_draw, CMT.tr, CMT.br, (255, 0, 0), 4)
                cv2.line(im_draw, CMT.br, CMT.bl, (255, 0, 0), 4)
                cv2.line(im_draw, CMT.bl, CMT.tl, (255, 0, 0), 4)

            util.draw_keypoints(CMT.tracked_keypoints, im_draw,
                                (255, 255, 255))
            # this is from simplescale
            util.draw_keypoints(CMT.votes[:, :2], im_draw)  # blue
            util.draw_keypoints(CMT.outliers[:, :2], im_draw, (0, 0, 255))
            cv2.imshow('main', im_draw)
            #cv2.imshow('main', im_draw)

            # Check key input
            k = cv2.waitKey(pause_time)
            key = chr(k & 255)
            if key == 'q':
                #cap.release()
                cv2.destroyAllWindows()
                rospy.signal_shutdown("ROSPy Shutdown")
                #break
                sys.exit(1)
            if key == 'd':
                import ipdb
                ipdb.set_trace()

            # Remember image
            im_prev = im_gray

            # Advance frame number
            frame += 1

            print(
                "{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms"
                .format(CMT.center[0], CMT.center[1], CMT.scale_estimate,
                        CMT.active_keypoints.shape[0], 1000 * (toc - tic),
                        frame))

        #(rows,cols,channels) = cv_image.shape
        #if cols > 60 and rows > 60 :
        #  cv2.circle(cv_image, (50,50), 10, 255)

        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
Example #9
class Helicopter(vehicle.Vehicle):
    vertices = util.get_rect(6, 4)
    height = 5.5

    accel_force = -100.0
    turn_force = 100
    idle_turn_factor = .3
    max_speed = 20
    min_speed = -max_speed * .5
    max_angular_velocity = 2

    max_y_vel = 10

    seat_offset = -.1

    side_friction = 3
    friction_speed = 15
    stop_speed = .1

    a_damping = .4

    prop_vel = 0

    def __init__(self, app):
        vehicle.Vehicle.__init__(self, app)

        with self.canvas:
            self.rot = Rotate(0, 0, 1, 0)
            self.pitch = Rotate(0, 0, 0, 1)
            self.roll = Rotate(0, 1, 0, 0)

            self.app.graphic_data.draw_mesh('models/helicopter',
                                            self.canvas,
                                            texture='metal.jpg')

            PushMatrix()
            self.prop_rot = Rotate(0, 0, 1, 0)

            Translate(0, 4, 0)
            self.app.graphic_data.draw_mesh('models/propellers',
                                            self.canvas,
                                            texture=None)
            PopMatrix()

    def velocity_func(self, body, gravity, damping, dt):
        if self.collision.standing:
            body.velocity *= .5
            body.angular_velocity *= 0
        else:
            if self.input_vector:
                if abs(body.angular_velocity) > self.max_angular_velocity:
                    body.angular_velocity = self.max_angular_velocity * cmp(
                        body.angular_velocity, 0)
                self.local_vel = pymunk.Vec2d(
                    body.velocity).rotated(-body.angle)
                self.local_vel.y *= (1 - self.side_friction * dt)
                self.local_vel.x = min(self.max_speed,
                                       max(self.min_speed, self.local_vel.x))
                body.velocity = self.local_vel.rotated(body.angle)
            else:
                friction = self.friction_speed * dt
                if self.local_vel.x < friction:
                    body.velocity *= .8
                    body.angular_velocity *= 0
                else:
                    body.velocity -= body.velocity.normalized() * friction
        body.angular_velocity *= 1 - (.9 * dt)

    def move(self, input_vector, dt):
        if not self.collision.standing:
            self.local_vel = pymunk.Vec2d(
                self.collision.body.velocity).rotated(
                    -self.collision.body.angle)

            self.input_vector = input_vector
            self.collision.body.apply_impulse_at_local_point(
                (self.input_vector[1] * self.accel_force * dt,
                 self.input_vector[0] * self.turn_force * dt),
                (self.wheel_offset, 0))

    def update(self, dt):
        if self.turned_on:
            accel = (self.throttle_up - self.throttle_down)

            target_vel = accel * 200 + 700
            self.prop_vel += cmp(target_vel, self.prop_vel) * 500 * dt

            self.prop_rot.angle += self.prop_vel * dt

        elif self.prop_vel > 0:
            self.prop_vel -= 200 * dt
            if self.prop_vel < 0:
                self.prop_vel = 0
            else:
                self.prop_rot.angle += self.prop_vel * dt

        self.rot.angle = self.collision.body.angle * -57.3  # pymunk radians -> degrees, sign flipped (57.3 ≈ 180 / pi)

        if self.collision.standing:
            if self.collision.dy > 0:
                self.collision.standing = None
            self.pitch.angle = 0
            self.roll.angle = 0
        else:
            self.pitch.angle += (self.input_vector[1] * 20 -
                                 self.pitch.angle) * .05
            self.roll.angle += (self.input_vector[0] * 20 -
                                self.roll.angle) * .05

        self.collision.dy += (self.throttle_up - self.throttle_down -
                              .2) * 30 * dt
        if abs(self.collision.dy) > self.max_y_vel:
            self.collision.dy = cmp(self.collision.dy, 0) * self.max_y_vel

        vehicle.Vehicle.update(self, dt)
Example #10
    status, banana = cap.read()
    banana = cv2.imread(args.banana, cv2.IMREAD_GRAYSCALE)
    tl = [0, 0]
    br = [banana.shape[1], banana.shape[0]]

    status, im0 = cap.read()
    im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)

    # Trying to overlay banana on top of the captured frame.
    x_offset=y_offset=0
    im_gray0[y_offset:y_offset+banana.shape[0], x_offset:x_offset+banana.shape[1]] = banana

    # Temp drawing of the images. Rectangle is ignored here.
    im_draw = np.copy(im_gray0)
    (tl2, br2) = util.get_rect(im_draw)

    print 'using', tl, br, 'as init bb'
    CMT.initialise(im_gray0, tl, br)
else:
    status, im0 = cap.read()
    im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
    im_draw = np.copy(im0)
    (tl, br) = util.get_rect(im_draw)
    print 'using', tl, br, 'as init bb'
    CMT.initialise(im_gray0, tl, br)
    status, im0 = cap.read()
    im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
    im_draw = np.copy(im0)