Example No. 1
    def __init__(self, model):
        self.photo = 255 * np.ones(shape=[480, 640, 3], dtype=np.uint8)  # blank white frame
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.bridge = CvBridge()
        self.subCamera = rospy.Subscriber('/camera/image_raw/compressed',
                                          CompressedImage, self.getImage)
        self.pubLocation = rospy.Publisher('/move_bot', Vector2D, queue_size=1)
        self.annot_image = None
        self.coordinates = Vector2D()

        # Model and Comparison Parameters
        self.model = model
        self.min_score = 0.2
        self.max_overlap = 0.1
        self.top_k = 200
        # Suppress everything but cat
        self.suppress = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                         'bus', 'car', 'chair', 'cow', 'diningtable', 'dog',
                         'horse', 'motorbike', 'person', 'pottedplant',
                         'sheep', 'sofa', 'train', 'tvmonitor')

        # Transforms
        self.resize = transforms.Resize((300, 300))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
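
The subscriber above registers a getImage callback that is not shown in this snippet. A minimal sketch of such a callback, assuming cv_bridge's compressed_imgmsg_to_cv2 method (the body is illustrative, not the original author's code):

    def getImage(self, msg):
        # Convert the incoming CompressedImage into a BGR OpenCV array
        self.photo = self.bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='bgr8')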
Example No. 2
    def detect_blob(self, Blob, T, NIm, H, W, crop, timestamps, result):
        # Percentage of frames in which the blob appears
        appearance_percentage = (1.0 * Blob['N']) / (1.0 * NIm)

        # Frequency estimation based on FFT
        f = np.arange(0.0, 1.0 * NIm + 1.0, 2.0)
        signal_f = scipy.fftpack.fft(Blob['Signal'] - np.mean(Blob['Signal']))
        y_f = 2.0 / NIm * np.abs(signal_f[:NIm // 2 + 1])
        fft_peak_freq = 1.0 * np.argmax(y_f) / (NIm * T)
        #half_freq_dist = 0.8 #1.0*f[1]/2

        # rospy.loginfo('[%s] Appearance perc. = %s, frequency = %s' % (self.node_name, appearance_percentage, fft_peak_freq))
        freq_identified = 0
        # Take decision
        detected = False
        for i in range(len(self.freqIdentify)):
            if (0.2 < appearance_percentage < 0.8
                    and not self.useFFT) or (
                        self.useFFT
                        and abs(fft_peak_freq - self.freqIdentify[i]) < 0.35):
                # Decision
                detected = True
                freq_identified = self.freqIdentify[i]
                # Raw detection
                coord_norm = Vector2D(1.0 * (crop[1][0] + Blob['p'][0]) / W,
                                      1.0 * (crop[0][0] + Blob['p'][1]) / H)
                result.detections.append(
                    LEDDetection(rospy.Time.from_sec(timestamps[0]),
                                 rospy.Time.from_sec(timestamps[-1]),
                                 coord_norm, fft_peak_freq, '', -1, timestamps,
                                 signal_f, f, y_f))

        return detected, result, freq_identified, fft_peak_freq
Example No. 3
    def get_gnd_coord(self, x, y):
        normalized_uv = Vector2D()
        normalized_uv.x = float(x) / float(self.width)
        normalized_uv.y = float(y) / float(self.height)

        gp = self.call_service_get_ground_coordinate(normalized_uv, self.veh)

        return gp
Example No. 4
def mouse_cb(event, x, y, flags, param):
  if event == cv2.EVENT_LBUTTONDOWN:
    w, h = param
    normalized_uv = Vector2D()
    normalized_uv.x = float(x)/float(w)
    normalized_uv.y = float(y)/float(h)

    print("image coordinate: (%d, %d)" % (x, y))
    print("normalized image coordinate: (%f, %f)" % (normalized_uv.x, normalized_uv.y))
Example No. 5
def mouse_cb(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        w, h = param
        normalized_uv = Vector2D()
        normalized_uv.x = float(x) / float(w)
        normalized_uv.y = float(y) / float(h)

        print(f"image coordinate: ({x}, {y})")
        print(
            f"normalized image coordinate: ({normalized_uv.x}, {normalized_uv.y})"
        )
Example No. 6
def mouse_cb(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        w, h = param
        normalized_uv = Vector2D()
        normalized_uv.x = float(x) / float(w)
        normalized_uv.y = float(y) / float(h)

        gp = call_service_get_ground_coordinate(normalized_uv, veh)

        print "image coordinate: (%d, %d)" % (x, y)
        print "normalized image coordinate: (%f, %f)" % (normalized_uv.x,
                                                         normalized_uv.y)
        print "ground coordinate: (%f, %f, %f)" % (gp.x, gp.y, gp.z)
Example No. 7
def predict_segments(sm, gpg):
    """
        Predicts what segments the robot would see.

        Assumes map is in FRAME_AXLE.
    """
    x_frustum = +0.1
    fov = np.deg2rad(150)
    res = []
    for segment in sm.segments:
        p1 = segment.points[0]
        p2 = segment.points[1]

        # Both endpoints must already be expressed in FRAME_AXLE
        if ((sm.points[p1].id_frame != FRAME_AXLE)
                or (sm.points[p2].id_frame != FRAME_AXLE)):
            msg = "Cannot deal with points not in frame FRAME_AXLE"
            raise NotImplementedError(msg)

        w1 = sm.points[p1].coords
        w2 = sm.points[p2].coords

        coords_inside = clip_to_view([w1, w2], x_frustum, fov)

        if not coords_inside:
            continue
        w1 = coords_inside[0]
        w2 = coords_inside[1]
        point1 = Point(w1[0], w1[1], w1[2])
        point2 = Point(w2[0], w2[1], w2[2])

        pixel1 = gpg.ground2pixel(point1)
        pixel2 = gpg.ground2pixel(point2)

        normalized1 = gpg.pixel2vector(pixel1)
        normalized2 = gpg.pixel2vector(pixel2)

        pixels_normalized = [normalized1, normalized2]
        normal = Vector2D(0, 0)
        points = [point1, point2]

        # color = segment_color_constant_from_string(segment.color)
        assert segment.color in [Segment.RED, Segment.YELLOW, Segment.WHITE]
        s = Segment(pixels_normalized=pixels_normalized,
                    normal=normal,
                    points=points,
                    color=segment.color)
        res.append(s)

    return SegmentList(segments=res)
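
clip_to_view is not shown in this example. A simplified sketch of the clipping it performs, handling only the x >= x_frustum half-plane (the real helper presumably also clips against the fov cone, so treat this as an illustrative assumption):

import numpy as np

def clip_to_view_sketch(points, x_frustum):
    # Clip the segment [w1, w2] against the plane x = x_frustum
    w1 = np.asarray(points[0], dtype=float)
    w2 = np.asarray(points[1], dtype=float)
    in1 = w1[0] >= x_frustum
    in2 = w2[0] >= x_frustum
    if not in1 and not in2:
        return []                      # segment entirely behind the plane
    if in1 and in2:
        return [w1, w2]                # segment entirely visible
    # One endpoint is behind: replace it with the plane intersection
    t = (x_frustum - w1[0]) / (w2[0] - w1[0])
    cut = w1 + t * (w2 - w1)
    return [w1, cut] if in1 else [cut, w2]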
Example No. 8
    def process_msg(self, skeletons_msg):
        skeletons = []
        for skeleton in skeletons_msg.skeletons:
            points = self.geometry.vectors_to_ground(skeleton.cloud)

            skeleton_out_msg = SkeletonMsg()
            skeleton_out_msg.color = skeleton.color
            skeleton_out_msg.cloud = [
                Vector2D(x=point.x, y=point.y) for point in points
            ]
            skeletons.append(skeleton_out_msg)

        skeletons_out_msg = SkeletonsMsg()
        skeletons_out_msg.skeletons = skeletons
        self.pub_skeleton.publish(skeletons_out_msg)
Example No. 9
def skeleton_to_msg(skeleton, color, original_image_size, top_cutoff=0):
    y_indices, x_indices = skeleton
    height, width = original_image_size

    x_float = x_indices.astype(np.float32)
    y_float = y_indices.astype(np.float32)

    # Normalize the indices in [0, 1]
    y_float = (y_float + top_cutoff) / height
    x_float = x_float / width

    # Create message
    message = SkeletonMsg()
    message.cloud = [Vector2D(x=x, y=y) for (y, x) in zip(y_float, x_float)]
    message.color = color

    return message
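
A hypothetical call to the helper above, assuming the skeleton pixels come from a binary mask via np.nonzero (mask, color value, and sizes are all illustrative):

import numpy as np

skeleton_mask = np.zeros((480, 640), dtype=bool)  # stand-in for a real skeleton image
skeleton_mask[200:210, 100] = True
ys, xs = np.nonzero(skeleton_mask)                # (y_indices, x_indices)
msg = skeleton_to_msg((ys, xs), color=0,
                      original_image_size=(480, 640), top_cutoff=40)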
Example No. 10
    def detect_led(self, images, mask, frequencies_to_detect,
                   min_distance_between_LEDs_pixels):

        assert len(images.shape) == 1
        n = images.shape[0]
        if n == 0:
            raise ValueError('No images provided')

        timestamps = images['timestamp']
        rgb = images['rgb']

        rgb0 = rgb[0]
        if not mask.shape == rgb0.shape:
            raise ValueError('Invalid mask')

        if not isinstance(frequencies_to_detect, list):
            raise ValueError(frequencies_to_detect)

        if not min_distance_between_LEDs_pixels > 0:
            raise ValueError(min_distance_between_LEDs_pixels)

        channel = images['rgb'][:, :, :, 0]  # just using first channel

        cell_width = 15
        cell_height = 15

        (cell_vals, crop_offset) = self.downsample(channel, cell_width,
                                                   cell_height)

        candidates_mask = self.get_candidate_cells(cell_vals, 100)
        candidate_cells = [(i, j)
                           for (i, j) in np.ndindex(candidates_mask.shape)
                           if candidates_mask[i, j]]

        # Create result object
        result = LEDDetectionArray()

        # Detect frequencies and discard non-periodic signals
        # ts_tolerance = 0.2 # unnecessary
        f_tolerance = 0.25
        min_num_periods = 3

        for (i, j) in candidate_cells:
            signal = cell_vals[:, i, j]
            signal = signal - np.mean(signal)

            zero_crossings = np.where(np.diff(np.sign(signal)))[0]
            zero_crossings_t = timestamps[zero_crossings]
            led_img_coords = Vector2D((0.5 + j) * cell_width + crop_offset[1],
                                      (0.5 + i) * cell_height + crop_offset[0])
            diffs = [
                b - a for a, b in zip(zero_crossings_t, zero_crossings_t[1:])
            ]

            if (self.verbose):
                logger.info('Coords: %s, %s' %
                            (led_img_coords.x, led_img_coords.y))
                logger.info('Zero crossings: %s' % zero_crossings_t)
                logger.info('Diffs: %s' % diffs)
                logger.info('Zero-crossing measured freq %s' %
                            (0.5 / np.mean(diffs)))

            if (len(zero_crossings) < min_num_periods):
                if (self.verbose):
                    logger.info('Not an LED, discarded\n')
                continue

            # Frequency estimation based on zero crossings - quite bad
            #for f in frequencies_to_detect:
            #    if(all(d-ts_tolerance <= 0.5/f <= d+ts_tolerance for d in diffs)):
            #        if(self.verbose):
            #            logger.info('Confirmed LED with frequency %s\n'%f)
            # recover coordinates of centroid
            #        result.detections.append(LEDDetection(timestamps[0], timestamps[-1],
            #        led_img_coords, f, '', -1)) # -1...confidence not implemented
            #        break

            # Frequency estimation based on FFT
            T = 1.0 / 30  # TODO expecting 30 fps, but RESAMPLE to be sure
            f = np.linspace(0.0, 1.0 / (2.0 * T), n // 2)
            signal_f = scipy.fftpack.fft(signal)
            y_f = 2.0 / n * np.abs(signal_f[:n // 2])
            fft_peak_freq = 1.0 * np.argmax(y_f) / T / n
            if (self.verbose):
                logger.info('FFT peak frequency: %s' % fft_peak_freq)

            # Bin frequency into the ones to detect
            freq = [
                x for x in frequencies_to_detect
                if abs(x - fft_peak_freq) < f_tolerance
            ]
            if (freq):
                result.detections.append(
                    LEDDetection(rospy.Time.from_sec(timestamps[0]),
                                 rospy.Time.from_sec(timestamps[-1]),
                                 led_img_coords, freq[0], '',
                                 -1))  # -1...confidence not implemented
                if (self.verbose):
                    logger.info('LED confirmed, frequency: %s' % freq)
            else:
                logger.info('Could not associate frequency, discarding')

            # Plot all signals and FFTs
            if self.ploteverything:
                fig, ax1 = plt.subplots()
                ax1.plot(timestamps, signal)
                fig, ax2 = plt.subplots()
                ax2.plot(f, y_f)
                plt.show()

        plt.imshow(rgb0)
        ax = plt.gca()

        font = {
            'family': 'serif',
            'color': 'red',
            'weight': 'bold',
            'size': 16,
        }

        # Plot all results
        if (self.plotfinal):
            for r in result.detections:
                pos = r.pixels_normalized
                ax.add_patch(
                    Rectangle(
                        (pos.x - 0.5 * cell_width, pos.y - 0.5 * cell_height),
                        cell_width,
                        cell_height,
                        edgecolor="red",
                        linewidth=3,
                        facecolor="none"))
                plt.text(pos.x - 0.5 * cell_width,
                         pos.y - cell_height,
                         str(r.frequency),
                         fontdict=font)

            plt.show()

        return result
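
The FFT peak-frequency estimate used above can be checked in isolation. A self-contained sketch with a synthetic 5 Hz blink sampled at 30 fps (all values are illustrative):

import numpy as np
import scipy.fftpack

T = 1.0 / 30                                    # sample period, 30 fps
n = 60                                          # two seconds of frames
t = np.arange(n) * T
signal = np.sign(np.sin(2 * np.pi * 5.0 * t))   # square-wave blink at 5 Hz
signal = signal - np.mean(signal)

y_f = 2.0 / n * np.abs(scipy.fftpack.fft(signal)[:n // 2])
fft_peak_freq = 1.0 * np.argmax(y_f) / (n * T)  # bin spacing is 1/(n*T) Hz
print(fft_peak_freq)                            # 5.0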
Example No. 11
    def pixel2vector(self, pixel):
        vec = Vector2D()
        vec.x = pixel.u / self.ci_.width
        vec.y = pixel.v / self.ci_.height
        return vec
Example No. 12
    def pixel2vector(self, pixel):
        """ Converts a [0, W] x [0, H] pixel to normalized [0, 1] x [0, 1] coordinates. """
        x = pixel.u / self.ci.width
        y = pixel.v / self.ci.height
        return Vector2D(x, y)
Example No. 13
    def detect_led(self,
                   images,
                   mask,
                   frequencies_to_detect,
                   min_distance_between_LEDs_pixels):

        assert len(images.shape) == 1
        n = images.shape[0]
        if n == 0:
            raise ValueError('No images provided')

        timestamps = images['timestamp']
        # print('timestamps: {0}'.format(timestamps))
        rgb = images['rgb']

        rgb0 = rgb[0]
        if not mask.shape == rgb0.shape:
            raise ValueError('Invalid mask')

        if not isinstance(frequencies_to_detect, list):
            raise ValueError(frequencies_to_detect)

        if not min_distance_between_LEDs_pixels > 0:
            raise ValueError(min_distance_between_LEDs_pixels)

        #channel = images['rgb'][:,:,:,0] # just using first channel

        # Go for the following lines if you want to use a grayscale image
        # as an input instead of preferring one specific channel
        channel = np.zeros(images['rgb'].shape[0:-1])
        for i in range(n):
            channel[i,:,:] = cv2.cvtColor(images['rgb'][i,:,:,:], cv2.COLOR_BGR2GRAY)

        print('channel.shape {0}'.format(channel.shape))
        W = channel.shape[2]
        H = channel.shape[1]

        cell_width = 15
        cell_height = 15
        var_threshold = 100

        (cell_vals, crop_offset) = self.downsample(channel, cell_width, cell_height)
        candidates_mask = self.get_candidate_cells(cell_vals, var_threshold)

        candidate_cells = [(i, j)
                           for (i, j) in np.ndindex(candidates_mask.shape)
                           if candidates_mask[i, j]]

        if(self.publisher is not None):
            for idx in candidate_cells:
                self.debug_msg.candidates.append(Vector2D(idx[0], idx[1]))
            #self.republish()

        # Create result object
        result = LEDDetectionArray()
        unfiltered = LEDDetectionArray()

        # Detect frequencies and discard non-periodic signals
        # ts_tolerance = 0.2 # unnecessary
        f_tolerance = 0.3

        for (i, j) in candidate_cells:
            signal = cell_vals[:, i, j]
            signal = signal - np.mean(signal)

            led_img_coords = Vector2D((0.5 + j) * cell_width + crop_offset[1],
                                      (0.5 + i) * cell_height + crop_offset[0])
            led_img_coords_norm = Vector2D(1.0 * led_img_coords.x / W,
                                           1.0 * led_img_coords.y / H)

            if self.verbose:
                logger.info('Coords: %s, %s' % (led_img_coords.x, led_img_coords.y))

            # Frequency estimation based on FFT
            T = 1.0 / 30  # TODO expecting 30 fps, but RESAMPLE to be sure
            f = np.linspace(0.0, 1.0 / (2.0 * T), n // 2)
            signal_f = scipy.fftpack.fft(signal)
            y_f = 2.0 / n * np.abs(signal_f[:n // 2])
            fft_peak_freq = 1.0 * np.argmax(y_f) / T / n

            unfiltered.detections.append(
                LEDDetection(rospy.Time.from_sec(timestamps[0]),
                             rospy.Time.from_sec(timestamps[-1]),
                             led_img_coords_norm, fft_peak_freq, '', -1,
                             timestamps, signal, f, y_f))  # -1...confidence not implemented

            if(self.verbose):
                logger.info('FFT peak frequency: %s'% fft_peak_freq)

            # Bin frequency into the ones to detect
            freq = [x for x in frequencies_to_detect
                    if abs(x - fft_peak_freq) < f_tolerance]
            if freq:
                result.detections.append(
                    LEDDetection(rospy.Time.from_sec(timestamps[0]),
                                 rospy.Time.from_sec(timestamps[-1]),
                                 led_img_coords_norm, freq[0], '', -1,
                                 [], [], [], []))  # -1...confidence not implemented
                if self.verbose:
                    logger.info('LED confirmed, frequency: %s' % freq)
            else:
                logger.info('Could not associate frequency, discarding')

            # Plot all signals and FFTs
            if(self.ploteverything):
                fig, ax1 = plt.subplots()
                ax1.plot(timestamps, signal)
                fig, ax2 = plt.subplots()
                ax2.plot(f,y_f)
                plt.show()

        plt.imshow(rgb0)
        ax = plt.gca()

        font = {'family': 'serif',
                'color':  'red',
                'weight': 'bold',
                'size': 16,
                }

        # Plot all results
        if self.plotfinal:
            for r in result.detections:
                pos_n = r.pixels_normalized
                pos = Vector2D(1.0 * pos_n.x * W, 1.0 * pos_n.y * H)
                ax.add_patch(
                    Rectangle((pos.x - 0.5 * cell_width, pos.y - 0.5 * cell_height),
                              cell_width, cell_height,
                              edgecolor="red", linewidth=3, facecolor="none"))
                plt.text(pos.x - 0.5 * cell_width, pos.y - cell_height,
                         str(r.frequency), fontdict=font)

            plt.show()
        if(self.publisher is not None):
            self.debug_msg.led_all_unfiltered = unfiltered
            self.debug_msg.state = 0
            self.republish()

        return result
Example No. 14
    def cbDetectionsList(self, detections_msg):
        #For ground projection uncomment the next lines
        marker_array = MarkerArray()

        p = Vector2D()
        p2 = Vector2D()
        #p = Pixel()
        #p2 = Pixel()

        count = 0

        projection_list = ObstacleProjectedDetectionList()
        projection_list.list = []
        projection_list.header = detections_msg.header

        minDist = float('inf')
        dists = []
        width = detections_msg.imwidth
        height = detections_msg.imheight
        too_close = False
        for obstacle in detections_msg.list:
            marker = Marker()
            rect = obstacle.bounding_box
            p.x = float(rect.x) / float(width)
            p.y = float(rect.y) / float(height)
            #p.u = float(rect.x)/float(width)
            #p.v = float(rect.y)/float(width)

            p2.x = float(rect.x + rect.w) / float(width)
            p2.y = p.y
            #p2.u = float(rect.x + rect.w)/float(width)
            #p2.v = p.v

            projected_point = self.ground_proj(p)
            projected_point2 = self.ground_proj(p2)

            obj_width = (projected_point2.gp.y - projected_point.gp.y)**2 + (
                projected_point2.gp.x - projected_point.gp.x)**2
            obj_width = obj_width**0.5
            rospy.loginfo("[%s]Width of object: %f" % (self.name, obj_width))
            projection = ObstacleProjectedDetection()
            projection.location = projected_point.gp
            projection.type = obstacle.type

            dist = projected_point.gp.x**2 + projected_point.gp.y**2 + projected_point.gp.z**2
            dist = dist**0.5

            if dist < minDist:
                minDist = dist
            if dist < self.closeness_threshold:
                # Trying not to falsely detect the lane lines as duckies that are too close

                if obstacle.type.type == ObstacleType.DUCKIE and projected_point.gp.y < 0.18:
                    # rospy.loginfo("Duckie too close y: %f dist: %f" %(projected_point.gp.y, minDist))
                    too_close = True
                elif obstacle.type.type == ObstacleType.CONE and obj_width < 0.3:  # and -0.0785< projected_point.gp.y < 0.18:
                    # rospy.loginfo("Cone too close y: %f dist: %f" %(projected_point.gp.y, minDist))
                    too_close = True
            projection.distance = dist
            projection_list.list.append(projection)

            #print projected_point.gp
            marker.header = detections_msg.header
            marker.header.frame_id = self.veh_name
            marker.type = marker.ARROW
            marker.action = marker.ADD
            marker.scale.x = 0.1
            marker.scale.y = 0.01
            marker.scale.z = 0.01
            marker.color.a = 1.0
            marker.color.r = 1.0
            marker.color.g = 0.5
            marker.color.b = 0.0
            marker.pose.orientation.x = 0.0
            marker.pose.orientation.y = -0.7071
            marker.pose.orientation.z = 0
            marker.pose.orientation.w = 0.7071
            marker.pose.position.x = projected_point.gp.x
            marker.pose.position.y = projected_point.gp.y
            marker.pose.position.z = projected_point.gp.z
            marker.id = count
            count = count + 1

            marker_array.markers.append(marker)

        if count > self.maxMarkers:
            self.maxMarkers = count

        while count <= self.maxMarkers:
            marker = Marker()
            marker.action = 2
            marker.id = count
            marker_array.markers.append(marker)
            count = count + 1

        b = BoolStamped()
        b.header = detections_msg.header
        b.data = too_close

        self.pub_too_close.publish(b)
        self.pub_projections.publish(projection_list)
        self.pub_markers.publish(marker_array)
Example No. 15
    def cbDetectionsList(self, detections_msg):
        # For ground projection uncomment the next lines
        marker_array = MarkerArray()
        img = self.bridge.imgmsg_to_cv2(detections_msg.im, "bgr8")
        p = Vector2D()
        p2 = Vector2D()
        count = 0
        dst = img  # fallback so the debug image can be published without detections

        projection_list = ObstacleProjectedDetectionList()
        projection_list.list = []
        projection_list.header = detections_msg.header

        minDist = float('inf')
        dists = []
        width = detections_msg.imwidth
        height = detections_msg.imheight
        too_close = False
        for obstacle in detections_msg.list:
            marker = Marker()
            rect = obstacle.bounding_box
            if rect.x != 0 and rect.y != 0:
                # The bounding-box y needs to be corrected to the bottom edge
                if rect.x > rect.h:
                    rospy.loginfo("this is x,y,w,h")
                rect.y = rect.y + rect.h

                p.x = float(rect.x) / float(width)
                p.y = float(rect.y) / float(height)
                p2.x = float(rect.x + rect.w) / float(width)
                p2.y = p.y

                projected_point = self.ground_proj(p)
                projected_point2 = self.ground_proj(p2)
                rospy.loginfo("p.x p.y is")
                rospy.loginfo(projected_point.gp.x)
                rospy.loginfo(projected_point.gp.y)
                rospy.loginfo("p2.x p2.y is")
                rospy.loginfo(projected_point2.gp.x)
                rospy.loginfo(projected_point2.gp.y)

                obj_width = (projected_point2.gp.y - projected_point.gp.y)**2 + \
                            (projected_point2.gp.x - projected_point.gp.x)**2
                obj_width = obj_width**0.5
                rospy.loginfo("[%s]Width of object: %f" % (self.name, obj_width))

                projection = ObstacleProjectedDetection()
                projection.location = projected_point.gp
                # Use the mean of the two projected y coordinates
                projection.location.y = (projected_point.gp.y +
                                         projected_point2.gp.y) / 2.0
                rospy.loginfo("the location.y is")
                rospy.loginfo(str(projection.location.y))

                dist = (projected_point.gp.x**2 + projected_point.gp.y**2 +
                        projected_point.gp.z**2)
                rospy.loginfo("the projected point of z is")
                rospy.loginfo(projected_point.gp.z)
                dist = dist**0.5
                rospy.loginfo("distance is")
                rospy.loginfo(dist)

                # Draw the guide lines and the distance on the debug image
                dst = cv2.line(img, (0, 480), (200, 250), [0, 0, 255], 3)
                dst = cv2.line(dst, (100, 250), (200, 250), [0, 0, 255], 3)
                font = cv2.FONT_HERSHEY_SIMPLEX
                dst = cv2.putText(dst, str(dist), (50, 400), font, 2, [0, 255, 0], 2)

                if dist < self.closeness_threshold:
                    # Trying not to falsely detect the lane lines as duckies
                    # that are too close
                    too_close = True

                projection.distance = dist
                projection_list.list.append(projection)

                marker.header = detections_msg.header
                marker.header.frame_id = self.veh_name
                marker.type = marker.ARROW
                marker.action = marker.ADD
                marker.scale.x = 0.1
                marker.scale.y = 0.01
                marker.scale.z = 0.01
                marker.color.a = 1.0
                marker.color.r = 1.0
                marker.color.g = 0.5
                marker.color.b = 0.0
                marker.pose.orientation.x = 0.0
                marker.pose.orientation.y = -0.7071
                marker.pose.orientation.z = 0
                marker.pose.orientation.w = 0.7071
                marker.pose.position.x = projected_point.gp.x
                marker.pose.position.y = projected_point.gp.y
                marker.pose.position.z = projected_point.gp.z
                marker.id = count
                count = count + 1

                marker_array.markers.append(marker)

        if count > self.maxMarkers:
            self.maxMarkers = count

        while count <= self.maxMarkers:
            marker = Marker()
            marker.action = 2
            marker.id = count
            marker_array.markers.append(marker)
            count = count + 1

        b = BoolStamped()
        b.header = detections_msg.header
        b.data = too_close
        self.pub_drawimg.publish(self.bridge.cv2_to_imgmsg(dst, "bgr8"))
        self.pub_too_close.publish(b)
        self.pub_projections.publish(projection_list)
        self.pub_markers.publish(marker_array)
Example No. 16
    def update_car_command(self, timer_event):
        yellow_points = self.yellow_points_tracker.tracked_points
        white_points = self.white_points_tracker.tracked_points

        self.publish_filtered_yellow_points(yellow_points)
        self.publish_filtered_white_points(white_points)
        if len(yellow_points) == 0 and len(white_points) == 0:
            # self.logwarn("NO POINTS")
            if self.v == 0 and self.omega == 0:
                # self.logwarn("Robot is immobile and can't see any lines.")
                self.send_car_command(0.05, 0)
            else:
                self.v *= 0.5
                self.v = np.clip(self.v, 0.05, self.v_max)
                self.logwarn(
                    "Can't see any lines. Reducing speed and moving at same angle. (v={}, omega={})"
                    .format(self.v, self.omega))
                self.send_car_command(self.v, self.omega)
            return

        min_speed = 0.1
        max_speed = self.v_max
        # NOTE: unused, was previously doing mean of centroids.
        # centroid_yellow = self.find_centroid(Color.YELLOW)
        # centroid_white = self.find_centroid(Color.WHITE)
        # target = (centroid_white + centroid_yellow) / 2

        # NOTE: unused, was previously doing mean of best points.
        # best_white_point = self.find_point_closest_to_lookahead_distance(Color.WHITE)
        # best_yellow_point = self.find_point_closest_to_lookahead_distance(Color.YELLOW)
        # target = (best_white_point + best_yellow_point) / 2

        if len(yellow_points) > len(white_points):
            curviness = np.std(yellow_points[:, 1])
            length = np.amax(yellow_points[:, 0])
            if curviness < 0.01 and length >= self.lookahead_dist:
                max_speed *= 5.0
            # centroid_yellow = self.find_centroid(yellow_points)
            # target = centroid_yellow
            target = self.find_point_closest_to_lookahead_distance(
                yellow_points)
            target[1] -= self.offset  # shifted to the right.
        else:
            # going slower since we mostly see white points.
            max_speed *= 0.5
            curviness = np.std(white_points[:, 1])
            length = np.amax(white_points[:, 0])
            if curviness < 0.01 and length >= self.lookahead_dist:
                max_speed *= 2.0
            if curviness > 0.1:
                max_speed *= 0.5

                centroid_white = self.find_centroid(white_points)
                target = centroid_white
            else:
                target = self.find_point_closest_to_lookahead_distance(
                    white_points)
            target[1] += self.offset  # shift to the left.
        self.loginfo("std: {} \t length: {} \t max_speed: {}".format(
            curvyness, length, max_speed))

        # elif not self.has_points(Color.YELLOW) point_to_npand self.has_points(Color.WHITE):
        #     self.logwarn("WHITE")
        #     # best_white_point = self.find_point_closest_to_lookahead_distance(Color.WHITE)
        #     white_centroid = self.find_centroid(Color.WHITE)
        #     # assume the white line is always on the right.
        #     target = white_centroid
        #     target[1] += self.offset * 3 # shifted to the left.

        # self.logdebug("Target: {}".format(target))
        self.target = target

        target_msg = Vector2D()
        target_msg.x = target[0]
        target_msg.y = target[1]
        self.pub_follow_point.publish(target_msg)

        hypotenuse = np.sqrt(target.dot(target))
        sin_alpha = target[1] / hypotenuse
        cos_alpha = target[0] / hypotenuse

        # TODO: maybe play around with changing V depending on sin_alpha.
        v = max_speed * (cos_alpha**2)
        self.loginfo("cos^2(alpha) = {}".format((cos_alpha**2)))

        self.v = np.clip(v, min_speed, max_speed)

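        # Pure pursuit steering: curvature kappa = 2*sin(alpha)/L for a
        # lookahead distance L; the curvature is used directly as omega here.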
        omega = 2 * sin_alpha / self.lookahead_dist
        self.send_car_command(self.v, omega)
Example No. 17
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(LaneControllerNode, self).__init__(node_name=node_name,
                                                 node_type=NodeType.CONTROL)

        # Add the node parameters to the parameters dictionary
        self.params = dict()
        # Construct publishers
        self.pub_car_cmd = rospy.Publisher("~car_cmd",
                                           Twist2DStamped,
                                           queue_size=1,
                                           dt_topic_type=TopicType.CONTROL)

        # Construct subscribers
        self.sub_lane_reading = rospy.Subscriber("~lane_pose",
                                                 LanePose,
                                                 self.cbLanePoses,
                                                 queue_size=1)

        self.sub_segments = rospy.Subscriber(
            "/agent/ground_projection_node/lineseglist_out",
            SegmentList,
            self.cbSegList,
            queue_size=1)

        #self.log("Initialized!")

        # Velocity and Omega
        self.v = 0
        self.omega = 0
        # Velocity and Omega after update
        self.av = 0
        self.ao = 0

        # The Lookahead distance for Pure Pursuit
        self.lookahead_dist = 1.0

        # Length of the per-color deque buffer of segment points
        self.blength = 150
        self.seg_points = collections.defaultdict(
            lambda: collections.deque(maxlen=self.blength))

        # Variable to control the velocity of the bot
        self.max_velocity = 0.5

        # Timer for publishing the car command at a fixed rate, as suggested by the TA
        self.d_t = 0.5
        self.cc_timer = rospy.Timer(rospy.Duration.from_sec(self.d_t),
                                    self.car_command)

        # An offset parameter, especially useful on curves
        self.offset = 0.15

        # Thread lock to access points deque
        self.lock_for_points = Lock()

        # Target(Follow) point
        self.target = np.asarray([0, 0])

        # Target(Follow) point message
        self.target_msg = Vector2D()

        self.controller = PurePursuitLaneController(self.params)

        self.car_control_msg = Twist2DStamped()
Example No. 18
    def detect_led(self,
                   images,
                   frequencies_to_detect,
                   cell_size,
                   crop_rect_norm=[0, 0, 1.0, 1.0]):

        assert len(images.shape) == 1
        n = images.shape[0]
        if n == 0:
            raise ValueError('No images provided')

        timestamps = images['timestamp']
        rgb = images['rgb']
        H, W, _ = rgb[0].shape

        if not isinstance(frequencies_to_detect, list):
            raise ValueError(frequencies_to_detect)

        if self.publisher is not None:
            self.debug_msg.cell_size = cell_size
            self.debug_msg.crop_rect_norm = crop_rect_norm
            self.republish()

        # Crop + greyscale (floor/ceil results cast to int so they can be
        # used as slice indices)
        tlx = int(floor(1.0 * W * crop_rect_norm[0]))
        tly = int(floor(1.0 * H * crop_rect_norm[1]))
        brx = int(ceil(1.0 * W * crop_rect_norm[2]))
        bry = int(ceil(1.0 * H * crop_rect_norm[3]))
        croppedshape = [images['rgb'].shape[0], bry - tly, brx - tlx]
        channel = np.zeros(croppedshape)
        for i in range(n):
            channel[i, :, :] = cv2.cvtColor(images['rgb'][i, tly:bry, tlx:brx, :],
                                            cv2.COLOR_BGR2GRAY)

        print('expected shape {0}'.format(croppedshape))
        print('channel.shape {0}'.format(channel.shape))

        cell_width = cell_size[0]
        cell_height = cell_size[1]
        var_threshold = 100

        (cell_vals, crop_offset) = self.downsample(channel, cell_width, cell_height)
        candidates_mask = self.get_candidate_cells(cell_vals, var_threshold)

        candidate_cells = [(i, j)
                           for (i, j) in np.ndindex(candidates_mask.shape)
                           if candidates_mask[i, j]]

        if self.publisher is not None:
            for idx in candidate_cells:
                self.debug_msg.candidates.append(
                    Vector2D(idx[0] + crop_offset[1] + tly,
                             idx[1] + crop_offset[0] + tlx))

        # Create result object
        result = LEDDetectionArray()
        unfiltered = LEDDetectionArray()

        # Detect frequencies and discard non-periodic signals
        f_tolerance = 0.3

        for (i, j) in candidate_cells:
            signal = cell_vals[:, i, j]
            signal = signal - np.mean(signal)

            led_img_coords = Vector2D((0.5 + j) * cell_width + crop_offset[1] + tlx,
                                      (0.5 + i) * cell_height + crop_offset[0] + tly)
            led_img_coords_norm = Vector2D(1.0 * led_img_coords.x / W,
                                           1.0 * led_img_coords.y / H)

            if self.verbose:
                logger.info('Coords: %s, %s' % (led_img_coords.x, led_img_coords.y))

            # Frequency estimation based on FFT
            T = 1.0 / 30  # TODO expecting 30 fps, but RESAMPLE to be sure
            f = np.linspace(0.0, 1.0 / (2.0 * T), n // 2)
            signal_f = scipy.fftpack.fft(signal)
            y_f = 2.0 / n * np.abs(signal_f[:n // 2])
            fft_peak_freq = 1.0 * np.argmax(y_f) / T / n

            unfiltered.detections.append(
                LEDDetection(rospy.Time.from_sec(timestamps[0]),
                             rospy.Time.from_sec(timestamps[-1]),
                             led_img_coords_norm, fft_peak_freq, '', -1,
                             timestamps, signal, f, y_f))  # -1...confidence not implemented

            if(self.verbose):
                logger.info('FFT peak frequency: %s'% fft_peak_freq)

            # Bin frequency into the ones to detect
            freq = [x for x in frequencies_to_detect
                    if abs(x - fft_peak_freq) < f_tolerance]
            if freq:
                result.detections.append(
                    LEDDetection(rospy.Time.from_sec(timestamps[0]),
                                 rospy.Time.from_sec(timestamps[-1]),
                                 led_img_coords_norm, freq[0], '', -1,
                                 [], [], [], []))  # -1...confidence not implemented
                if(self.verbose):
                    logger.info('LED confirmed, frequency: %s'% freq)
            else:
                logger.info('Could not associate frequency, discarding')

            # Plot all signals and FFTs
            if(self.ploteverything):
                fig, ax1 = plt.subplots()
                ax1.plot(timestamps, signal)
                fig, ax2 = plt.subplots()
                ax2.plot(f,y_f)
                plt.show()
                
        if self.ploteverything or self.plotfinal:
            plt.imshow(rgb[0])
            ax = plt.gca()  # ax is also needed by the plotfinal block below


        font = {'family': 'serif',
                'color':  'red',
                'weight': 'bold',
                'size': 16,
                }

        # Plot all results
        if self.plotfinal:
            for r in result.detections:
                pos_n = r.pixels_normalized
                pos = Vector2D(1.0 * pos_n.x * W, 1.0 * pos_n.y * H)
                ax.add_patch(
                    Rectangle((pos.x - 0.5 * cell_width, pos.y - 0.5 * cell_height),
                              cell_width, cell_height,
                              edgecolor="red", linewidth=3, facecolor="none"))
                plt.text(pos.x - 0.5 * cell_width, pos.y - cell_height,
                         str(r.frequency), fontdict=font)

            plt.show()

        if(self.publisher is not None):
            self.debug_msg.led_all_unfiltered = unfiltered
            self.debug_msg.state = 0
            self.republish()

        return result
Example No. 19
    def pixel_to_vector(self, pixel):
        camera_width = self.camera_info.width
        camera_height = self.camera_info.height
        return Vector2D(x=pixel.u / camera_width, y=pixel.v / camera_height)
Example No. 20

def call_service_get_ground_coordinate(req, veh):
    rospy.wait_for_service(veh + "/ground_projection/get_ground_coordinate")

    try:
        get_ground_coord = rospy.ServiceProxy(
            veh + '/ground_projection/get_ground_coordinate', GetGroundCoord)
        resp = get_ground_coord(req)
        return resp.gp
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print "usage: " + sys.argv[0] + " vehicle name"
        sys.exit()

    veh = "/" + sys.argv[1]

    rospy.init_node("ex_get_ground_coordinate")

    normalized_uv = Vector2D()
    normalized_uv.x = 0.5
    normalized_uv.y = 0.7

    gp = call_service_get_ground_coordinate(normalized_uv, veh)

    print "ground coordinate: (%f, %f, %f)" % (gp.x, gp.y, gp.z)