Example #1
def draw_judgement(im, judgements, delta=1.0):
    points = judgements['intrinsic_points']
    comparisons = judgements['intrinsic_comparisons']
    id_to_points = {p['id']: p for p in points}
    rows, cols = im.shape[0:2]

    for c in comparisons:
        darker = c['darker']
        if darker not in ('1', '2', 'E'):
            continue

        weight = c['darker_score']
        if weight is None or weight <= 0:  # check for None before comparing
            continue

        point1 = id_to_points[c['point1']]
        point2 = id_to_points[c['point2']]
        if not point1['opaque'] or not point2['opaque']:
            continue

        x1 = int(point1['x'] * cols)
        y1 = int(point1['y'] * rows)
        x2 = int(point2['x'] * cols)
        y2 = int(point2['y'] * rows)
        if darker == '1':
            cv2.arrowedLine(im, (x2, y2), (x1, y1), (0, 0, 255), 2)
        elif darker == '2':
            cv2.arrowedLine(im, (x1, y1), (x2, y2), (0, 0, 255), 2)
        else:
            cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 1)

    return im
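For reference, a minimal sketch of the input draw_judgement expects, inferred from the keys it reads above; the field values are illustrative only:

import cv2
import numpy as np

# Hypothetical judgements dict using only the keys draw_judgement() reads.
judgements = {
    'intrinsic_points': [
        {'id': 'a', 'x': 0.25, 'y': 0.25, 'opaque': True},
        {'id': 'b', 'x': 0.75, 'y': 0.75, 'opaque': True},
    ],
    'intrinsic_comparisons': [
        # darker: '1' = point1 is darker, '2' = point2 is darker, 'E' = equal
        {'point1': 'a', 'point2': 'b', 'darker': '1', 'darker_score': 1.0},
    ],
}
im = draw_judgement(np.zeros((480, 640, 3), np.uint8), judgements)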
Example #2
def drawcoorperspective(vis, points, col_out=black, col_in=red, radius=2):
    """
    Function to draw interaction with points to obtain perspective.

    :param vis: image array.
    :param points: list of points.
    :param col_out: outer color of point.
    :param col_in: inner color of point.
    :param radius: radius of drawn points.
    :return:
    """
    points = np.array(points, INT)
    thickness = radius - 1
    if 1 < len(points) < 5:
        for i in range(len(points)-1):
            if i%2:
                for j in range(i+1,min(len(points),i+3)):
                    if j%2:
                        #print "i=",i," j=",j
                        pt1 = (points[i][0], points[i][1])
                        pt2 = (points[j][0], points[j][1])
                        cv2.arrowedLine(vis, pt1, pt2, col_in, thickness)
            else:
                for j in range(i+1,min(len(points),i+3)):
                    #print "i=",i," j=",j
                    pt1 = (points[i][0], points[i][1])
                    pt2 = (points[j][0], points[j][1])
                    cv2.arrowedLine(vis, pt1, pt2, col_in, thickness)
        vis = drawcoorpoints(vis,points,col_out,col_in,radius)
    else:
        vis = drawcoorpoints(vis,points,col_out,col_in,radius)
    return vis
Example #3
 def draw_arrow(overlay, z_rotation, position,
                line_width,
                arrow_length,
                arrow_color):
     x_to = np.round(position[0] + arrow_length * np.cos(z_rotation)).astype(np.int32)
     y_to = np.round(position[1] + arrow_length * np.sin(z_rotation)).astype(np.int32)
     cv2.arrowedLine(overlay, tuple(position), (x_to, y_to),
                     arrow_color, line_width, cv2.LINE_AA)
Example #4
File: ps6.py Project: cowens85/Fall2015
def make_quiver(U, V, scale=5):
    stride = 15  # plot every so many rows, columns
    color = (0, 255, 0)  # green
    img_out = np.zeros((V.shape[0], U.shape[1], 3), dtype=np.uint8)
    # print U
    # print V
    for y in range(0, V.shape[0], stride):  # xrange is Python 2 only
        for x in range(0, U.shape[1], stride):
            cv2.arrowedLine(img_out, (x, y),
                            (x + int(U[y, x] * scale), y + int(V[y, x] * scale)),
                            color, 1, tipLength=.3)

    return img_out
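A quick way to exercise make_quiver is a synthetic flow field; U and V are assumed to be same-shaped horizontal and vertical displacement maps, matching how they are indexed above:

import cv2
import numpy as np

# Uniform flow: 2 px to the right and 1 px down at every pixel.
U = np.full((240, 320), 2.0)
V = np.full((240, 320), 1.0)
quiver = make_quiver(U, V, scale=5)
cv2.imwrite('quiver.png', quiver)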
Example #5
File: OpenCV.py Project: orobert91/Design3
 def draw_arrowed_line(image, segment, color=None, thickness=1):
     cv2.arrowedLine(
         image.cv_image,
         (round(segment.point1.x), round(segment.point1.y)),
         (round(segment.point2.x), round(segment.point2.y)),
         Color.random().to_bgr() if color is None else color.to_bgr(),
         thickness,
         cv2.LINE_AA,
         0,
         0.2,
     )
Example #6
File: debugrenderer.py Project: aevri/mel
 def arrow(self, from_, to):
     if self._image is None:
         return
     cv2.arrowedLine(
         self._image,
         tuple(from_.astype(int)),
         tuple(to.astype(int)),
         (255, 255, 255),
         2,
         cv2.LINE_AA,
     )
Example #7
def drawFeaturesMovementPers(kp_list, image, M):
#                            vg.im_transf = drawFeaturesMovementPers(
#                                           vg.pv.kp_list,
#                                           vg.im_transf,
#                                           vg.map_M)
    for kps in kp_list:
        pt1 = transformPerspective(kps[0], M)
        pt2 = transformPerspective(kps[1], M)
        
        cv2.arrowedLine(image, intT(pt2), intT(pt1), (255,0,0), 2)
    return image
Example #8
 def dessinerRobot(self):
     if self.stationBase.getCarte().getRobot() is not None:
         position = (self.stationBase.getCarte().getRobot().getX(), self.stationBase.getCarte().getRobot().getY())
         self.anciennePosRobot.append(position)
         if len(self.anciennePosRobot) >= 2:
             for i in reversed(range(len(self.anciennePosRobot) - 1)):
                 cv2.arrowedLine(
                     self.imageVirtuelle, self.anciennePosRobot[i], self.anciennePosRobot[i + 1], (0, 0, 0), 2
                 )
     else:
         self.anciennePosRobot = []
Example #9
File: polar.py Project: nadvornik/astro
	def plot2(self, size = 960, area = 0.1):
		ha = celestial_rot() + self.status['gps'][1]
		qha = Quaternion([90-ha, 0, 0])
		
		img = np.zeros((size, size, 3), dtype=np.uint8)
		c = size // 2  # integer pixel coordinate of the image centre
		scale = size / area
		
		if self.ra is not None and self.dec is not None:
			t = Quaternion.from_ra_dec_pair([self.ra, self.dec], [self.prec_ra, self.prec_dec])
		else:
			t = Quaternion.from_ra_dec_pair([0.0, 90.0], [self.prec_ra, self.prec_dec])
		
		
		polaris = [37.9529,  89.2642]
		
		polaris_target = self.prec_q.transform_ra_dec(polaris)
		prec = t.transform_ra_dec([0, 90])
		polaris_real = t.transform_ra_dec(polaris_target)

		polaris_target = qha.transform_ra_dec(polaris_target)
		prec = qha.transform_ra_dec(prec)
		polaris_real = qha.transform_ra_dec(polaris_real)

		polaris_target_xyz = ra_dec_to_xyz(polaris_target)
		polaris_r = (polaris_target_xyz[0] ** 2 + polaris_target_xyz[1] ** 2)**0.5
		prec_xyz = ra_dec_to_xyz(prec)
		polaris_real_xyz = ra_dec_to_xyz(polaris_real)

		cv2.circle(img, (c,c), int(polaris_r * scale), (0, 255, 0), 1)
		for i in range (0, 24):
			a = np.deg2rad([i * 360.0 / 24.0])
			sa = math.sin(a)
			ca = math.cos(a)
			cv2.line(img, (int(c + sa * polaris_r * scale), int(c + ca * polaris_r * scale)), (int(c + sa * (polaris_r * scale + 8)), int(c + ca * (polaris_r * scale + 8))), (0, 255, 0), 1)
			
		cv2.circle(img, (int(c + polaris_target_xyz[0] * scale), int(c + polaris_target_xyz[1] * scale)), 4, (0, 255, 0), 2)
		
		cv2.line(img, (0, c), (size, c), (0, 255, 0), 1)
		cv2.line(img, (c, 0), (c, size), (0, 255, 0), 1)
		
		
		if self.ra is not None and self.dec is not None:
			cv2.circle(img, (int(c + prec_xyz[0] * scale), int(c + prec_xyz[1] * scale)), 4, (255, 255, 255), 2)
			cv2.circle(img, (int(c + polaris_real_xyz[0] * scale), int(c + polaris_real_xyz[1] * scale)), 4, (255, 255, 255), 2)
		
			pole_dist = (prec_xyz[0] ** 2 + prec_xyz[1] ** 2) ** 0.5
			if pole_dist >= area / 2:
				cv2.putText(img, "%0.1fdeg" % (90 - prec[1]), (int(c + prec_xyz[0] / pole_dist * area / 5 * scale - 50), int(c + prec_xyz[1] / pole_dist * area / 5 * scale)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
				cv2.arrowedLine(img, (int(c + prec_xyz[0] / pole_dist * area / 3 * scale), int(c + prec_xyz[1] / pole_dist * area / 3 * scale)), (int(c + prec_xyz[0] / pole_dist * area / 2 * scale), int(c + prec_xyz[1] / pole_dist * area / 2 * scale)), (255, 255, 255), 2)

		return img
Example #10
def draw_arrow(frame, position, direction, colour, length=750):
    global scale, img_h
    
    end = position + length * direction
    
    x, y, z = position
    pt1 = (int(x*scale), img_h-int(y*scale))
    x, y, z = end
    pt2 = (int(x*scale), img_h-int(y*scale))
    
    # draw
    cv2.circle(frame, pt1, 2, colour, 2)
    cv2.arrowedLine(frame, pt1, pt2, colour, 2)
Example #11
 def dessinerTrajetPrevu(self):
     if len(self.stationBase.getTrajectoirePrevue()) > 1:
         pointInitial = None
         for pointFinal in self.stationBase.getTrajectoirePrevue():
             if pointInitial is None:
                 pointInitial = pointFinal
             else:
                 cv2.arrowedLine(self.imageVirtuelle, pointFinal, pointInitial, (0, 255, 0), 2)
                 pointInitial = pointFinal
     else:
         cv2.putText(
             self.imageVirtuelle, "Phase d" "alignement", (1000, 800), self.police, 1, (0, 0, 0), 1, cv2.LINE_AA
         )
Example #12
 def visualize_node_arrows(self, img, node_id, size=3, color=(255,0,255)):
     l, r, t, b = self.adjacency[node_id]
     srcX, srcY = self.node_coordinates[node_id]
     for direction, r in enumerate((l,r,t,b)):
         if r == -1: continue
         targetX, targetY = self.node_coordinates[r]
         # Use constant arrow head size. As arrowedLine() takes a fraction of the length, we need to reverse that
         length = np.hypot(targetX - srcX, targetY - srcY)
         arrowHeadSizeWant = 15  #px
         arrowHeadSize = arrowHeadSizeWant / length
         print("Drawing <{3}> arrow from #{0} to #{1}  of length {2}".format(
                 r, node_id, length, {0: "left", 1: "right", 2:"top", 3:"bottom"}[direction]))
         cv2.arrowedLine(img, (int(targetX), int(targetY)), (int(srcX), int(srcY)),
                         color=color, thickness=size, tipLength=arrowHeadSize, line_type=cv2.LINE_AA)
Example #13
def drawArrows(frame, fromCoords, toCoords, color, thickness, **kwargs):
    """
    Draws arrows on an image
    :param frame: image to draw arrows on
    :param fromCoords: list of tuples containing from x/y coordinates
    :param toCoords:  list of tuples containing to x/y coordinates
    :param color: color of arrows
    :param thickness: arrow thickness in pixels
    :param **kwargs: extra arguments passed to cv2.arrowedLine
    :return: None, edits image in place
    """
    for i in range(len(toCoords)):
        fromX,fromY = fromCoords[i]
        toX, toY = toCoords[i]
        cv2.arrowedLine(frame, (fromX,fromY), (toX,toY), color, thickness, **kwargs)
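For example, a minimal call (a sketch; the coordinates are passed straight to OpenCV, so they should already be ints, and extra kwargs such as tipLength go through to cv2.arrowedLine):

import cv2
import numpy as np

frame = np.zeros((200, 200, 3), dtype=np.uint8)
drawArrows(frame, [(10, 10), (50, 100)], [(100, 10), (150, 180)],
           (0, 255, 0), 2, tipLength=0.2)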
Example #14
def draw_bot(position, front):
    img = settings.maze['image']
    if hasattr(img, 'shape'):
        image = np.zeros(settings.maze['image'].shape, np.float32)
    else:
        image = np.zeros((img.height(), img.width()), np.float32)  # numpy shape is (rows, cols)
    try:
        arrow_tip = (int(front[0]), int(front[1]))
        position = (int(position[0]), int(position[1]))
        # image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        cv2.arrowedLine(image, position, arrow_tip, (180, 255, 255), 2)
        return image
    except TypeError:
        print "Bot missing!"
        return image
Example #15
def drawFeaturesMovement(kp_list, image, M, vect):
    for kps in kp_list:    
        pt1 = transformPerspective(kps[0], M)
        pt2 = transformPerspective(kps[1], M)
        
        if isGoodMatch(pt1, pt2, vect):
            cv2.arrowedLine(image, kps[1], kps[0], (255,0,0), 2)
        else:
            cv2.arrowedLine(image, kps[1], kps[0], (0,0,255), 2)
#        cv2.putText(image,
#                    str(dst.euclidean(pt1,pt2)),
#                    (kps[1][0]+5,kps[1][1]+5),
#                    cv2.FONT_HERSHEY_DUPLEX,0.5,(255,0,255))
        
    return image
Example #16
 def animate_contours(self, path='default'):
     """
     Animates the found contours in order
     """
     self.im2 = np.zeros(self.img.shape)
     if path == 'default':
         path = self.contours
     elif path == 'sorted':
         path = self.path
     for contour in path:
         for idx, point in enumerate(contour[0:-1]):
             # Define line as line from first point to next point
             p1 = (int(contour[idx][0]), int(contour[idx][1]))
             p2 = (int(contour[idx+1][0]), int(contour[idx+1][1]))
             cv2.arrowedLine(self.im2, p1, p2, (255,255,255))
             cv2.imshow('contours', self.im2)
             cv2.waitKey(1)
     cv2.waitKey(0)
Example #17
def draw_angled_arrow(image, center, angle):
    """
        Draws a double sided arrow on image centered at center
        at an angle of angle degrees.
    """
    sin, cos = np.sin(radians(angle)), np.cos(radians(angle))
    rotated_dir = np.array(((cos, -sin), (sin, cos))).dot(
                  np.array((0, -1)))

    line_length = min(image.shape[0], image.shape[1]) * 0.17
    line_start = np.array((center)) + rotated_dir * line_length
    line_end = np.array((center)) - rotated_dir * line_length
    def get_tup(vec):
        return int(vec[0]), int(vec[1])

    cv2.arrowedLine(image, get_tup(line_start),
                    get_tup(line_end), (255, 255, 0), 2)
    cv2.arrowedLine(image, get_tup(line_end),
                    get_tup(line_start), (255, 255, 0), 2)
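A minimal call, assuming the snippet's dependencies (cv2, numpy as np, and math.radians) are imported:

import cv2
import numpy as np
from math import radians

canvas = np.zeros((300, 300, 3), dtype=np.uint8)
draw_angled_arrow(canvas, (150, 150), 30)  # double-headed arrow at 30 degrees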
Example #18
def filter_overlay(frame, config):
    world = config.vision.world_latest
    global D_POINT
    if D_POINT is not None:
        cv2.circle(frame, (int(D_POINT.x), int(D_POINT.y)), 5, BGR_COMMON['blue'], 3)

    if world.ball is not None:
        ball = world.ball

        cv2.circle(frame, (int(ball.x), int(ball.y)), 6, BGR_COMMON['red'], 3)

        if config.vision.world_previous.ball is not None:

            x = world.ball.x - config.vision.world_previous.ball.x
            y = world.ball.y - config.vision.world_previous.ball.y

            arrowhead = (int(x * 10 + world.ball.x), int(y * 10 + world.ball.y))
            cv2.arrowedLine(frame, ball.centre, arrowhead, BGR_COMMON['red'], 2, cv2.LINE_AA)

    for team in ['blue', 'yellow']:
        for colour in ['pink', 'green']:

            robot = getattr(world, "robot_{}_{}".format(team, colour))
            if robot is None:
                continue

            cv2.circle(frame, (int(robot.x), int(robot.y)), 10, BGR_COMMON[colour], 3)
            cv2.circle(frame, (int(robot.x), int(robot.y)), 6, BGR_COMMON[team], 3)

            length = 15
            heading = cmath.rect(length, robot.angle)  # avoid shadowing built-in complex
            y = -heading.imag
            x = heading.real

            if robot.velocity != 0:
                arrowhead = (int(x + robot.x), int(y + robot.y))
                cv2.arrowedLine(frame, robot.centre, arrowhead, BGR_COMMON['black'], 2, cv2.LINE_AA)

    return frame
Example #19
def draw_vectors(image_bw, starting_points, vectors):
   """Draws vectors on a given black and white image.
   Parameters
   ----------
   image_bw : numpy matrix of integers
      Stores the binary image that is being studied, with contents marked by
      255s and background marked by 0s.
   starting_points : List[(float, float)]
      Stores the starting point of each vector.
   vectors : List[List[(float, float)]]
      Stores the vectors at each starting point.
   Returns
   -------
   None
   """
   for i in range(len(starting_points)):
      pos = (int(starting_points[i][0]), int(starting_points[i][1]))
      cv2.putText(image_bw, str(i), pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255,\
                  1, cv2.LINE_AA, False)
      for j in range(len(vectors[i])):
         cv2.arrowedLine(image_bw, pos,\
                        (int(np.ceil(pos[0] + 10 * vectors[i][j][0])),\
                        int(np.ceil(pos[1] + 10 * vectors[i][j][1]))), 255)
Example #20
    def highlight_object(self, f, trace=False, stats=False):
        if self.consec_time_unseen == 0:
            other_edge = (self.box[0] + self.box[2], self.box[1] + self.box[3])
            cv2.rectangle(f, self.box[0:2], other_edge, (0, 0, 255), 1)
            cv2.circle(f, self.center, 3, (0, 0, 255), -1)

        if trace:
            f_copy = f.copy()
            for i in range(max(0, len(self.path) - 20), len(self.path) - 1):
                if i not in self.disappearance_indices:
                    cv2.line(f, self.path[i], self.path[i + 1], (0, 0, 0), 2)
                    cv2.addWeighted(f, .8, f_copy, .2, 0, f)

        if stats:
            t = self.tracking_threshold
            if len(self.path) > t:
                subset = self.path[-t:]
                line_end_x = (subset[t - 1][0] - subset[t - 3][0]) * 2 + subset[t - 1][0]
                line_end_y = (subset[t - 1][1] - subset[t - 3][1]) * 2 + subset[t - 1][1]
                cv2.arrowedLine(f, subset[t - 1], (line_end_x, line_end_y), (255, 0, 0), 2)

                prediction = self.predicted_path(5)
                for p in prediction:
                    cv2.circle(f, p, 3, (50, 255, 50), -1)
Example #21
def GetRectAngles(gray):

    contours = 0
    thresh = 0
    rectangles = list()
    count = 0

    _, thresh = cv2.threshold(gray, 65, 255, 0)
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:

        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)

        xy, wh, angle = rect
        w, h = np.int0(wh)

        if ((w * w + h * h) ** 0.5 > MinDiagonal) & ((w * w + h * h) ** 0.5 < MaxDiagonal):
            count = count + 1
            rectangles.append(rect)
            cv2.drawContours(gray, [box], 0, (10, 20, 255), 2)
            x, y = np.int0(xy)
            if w > h:
                x1 = np.int0(x + w)
                y1 = np.int0(y + w * math.tan(math.radians(angle)))
                cv2.arrowedLine(gray, (x, y), (x1, y1), (255, 255, 255))
            else:
                x1 = np.int0(x + w)
                y1 = np.int0(y + w * math.tan(math.radians(angle + 90)))
                cv2.arrowedLine(gray, (x, y), (x1, y1), (255, 255, 255))

    cv2.imshow("ShowFoundRects", gray)
    print(count, "rectangles found. Press ESC to exit")
    return rectangles
Example #22
File: kalman.py Project: benlansdell/hydra
	def plotforces(self, overlay, imageoutput):
		sc = 2
		#Get original pt locations
		ox = self.orig_x[0:(2*self.N)].reshape((-1,2))
		#Get prediction location
		px = self.pred_x[0:(2*self.N)].reshape((-1,2))
		#Get template, flow and mask 'force'
		tv = self.tv[0:(2*self.N)].reshape((-1,2))
		fv = self.fv[0:(2*self.N)].reshape((-1,2))
		mv = self.mv[0:(2*self.N)].reshape((-1,2))
		blank = np.zeros(overlay.shape, dtype=np.uint8)
		blank[:,:,3] = 255
		overlay = cv2.addWeighted(overlay, 0.5, blank, 0.5, 0)
		#Resize image
		overlay = cv2.resize(overlay, (0,0), fx = sc, fy = sc)
		for idx in range(self.N):
			cv2.arrowedLine(overlay, (sc*int(ox[idx,0]),sc*int(ox[idx,1])),\
			 (sc*int(px[idx,0]),sc*int(px[idx,1])), (255,255,255, 255), thickness = 2)
			cv2.arrowedLine(overlay, (sc*int(px[idx,0]),sc*int(px[idx,1])),\
			 (sc*int(px[idx,0]+10*tv[idx,0]),sc*int(px[idx,1]+10*tv[idx,1])), (255,0,0, 255), thickness = 2)
			cv2.arrowedLine(overlay, (sc*int(px[idx,0]),sc*int(px[idx,1])),\
			 (sc*int(px[idx,0]+10*fv[idx,0]),sc*int(px[idx,1]+10*fv[idx,1])), (0,255,0, 255), thickness = 2)
			cv2.arrowedLine(overlay, (sc*int(px[idx,0]),sc*int(px[idx,1])),\
			 (sc*int(px[idx,0]+10*mv[idx,0]),sc*int(px[idx,1]+10*mv[idx,1])), (0,0,255, 255), thickness = 2)

		font = cv2.FONT_HERSHEY_SIMPLEX
		#cv2.putText(img,'Hello World!',(10,500), font, 1,(255,255,255),2)
		legendtext = 'red = mask force\ngreen = flow force\nblue = template force\nwhite = prediction'
		x0, y0 = (20,20)
		dy = 20
		for i, line in enumerate(legendtext.split('\n')):
			y = y0 + i*dy
			cv2.putText(overlay, line, (x0, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255,255))
		
		#cv2.putText(overlay, legendtext, (20, 20), font, 1, (255, 255, 255), 2)
		fn = './' + imageoutput + '_forces_' + strftime("%Y-%m-%d_%H:%M:%S", gmtime()) + '.png'
		cv2.imwrite(fn, overlay)
Example #23
 def show_lines(self, movement):
     """ 
     Plots the average vectors of each of the four quadrants of the image 
     """
     # quadrant 1 vector
     cv2.arrowedLine(
         self.frame,
         (int(self.frame.shape[1] * 1 / 4), int(self.frame.shape[0] * 1 / 4)),
         (int(self.frame.shape[1] * 1 / 4 + movement[0][0]), int(self.frame.shape[0] * 1 / 4 + movement[0][1])),
         (0, 0, 255),
         thickness=3,
     )
     # quadrant 2 vector
     cv2.arrowedLine(
         self.frame,
         (int(self.frame.shape[1] * 3 / 4), int(self.frame.shape[0] * 1 / 4)),
         (int(self.frame.shape[1] * 3 / 4 + movement[1][0]), int(self.frame.shape[0] * 1 / 4 + movement[1][1])),
         (0, 0, 255),
         thickness=3,
     )
     # quadrant 3 vector
     cv2.arrowedLine(
         self.frame,
         (int(self.frame.shape[1] * 1 / 4), int(self.frame.shape[0] * 3 / 4)),
         (int(self.frame.shape[1] * 1 / 4 + movement[2][0]), int(self.frame.shape[0] * 3 / 4 + movement[2][1])),
         (255, 0, 0),
         thickness=3,
     )
     # quadrant 4 vector
     cv2.arrowedLine(
         self.frame,
         (int(self.frame.shape[1] * 3 / 4), int(self.frame.shape[0] * 3 / 4)),
         (int(self.frame.shape[1] * 3 / 4 + movement[3][0]), int(self.frame.shape[0] * 3 / 4 + movement[3][1])),
         (255, 0, 0),
         thickness=3,
     )
Example #24
def dispOpticalFlow(Image, Flow, Divisor=1):
    "Display image with a visualisation of a flow over the top. A divisor controls the density of the quiver plot."
    PictureShape = np.shape(Image)
    # determine number of quiver points there will be
    Imax = int(PictureShape[0] / Divisor)
    Jmax = int(PictureShape[1] / Divisor)
    # create a blank mask, on which lines will be drawn.
    mask = np.zeros_like(Image)
    for i in range(1, Imax):
        for j in range(1, Jmax):
            X1 = (i) * Divisor
            Y1 = (j) * Divisor
            X2 = int(X1 + Flow[X1, Y1, 1])
            Y2 = int(Y1 + Flow[X1, Y1, 0])
            X2 = np.clip(X2, 0, PictureShape[0])
            Y2 = np.clip(Y2, 0, PictureShape[1])
            # add all the lines to the mask
#         mask = cv2.line(mask, (Y1,X1),(Y2,X2), [0, 0, 100], 2)
            mask = cv2.arrowedLine(mask, (Y1, X1), (Y2, X2), [100, 0, 0], 1)

    # superpose lines onto image
    img = cv2.add(Image / np.max(Image) * 2, mask)
    # print image
    return img
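A sketch of calling dispOpticalFlow with a constant synthetic flow; note that Flow is indexed as Flow[row, col, (x, y)] above, and Image must be nonzero because of the np.max() normalization:

import cv2
import numpy as np

Image = np.full((120, 160, 3), 0.5, dtype=np.float32)
Flow = np.zeros((120, 160, 2), dtype=np.float32)
Flow[..., 0] = 5.0  # 5 px of flow along x everywhere
overlaid = dispOpticalFlow(Image, Flow, Divisor=10)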
Example #25
    def run(self):
        self.threads = [
            threading.Thread(target=self.face_thread),
            threading.Thread(target=self.land_pose_thread),
            threading.Thread(target=self.gaze_thread)
        ]
        for thread in self.threads:
            thread.start()

        while self.should_run():
            try:
                read_correctly, new_frame = self.get_frame()
            except RuntimeError:
                continue

            if not read_correctly:
                break

            self.fps.update()
            self.frame = new_frame
            self.debug_frame = self.frame.copy()

            if not camera:
                nn_data = depthai.NNData()
                nn_data.setLayer("data", to_planar(self.frame, (300, 300)))
                self.face_in.send(nn_data)

            if debug:  # face
                if self.gaze is not None and self.left_bbox is not None and self.right_bbox is not None:
                    re_x = (self.right_bbox[0] + self.right_bbox[2]) // 2
                    re_y = (self.right_bbox[1] + self.right_bbox[3]) // 2
                    le_x = (self.left_bbox[0] + self.left_bbox[2]) // 2
                    le_y = (self.left_bbox[1] + self.left_bbox[3]) // 2

                    x, y = (self.gaze * 100).astype(int)[:2]

                    if args.lazer:
                        beam_img = np.zeros(self.debug_frame.shape, np.uint8)
                        for t in range(10)[::-2]:
                            cv2.line(beam_img, (re_x, re_y),
                                     ((re_x + x * 100), (re_y - y * 100)),
                                     (0, 0, 255 - t * 10), t * 2)
                            cv2.line(beam_img, (le_x, le_y),
                                     ((le_x + x * 100), (le_y - y * 100)),
                                     (0, 0, 255 - t * 10), t * 2)
                        self.debug_frame |= beam_img

                    else:
                        cv2.arrowedLine(self.debug_frame, (le_x, le_y),
                                        (le_x + x, le_y - y), (255, 0, 255), 3)
                        cv2.arrowedLine(self.debug_frame, (re_x, re_y),
                                        (re_x + x, re_y - y), (255, 0, 255), 3)

                if not args.lazer:
                    for raw_bbox in self.bboxes:
                        bbox = frame_norm(self.frame, raw_bbox)
                        cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]),
                                      (bbox[2], bbox[3]), (10, 245, 10), 2)
                    if self.nose is not None:
                        cv2.circle(self.debug_frame,
                                   (self.nose[0], self.nose[1]),
                                   2, (0, 255, 0),
                                   thickness=5,
                                   lineType=8,
                                   shift=0)
                    if self.left_bbox is not None:
                        cv2.rectangle(self.debug_frame,
                                      (self.left_bbox[0], self.left_bbox[1]),
                                      (self.left_bbox[2], self.left_bbox[3]),
                                      (245, 10, 10), 2)
                    if self.right_bbox is not None:
                        cv2.rectangle(self.debug_frame,
                                      (self.right_bbox[0], self.right_bbox[1]),
                                      (self.right_bbox[2], self.right_bbox[3]),
                                      (245, 10, 10), 2)
                    if self.pose is not None and self.nose is not None:
                        draw_3d_axis(self.debug_frame, self.pose, self.nose)

                if camera:
                    cv2.imshow("Camera view", self.debug_frame)
                else:
                    aspect_ratio = self.frame.shape[1] / self.frame.shape[0]
                    cv2.imshow(
                        "Video view",
                        cv2.resize(self.debug_frame,
                                   (int(900), int(900 / aspect_ratio))))
                if cv2.waitKey(1) == ord('q'):
                    cv2.destroyAllWindows()
                    break

        self.fps.stop()
        print("FPS: {:.2f}".format(self.fps.fps()))
        if not camera:
            self.cap.release()
        cv2.destroyAllWindows()
        for i in range(1, 5):  # https://stackoverflow.com/a/25794701/5494277
            cv2.waitKey(1)
        self.running = False
Example #26
def connectdots(keypoints_dark, keypoints_light, im_with_keypoints,
                latticethresh, line_length_lower, line_length_upper,
                IslandProperties):
    for dark in keypoints_dark:
        xd = int(
            dark.pt[0])  #assigns x and y values and puts them as a coordinate
        yd = int(dark.pt[1])
        cd = (xd, yd)
        for light in keypoints_light:
            xl = int(light.pt[0])
            yl = int(light.pt[1])
            cl = (xl, yl)

            xa, ya = (xd + xl) / 2, (yd + yl) / 2  #assigns middle coordinates

            pixelcolour = latticethresh[int(xa), int(
                ya)]  #gets pixel colour on the lattice image

            if pixelcolour != 255:  #if the pixel colour is not white, continue to line drawing.

                if (
                        xd - xl
                ) == 0:  #fixes problem when dividing by zero in tan function.
                    if yd < yl:
                        xd += 0.1
                    else:
                        xl += 0.1

                if yd == yl:
                    if xd > xl:
                        deg = 0
                    if xl > xd:
                        deg = 180

                elif xd > xl and yd > yl:  #4th quad
                    deg = math.degrees(math.atan((yd - yl) / (xd - xl)))
                    deg = 360 - deg

            #print(deg)
                elif yd > yl and xd < xl:
                    deg = math.degrees(math.atan(
                        (yd - yl) / (xl - xd)))  #third
                    deg = 180 + deg
            #print(deg)
                elif yd < yl and xd < xl:
                    deg = math.degrees(math.atan(
                        (yl - yd) / (xl - xd)))  #second
                    deg = 180 - deg

            #print(deg)
                elif yd < yl and xd > xl:
                    deg = math.degrees(math.atan(
                        (yl - yd) / (xd - xl)))  #first

                if line_length_lower < (
                    (xd - xl)**2 + (yd - yl)**2)**0.5 < line_length_upper and (
                        (deg < degree_change) or ((60 - degree_change) < deg <
                                                  (60 + degree_change)) or
                        ((120 - degree_change) < deg <
                         (120 + degree_change)) or
                        ((180 - degree_change) < deg <
                         (180 + degree_change)) or
                        ((240 - degree_change) < deg <
                         (240 + degree_change)) or
                        ((300 - degree_change) < deg <
                         (300 + degree_change)) or (deg >
                                                     (360 - degree_change))):
                    mx, my = 0, 0

                    if (deg < degree_change) or (deg > (360 - degree_change)):
                        mx = 1
                        my = 0
                        cv2.arrowedLine(im_with_keypoints, cl, cd, (255, 0, 0),
                                        2)
                        print(deg)

                    if (60 - degree_change) < deg < (60 + degree_change):
                        mx = 0.5
                        my = 0.866025
                        cv2.arrowedLine(im_with_keypoints, cl, cd,
                                        (255, 182, 0), 2)

                    if (120 - degree_change) < deg < (120 + degree_change):
                        mx = -0.5
                        my = 0.866025
                        cv2.arrowedLine(im_with_keypoints, cl, cd,
                                        (178, 255, 0), 2)

                    if (180 - degree_change) < deg < (180 + degree_change):
                        mx = -1
                        my = 0
                        cv2.arrowedLine(im_with_keypoints, cl, cd,
                                        (0, 242, 255), 2)

                    if (240 - degree_change) < deg < (240 + degree_change):
                        mx = -0.5
                        my = -0.866025
                        cv2.arrowedLine(im_with_keypoints, cl, cd, (0, 0, 255),
                                        2)

                    if (300 - degree_change) < deg < (300 + degree_change):
                        mx = 0.5
                        my = -0.866025
                        cv2.arrowedLine(im_with_keypoints, cl, cd,
                                        (255, 0, 255), 2)

                    IslandProperties.append([xa, ya, mx, my])
    return (IslandProperties)
Example #27
def draw_ant( a, img, length = 5 ):
    # OpenCV works in 4th quadrant
    p1 = a.x, a.y
    p2 = a.x + int(length * math.cos( a.angle)), a.y - int(length * math.sin( a.angle))
    cv2.arrowedLine( img, (p2[1], p2[0]), (p1[1], p1[0]), 255, 2 )
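For instance, draw_ant works with any object exposing x, y, and angle; a stand-in ant on a small grayscale canvas:

import math
import types
import cv2
import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
ant = types.SimpleNamespace(x=50, y=50, angle=math.pi / 4)
draw_ant(ant, img)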
Example #28
ret, frame = camera.read()
frame = cv2.resize(frame, (640, 480))
robot = RealDifferentialDrive(get_robot_xyyaw(frame, 1.0),
                              0.12, 0.18, 0.03, 1.5, 0.2)
now = time.time()

while True:
    ret, frame = camera.read()
    frame = cv2.resize(frame, (640, 480))
    h, w, c = frame.shape
    pos = get_robot_xyyaw(frame, 1.0)
    robot.update_info(time.time() - now, pos)

    x = int(robot.x * h)
    y = int(h - robot.y * h)

    cv2.circle(frame, (x, y), 4, [0, 0, 255], 2)
    finish = (x + int(50 * math.cos(robot.yaw)),
              y + int(50 * math.sin(robot.yaw)))
    cv2.arrowedLine(frame, (x, y), finish, [0, 0, 255], 1)

    cv2.imshow('robot', frame)

    if cv2.waitKey(30) & 0xff == 27:
        break
    now = time.time()

cv2.destroyAllWindows()
Example #29
File: GUI.py Project: ChiyenLee/py-nav
 def draw(self, image):
     # print (self.color)
     cv2.arrowedLine(image, (self.circle.x, self.circle.y), (self.x, self.y), self.color, self.size)
Example #30
        new = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        try:
            flow = 3 * cv2.calcOpticalFlowFarneback(
                prvs, new, flow, 0.5, 3, 15, 3, 7, 1.5,
                cv2.OPTFLOW_USE_INITIAL_FLOW)
            cumulative = flow * 0.4 + 0.6 * cumulative  # exponential smoothing
            r = estimateHeading(cumulative)
            print("heading: {}".format(r))
            sth = estimateSomething(r, cumulative)

            blank = np.zeros((H, W, 3), np.uint8)
            frame2 = draw_flow(new, cumulative)
            drawn = drawSth(sth, blank)

            drawn = cv2.arrowedLine(drawn, (W // 2, H // 2),
                                    (W // 2 + int(r * 100), H // 2),
                                    (255, 0, 0),
                                    thickness=2)
            cv2.imshow("original image", frame2)
            cv2.imshow("recognition", drawn)
        except NameError:
            pass

        # time.sleep(0.1)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('optical_flow.png', drawn)
        prvs = new

    cap.release()
Example #31
    def show(self):
        """First shows a bar plot of the action distribution. Then all observations are shown.
            This function is for demonstration purposes only. Use left and right arrow keys to navigate
        """

        data = []
        actions = []
        conditions = []
        for i in self.data_dir.rglob('*.npy'):
            obs = np.load(i, allow_pickle=True).item()

            new_file = {
                "obs": obs['observation'],
                "condition": obs['condition'],
                "action": self.class_names[obs['action'] - 1],
                "name": i.name
            }

            data.append(new_file)

            actions.append(self.class_names[obs['action'] - 1])
            conditions.append(obs['condition'])

        unique_actions, count_actions = np.unique(actions, return_counts=True)

        fig, ax = plt.subplots()
        plt.bar(unique_actions, count_actions)
        plt.show()

        #show the individual observations:
        num_files = len(data)
        i = 0

        while i < num_files:
            print("{}/{}".format(i + 1, num_files))

            obs = data[i]

            image = cv2.cvtColor(obs['obs'], cv2.COLOR_BGR2RGB)

            cv2.putText(image, obs["action"], (10, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
            cv2.putText(image, obs["name"], (10, 220),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

            if obs["condition"] == 1:
                arrow_start = (300, 30)
                arrow_end = (300, 10)
            elif obs["condition"] == 2:
                arrow_start = (310, 20)
                arrow_end = (290, 20)
            elif obs["condition"] == 3:
                arrow_start = (300, 10)
                arrow_end = (300, 30)
            elif obs["condition"] == 4:
                arrow_start = (290, 20)
                arrow_end = (310, 20)

            cv2.arrowedLine(image,
                            arrow_start,
                            arrow_end, (0, 0, 255),
                            2,
                            tipLength=0.5)

            cv2.imshow('image', image)

            cvkey = cv2.waitKeyEx(0)

            if cvkey == 27:  #escape
                break
            elif cvkey == 2555904:  #right
                i += 1
            elif cvkey == 2424832 and i > 0:  #left
                i -= 1
Example #32
 def paint_overlay_line(self, image, steer, accel):
     print(steer, accel)
     x_0, y_0 = image.shape[1] // 2, image.shape[0] // 2
     x_1, y_1 = round(x_0 + steer * 50), y_0 - round((accel * 1.14) * 200)
     cv2.arrowedLine(image, (x_0, y_0), (x_1, y_1), (255, 0, 0), 3)
     return image
Example #33
import numpy as np
import cv2

img = cv2.imread('D:\\puppies\\puppy1.jpg')

img = cv2.line(img, (0, 0), (255, 255), (128, 255, 0), 5)
img = cv2.arrowedLine(img, (0, 0), (255, 255), (128, 255, 0), 5)
cv2.imshow('image', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
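The arrow head size can be tuned with the optional tipLength argument of cv2.arrowedLine (a fraction of the line length, 0.1 by default), e.g.:

img = cv2.arrowedLine(img, (50, 300), (300, 300), (128, 255, 0), 5, tipLength=0.3)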
Example #34
    def calculate_pitch_yaw_vertical(self, bbox_list):

        target_centroid = list()
        area = list()

        for i in bbox_list:
            pt1 = (int(i[0]), int(i[1]))
            pt2 = (int(i[2]), int(i[3]))

            image_roi = self.result[pt1[1]:pt2[1], pt1[0]:pt2[0]]

            try:

                image_roi = cv2.inRange(image_roi, self.LOWER_RED_RANGE,
                                        self.UPPER_RED_RANGE)

                image_roi = cv2.medianBlur(image_roi, 11)

                cv2.imshow('ROI', image_roi)

                _, contours, hierarchy = cv2.findContours(
                    image_roi.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                if contours:
                    dx = pt2[0] - pt1[0]
                    dy = pt2[1] - pt1[1]
                    target_centroid.append(
                        (int(pt1[0] + dx / 2), int(pt1[1] + dy / 2)))
                    cv2.rectangle(self.result,
                                  pt1=pt1,
                                  pt2=pt2,
                                  color=[0, 0, 255],
                                  thickness=6)
                    area.append(dx * dy)

            except:
                print("Red Person Not Found!")
                self.pitch_rate = 0
                self.yaw_rate = 0
                self.vertical_rate = 0
                pass

        # Draw the drone's center point
        cv2.circle(self.result,
                   self.drone_centroid,
                   radius=4,
                   color=[255, 0, 0],
                   thickness=2)

        dst = list()

        # Draw each target centroid and an arrow from the drone's center to it
        for i in target_centroid:
            cv2.circle(self.result,
                       i,
                       radius=4,
                       color=[0, 0, 255],
                       thickness=2)
            cv2.arrowedLine(self.result,
                            self.drone_centroid,
                            i,
                            color=[255, 0, 0],
                            thickness=4)
            dst.append(distance.euclidean(self.drone_centroid, i))

        try:
            if dst[0] > 10:
                self.yaw_rate = int(dst[0] / 2)
                self.vertical_rate = int(dst[0] / 20)

                # lower-right quadrant
                if self.drone_centroid[0] <= target_centroid[0][
                        0] and self.drone_centroid[1] <= target_centroid[0][1]:
                    self.vertical_rate = -self.vertical_rate

                # lower-left quadrant
                elif self.drone_centroid[0] > target_centroid[0][
                        0] and self.drone_centroid[1] <= target_centroid[0][1]:
                    self.yaw_rate = -self.yaw_rate
                    self.vertical_rate = -self.vertical_rate

                # upper-left quadrant
                elif self.drone_centroid[0] > target_centroid[0][
                        0] and self.drone_centroid[1] > target_centroid[0][1]:
                    self.yaw_rate = -self.yaw_rate

            else:
                self.yaw_rate = 0
                self.vertical_rate = 0

            if area[0] > 25000:
                self.pitch_rate = -int(area[0] / 30000) * 2

            else:
                self.pitch_rate = int(30000 / area[0]) * 2

            print("Red Person & Centroid Found!\narea[0]:{}, dst[0]:{}".format(
                area[0], dst[0]))

        except IndexError:
            print("Centroid Not Found!")
            self.pitch_rate = 0
            self.yaw_rate = 0
            self.vertical_rate = 0
            pass  # do nothing if the list index is out of range
Example #35
    side_img = cv.resize(  # assumed: the call was truncated in the source; resize to half size
        side_img, (int(side_img.shape[1] / 2), int(side_img.shape[0] / 2)))
    ball_loc, side_img = ball_detection(side_img)
    # print("ball loc",ball_loc)
    # print("prev ball loc", ball_loc_prev)

    print(ball_loc)
    #calculate velocity vector and plot
    if ball_loc is not None and ball_loc_prev is not None:
        velocity_vec = tuple(map(lambda i, j: (i - j), ball_loc,
                                 ball_loc_prev))
        # print(velocity_vec)
        print("velocity:", np.linalg.norm(np.array(velocity_vec) / 1 / 60))
        side_img = cv.arrowedLine(side_img,
                                  pt1=ball_loc,
                                  pt2=tuple(
                                      map(lambda i, j: i + j * scale, ball_loc,
                                          velocity_vec)),
                                  color=(255, 0, 0),
                                  thickness=3)

        # frame when ball touches ground
        if velocity_vec[0] == 0:
            velocity_slope = np.inf
        else:
            velocity_slope = velocity_vec[1] / velocity_vec[0]
        # print(velocity_slope)
        if contact_loc is None and velocity_slope >= 0:
            contact_loc = ball_loc_prev
        if contact_loc is not None:
            cv.circle(side_img, contact_loc, 10, (0, 0, 255), -1)
        # print(contact_loc)
Example #36
def redo(im_with_keypoints, IslandProperties):
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.setMouseCallback("Keypoints", click_event_bars)
    cv2.waitKey(-1)

    for i in range(int(len(newbar) * 0.5)):
        xl = int(newbar[2 * i][0])
        yl = int(newbar[2 * i][1])
        cl = (xl, yl)
        xd = int(newbar[(2 * i) + 1][0])
        yd = int(newbar[(2 * i) + 1][1])
        cd = (xd, yd)
        xa, ya = (xd + xl) / 2, (yd + yl) / 2  #assigns middle coordinates
        ca = (xa, ya)

        if (xd - xl
            ) == 0:  #fixes problem when dividing by zero in tan function.
            if yd < yl:
                xd += 0.1
            else:
                xl += 0.1

        if yd == yl:
            if xd > xl:
                deg = 0
            if xl > xd:
                deg = 180

        elif xd > xl and yd > yl:  #4th quad
            deg = math.degrees(math.atan((yd - yl) / (xd - xl)))
            deg = 360 - deg

    #print(deg)
        elif yd > yl and xd < xl:
            deg = math.degrees(math.atan((yd - yl) / (xl - xd)))  #third
            deg = 180 + deg
    #print(deg)
        elif yd < yl and xd < xl:
            deg = math.degrees(math.atan((yl - yd) / (xl - xd)))  #second
            deg = 180 - deg

    #print(deg)
        elif yd < yl and xd > xl:
            deg = math.degrees(math.atan((yl - yd) / (xd - xl)))  #first

        if (deg > 45 and deg < 225):
            cv2.arrowedLine(im_with_keypoints, cl, cd, (0, 0, 255), 2)

        elif (deg < 45 or deg > 225):
            cv2.arrowedLine(im_with_keypoints, cl, cd, (255, 0, 0), 2)

        if (deg < degree_change) or (deg > (360 - degree_change)):
            mx = 1
            my = 0
            cv2.arrowedLine(im_with_keypoints, cl, cd, (255, 0, 0), 2)
            print(deg)

        if (60 - degree_change) < deg < (60 + degree_change):
            mx = 0.5
            my = 0.866025
            cv2.arrowedLine(im_with_keypoints, cl, cd, (255, 182, 0), 2)

        if (120 - degree_change) < deg < (120 + degree_change):
            mx = -0.5
            my = 0.866025
            cv2.arrowedLine(im_with_keypoints, cl, cd, (178, 255, 0), 2)

        if (180 - degree_change) < deg < (180 + degree_change):
            mx = -1
            my = 0
            cv2.arrowedLine(im_with_keypoints, cl, cd, (0, 242, 255), 2)

        if (240 - degree_change) < deg < (240 + degree_change):
            mx = -0.5
            my = -0.866025
            cv2.arrowedLine(im_with_keypoints, cl, cd, (0, 0, 255), 2)

        if (300 - degree_change) < deg < (300 + degree_change):
            mx = 0.5
            my = -0.866025
            cv2.arrowedLine(im_with_keypoints, cl, cd, (255, 0, 255), 2)

        IslandProperties.append([xa, ya, mx, my])
    return (IslandProperties)
Example #37
    def draw_arrow(self, frame):
        """This Function draws an arrow according to the current direction of the ball along the ball.

        Args:
            self: This class.
            frame: Image or Frame to draw the arrow in.

        Returns:
            None.

        """
        box = self.box
        if len(box) == 0:
            return
        # Midpoint of each edge of the box; the conditional expressions in the
        # original reduce to the integer midpoint of the two endpoints.
        def midpoint(p, q):
            return (min(p[0], q[0]) + int(abs(p[0] - q[0]) / 2),
                    min(p[1], q[1]) + int(abs(p[1] - q[1]) / 2))

        center_line_a = midpoint(box[0], box[1])
        center_line_b = midpoint(box[1], box[2])
        center_line_c = midpoint(box[2], box[3])
        center_line_d = midpoint(box[3], box[0])

        left = center_line_a if center_line_a[0] < center_line_b[
            0] else center_line_b
        left = left if left[0] < center_line_c[0] else center_line_c
        left = left if left[0] < center_line_d[0] else center_line_d

        right = center_line_a if center_line_a[0] > center_line_b[
            0] else center_line_b
        right = right if right[0] > center_line_c[0] else center_line_c
        right = right if right[0] > center_line_d[0] else center_line_d

        if self.left and left and right:
            self.lastArrowHead = self.arrowhead
            self.lastArrowTail = self.arrowtail
            self.arrowhead = left
            self.arrowtail = right
            cv2.arrowedLine(frame, right, left, (0, 255, 0), 3)
            return
        elif left and right:
            self.lastArrowHead = self.arrowhead
            self.lastArrowTail = self.arrowtail
            self.arrowhead = right
            self.arrowtail = left
            cv2.arrowedLine(frame, left, right, (0, 255, 0), 3)
            return
Example #38
import cv2
import numpy as np

if __name__ == "__main__":
    print(f"cv2 version is {cv2.__version__}")

    size = np.array([480, 640, 3])
    # White canvas
    img = np.full(size, (255, 255, 255), dtype=np.uint8)
    color = np.array([0., 0., 255])

    cv2.arrowedLine(
        img=img,
        pt1=(30, 50),
        pt2=(320, 50),
        color=color,
        thickness=5,
    )
    cv2.arrowedLine(
        img=img,
        pt1=(610, 100),
        pt2=(320, 100),
        color=color,
        thickness=5,
    )
    cv2.arrowedLine(
        img=img,
        pt1=(0, 480),
        pt2=(320, 240),
        color=color,
        thickness=5,
    )

Example #39
def visualize_video(video):
    frame_count = video[3]
    if frame_count == -1:
        return

    moi_count = len(video[1])
    track_list = []

    t = time.time()

    video_name = os.path.basename(os.path.normpath(video[4]))
    video[4] = cv2.VideoCapture(video[4])
    vid = video[4]
    vid_fps = int(vid.get(cv2.CAP_PROP_FPS))
    width, height = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
        vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    codec = cv2.VideoWriter_fourcc(*'mp4v')
    video[2] = cv2.VideoWriter(video[2], codec, vid_fps, (width, height))

    curr_count = np.zeros(shape=(moi_count, 5), dtype=int)

    img = Image.new('L', (width, height), 0)
    ImageDraw.Draw(img).polygon([tuple(x) for x in video[0]],
                                outline=1,
                                fill=1)
    roi_mask = np.array(img)

    segment_start = list(range(0, frame_count, FRAME_PER_SEGMENT))
    segment_end = segment_start[1:] + [frame_count]  # list.append() returns None
    num_segment = len(segment_start)
    flash_list = []

    max_traffic = max([sum([len(y) for y in x]) for x in video[5]])
    print('max traffic', video_name, max_traffic)

    for i in range(num_segment):
        print('Video: {}. Segment {:03d}/{:03d} ({:05.2f}%)'.format(
            video_name, (i + 1), num_segment, 100 * (i + 1) / num_segment))
        img = np.ndarray(shape=(FRAME_PER_SEGMENT, height, width, 3),
                         dtype=np.uint8)
        for j in range(FRAME_PER_SEGMENT):
            _, frame = vid.read()
            img[j] = frame

        for j in range(FRAME_PER_SEGMENT):

            # cv2.polylines(img[j], [video[0]], isClosed=True, color=ROI_COLOR_BGR, thickness=4)
            ALPHA = 96
            ROI_COLOR_BGR_MODIFIED = np.array(ROI_COLOR_BGR, dtype=np.float32)
            ROI_COLOR_BGR_MODIFIED = np.clip(
                ROI_COLOR_BGR_MODIFIED *
                (1 + 0.5 * len(flash_list) / max_traffic), 0, 255)
            img[j][np.where(roi_mask)] = (
                ROI_COLOR_BGR_MODIFIED * ALPHA / 255 +
                img[j][np.where(roi_mask)].astype(np.float32) *
                (1 - ALPHA / 255)).astype(np.uint8)

            for moi_id, moi in enumerate(video[1]):
                if moi_id == 0:
                    continue
                cv2.polylines(img[j], [moi[:-1]],
                              isClosed=False,
                              color=getColorMOI_BGR(moi_id),
                              thickness=2)
                cv2.arrowedLine(img[j],
                                tuple(moi[-2]),
                                tuple(moi[-1]),
                                color=getColorMOI_BGR(moi_id),
                                thickness=2,
                                tipLength=0.03)

            cv2.rectangle(img[j], (1060 - 175 * ((moi_count - 2) // 6), 0),
                          (1280, 200),
                          color=(224, 224, 224),
                          thickness=-1)

            for moi_id in range(1, moi_count):
                obj_list = video[5][i * FRAME_PER_SEGMENT + j][moi_id]
                for obj in obj_list:
                    curr_count[moi_id][obj[0]] += 1
                    if obj[1][0] > -1:
                        flash_list.append(
                            (i * FRAME_PER_SEGMENT + j, obj, moi_id))

                count_str = ' '.join([str(x) for x in curr_count[moi_id][1:]])
                moi = video[1][moi_id]
                cv2.putText(img[j],
                            count_str, (1080 - 175 * ((moi_id - 1) // 6), 35 +
                                        ((moi_id - 1) % 6) * 25),
                            cv2.FONT_HERSHEY_COMPLEX,
                            fontScale=0.6,
                            color=getColorMOI_BGR(moi_id),
                            thickness=2)

            # Remove frames older than 0.25s
            flash_list = [
                flash for flash in flash_list
                if (i * FRAME_PER_SEGMENT + j - flash[0] < (vid_fps * 0.25))
            ]

            for flash in flash_list:
                radius = (30 * flash[1][1][1] // height)
                if radius <= 12: radius = 12
                cv2.circle(img[j],
                           flash[1][1],
                           radius=radius,
                           color=getColorMOI_BGR(flash[2]),
                           thickness=-1)
                cv2.putText(img[j],
                            str(flash[1][0]),
                            (flash[1][1][0] - radius, flash[1][1][1] - radius),
                            cv2.FONT_HERSHEY_COMPLEX,
                            fontScale=max(0.4, flash[1][1][1] / height),
                            color=(0, 255, 255),
                            thickness=2)
            frame_str = "frame_id: " + str(i * FRAME_PER_SEGMENT + j + 1)
            cv2.putText(img[j],
                        frame_str, (30, 30),
                        cv2.FONT_HERSHEY_COMPLEX,
                        fontScale=1,
                        color=(0, 0, 255),
                        thickness=2)

        [video[2].write(frame) for frame in img]

    print(time.time() - t)
    video[2].release()
Example #40
            img = img.astype(np.uint8)

        out_path = os.path.join(out_dir, os.path.basename(img_path[img_idx]))
        img_idx += 1

        h, w = flow_x.shape
        n_hregion = int(h / ksize)
        n_wregion = int(w / ksize)

        for h_idx in range(n_hregion):
            for w_idx in range(n_wregion):
                w_start = ksize * w_idx
                w_end = w_start + ksize

                h_start = ksize * h_idx
                h_end = h_start + ksize

                mean_x = np.mean(flow_x[h_start:h_end, w_start:w_end])
                mean_y = np.mean(flow_y[h_start:h_end, w_start:w_end])
                #print("(h, w) = (%d, %d), (y, x) = (%d, %d)" % (h_start, w_start, mean_y, mean_x))

                pt1, pt2 = calc_point(w_start, w_end, h_start, h_end, mean_x,
                                      mean_y)
                cv2.arrowedLine(img,
                                pt1,
                                pt2, (255, 0, 0),
                                arrow_weight,
                                tipLength=tipLength)

        misc.imsave(out_path, img)
Example #41
import cv2
import numpy as np

img = cv2.imread('lena.jpg', 1)

img = cv2.line(img, (0,0), (255,255), (0, 255, 255), 3)
img = cv2.arrowedLine(img, (0,255), (255,255), (255, 255, 0), 3)
img = cv2.rectangle(img, (384,0), (510,128), (255,100,100), -1)
img = cv2.circle(img, (447,63), 63, (100, 0, 255), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, 'OpenCV', (30,480), font, 4, (255,255,255),5,cv2.LINE_AA)

cv2.imshow('image', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #42
  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)

    (rows,cols,channels) = cv_image.shape

    blueColor = (255,0,0)
    greenColor = (0,255,0)
    redColor = (0,0,255)

    ###########################################################################################
    #    COLOR DETECTION (B,G,R)
    ###########################################################################################
    # Define color detection area
    detectionAreaYRange = (int(rows/3), int(2*rows/3))
    detectionAreaXRange = (int(cols/3), int(2*cols/3))
    colorROI = np.array([
      [detectionAreaXRange[0], 0],
      [detectionAreaXRange[1], 0],
      [detectionAreaXRange[1], rows],
      [detectionAreaXRange[0], rows]
    ])

    # Count blue and/or green pixels within detection area
    numBluePixels = 0
    numGreenPixels = 0
    # for i in range(detectionAreaYRange[0], detectionAreaYRange[1], 3):
    for i in range(0, rows, 10):
      # for j in range(0, cols, 10):
      for j in range(detectionAreaXRange[0], detectionAreaXRange[1], 10):
        if cv_image[i,j,0] >= 200:
          numBluePixels += 1
          # cv_image[i,j] = [0,255,0]
        elif cv_image[i,j,1] >= 240:
          numGreenPixels += 1

    # Decide if there are enough blue pixels to consider them a blue tool
    if numBluePixels >= 20:
      toolDetectionMsg = "Blue tool detected"
      blueToolDetected = True
    else:
      toolDetectionMsg = "No tool detected"
      blueToolDetected = False
    # Decide if there are enough green pixels to consider them a mounting dock containing a Trocar
    if numGreenPixels >= 20:
      trocarDetectionMsg = "Trocar detected"
      trocarDetected = True
    else:
      trocarDetectionMsg = "No Trocar detected"
      trocarDetected = False


    ###########################################################################################
    #    SHAPE DETECTION
    ###########################################################################################
    # Restrict detection in the center columns
    # img_detection_region = cv_image[0:rows, int(cols/3):int(2*cols/3)]
    img_detection_region = cv_image

    # convert to grayscale
    gray = cv2.cvtColor(img_detection_region, cv2.COLOR_BGR2GRAY)

    # make boundary of image white so that contours will be always closed loops within the region
    gray[0:img_detection_region.shape[0], 0].fill(255)
    gray[0:img_detection_region.shape[0], img_detection_region.shape[1]-1].fill(255)
    gray[0, 0:img_detection_region.shape[1]].fill(255)
    gray[img_detection_region.shape[0]-1, 0:img_detection_region.shape[1]].fill(255)

    # blur the image
    blur = cv2.blur(gray, (3, 3))
    # thresholded image for blue color
    ret, thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)
    # thresholded image for green color
    ret, threshGreen = cv2.threshold(blur, 110, 255, cv2.THRESH_BINARY)

    # Finding contours for the thresholded image
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    im2, contoursGreen, hierarchy = cv2.findContours(threshGreen, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # create hull array for convex hull points
    hull = []
    # calculate points for each contour
    for i in range(len(contours)):
      # creating convex hull object for each contour
      hull.append(cv2.convexHull(contours[i], False))

    # Calculate and store the center of mass of each contour
    contourCenterOfMass = []
    for i in range(len(contours)):
      contourCenterOfMass.append(geometry.centerOfMass(contours[i]))

    # For each convex hull, calculate mean distance of hull points from center of mass
    hullsMeanDistanceFromCenter = []
    for i in range(len(hull)):
      sumD = 0
      for point in hull[i]:
        cm = contourCenterOfMass[i]
        distance = math.sqrt((point[0][0] - cm[0])**2 + (point[0][1] - cm[1])**2)
        sumD += distance
      avgD = sumD/len(hull[i])
      hullsMeanDistanceFromCenter.append(avgD)
    # Calculate max convex hull by finding the maximum mean distance
    if len(hull) > 1:
      startFrom = 1 # Ignore first convex hull (if there are many), because it is the whole picture
    else:
      startFrom = 0
    maxHullIndex = startFrom
    for i in range(startFrom, len(hull)):
      if hasPolygonCenterOfColor(cv_image, contours[i], blueColor):
        if hullsMeanDistanceFromCenter[i] > hullsMeanDistanceFromCenter[maxHullIndex]:
          maxHullIndex = i


    ###########################################################################################
    #    POSE DETECTION
    ###########################################################################################

    # Calculate ROI of the selected tool using its convex hull
    min_x = cols
    min_y = rows
    max_x = max_y = 0
    for point in hull[maxHullIndex]:
      if point[0][0] < min_x:
        min_x = point[0][0]
      if point[0][1] < min_y:
        min_y = point[0][1]
      if point[0][0] > max_x:
        max_x = point[0][0]
      if point[0][1] > max_y:
        max_y = point[0][1]

    toolROI = np.array([
      [min_x, min_y],
      [max_x, min_y],
      [max_x, max_y],
      [min_x, max_y]
    ])
    
    # Get pixels inside the selected contour
    # CAUTION: This is computationally very heavy and drops FPS by half or more!
    # It could be avoided with smarter handling.
    toolPixels = []
    # for i in range(detectionAreaYRange[0], detectionAreaYRange[1], 3):
    for i in range(min_y, max_y, 10):
      for j in range(min_x, max_x, 10):
      # for j in range(detectionAreaXRange[0], detectionAreaXRange[1], 10):
        point = (j,i)
        if cv2.pointPolygonTest(contours[maxHullIndex], point, False) != -1:
          numBluePixels += 1
          cv_image[i,j] = [0,255,0]
          toolPixels.append(np.array([[i,j]]))
        elif cv_image[i,j,1] >= 240:
          numGreenPixels += 1
    toolPixels = np.array(toolPixels)

    # Center of mass of tool (weighted average of cm computed from contour and cm computed from toolPixels)
    toolPixelsCM = geometry.centerOfMass(toolPixels) # toolPixelsCM coordinates are (Y, X)
    maxHullCmX = (contourCenterOfMass[maxHullIndex][0] + 5*toolPixelsCM[1]) / 6
    maxHullCmY = (contourCenterOfMass[maxHullIndex][1] + 5*toolPixelsCM[0]) / 6

    # Find orientation vectors of tool
    a,b = geometry.orientationVectors(toolPixels)
    # attach vectors to center of mass point
    a = a + [maxHullCmX, maxHullCmY]
    b = b + [maxHullCmX, maxHullCmY]

    # Update toolCenterOfMass point variable (and orientation) only if there is a significant change in (x,y) values,
    # in order to make the point and orientation stable at all times
    cmChangeThreshold = 2
    if (abs(self.toolCenterOfMass[0] - maxHullCmX) > cmChangeThreshold) and (abs(self.toolCenterOfMass[1] - maxHullCmY) > cmChangeThreshold):
      self.toolCenterOfMass[0] = maxHullCmX
      self.toolCenterOfMass[1] = maxHullCmY
      self.toolOrientX = a
      self.toolOrientY = b

    ###########################################################################################
    #    VISUAL SERVOING COMMAND
    ###########################################################################################
    # The origin is the desired position for our visual servo controller
    originY = int(rows/2)
    originX = int(cols/2)

    # Actual position of center of mass
    cmX = self.toolCenterOfMass[0]
    cmY = self.toolCenterOfMass[1]

    if not disable_servo and blueToolDetected:
      ex = round(0.1*(cmX - originX)/float(cols), 4)
      ey = -round(0.1*(cmY - originY)/float(rows), 4)
      eth = round(math.atan(a[0]/a[1]), 4)/math.pi
      
      error_norm = math.sqrt(ex**2 + ey**2)
      # Filter out sudden spikes in error which occur from sudden temporary detection of another tool
      if error_norm > 2*self.prev_error_norm and self.prev_error_norm > 0.0:
        ex = self.prev_ex
        ey = self.prev_ey
        error_norm = self.prev_error_norm
      
      twist = Twist()
      # Only send the command if one of the errors is bigger than the allowed tolerance, so that
      # we avoid small oscillations in the target position
      if error_norm > self.error_tolerance or eth > self.error_tolerance:
        twist.linear.x = self.Kp*ex + self.Kd*(ex - self.prev_ex)
        twist.linear.y = self.Kp*ey + self.Kd*(ey - self.prev_ey)
        # twist.angular.z = eth
        self.servo_cmd.publish(twist)
      
      self.error_pub.publish(error_norm)
      self.error_theta_pub.publish(eth)

      self.prev_ex = ex
      self.ex_sum += ex
      self.prev_ey = ey
      self.ey_sum += ey
      self.prev_error_norm = math.sqrt(self.prev_ex**2 + self.prev_ey**2)

    ###########################################################################################
    #    FIND GRASP POINTS - FORCE CLOSURE
    ###########################################################################################
    
    # create an image filled with zeros, single-channel, same size as img_detection_region.
    blank = np.zeros(img_detection_region.shape[0:2])
    cv2.circle(img_detection_region, (cmX,cmY), 100, 1, 1)
    circle_img = cv2.circle(blank.copy(), (cmX,cmY), 100, 1, 1)
    contour_img = cv2.drawContours(blank.copy(), contours, maxHullIndex, 1, 1, 8)
    intersectedImg = circle_img + contour_img
    intersectionPoints = np.argwhere(intersectedImg == np.max(intersectedImg))
    reducedIntersectionPoints = [intersectionPoints[0]]
    for p in range(1, len(intersectionPoints)):
      pt1 = intersectionPoints[p]
      canAddPoint = True
      for pt2 in reducedIntersectionPoints:
        # Manhattan Distance
        if abs(pt1[0] - pt2[0]) + abs(pt1[1] - pt2[1]) < 10:
          canAddPoint = False
          break
      if canAddPoint:
        reducedIntersectionPoints.append(pt1)
    
    if len(reducedIntersectionPoints) > 3:
      reducedIntersectionPoints = reducedIntersectionPoints[0:3]

    # print("reducedIntersectionPoints", reducedIntersectionPoints)

    ###########################################################################################
    #    DRAW OPENCV IMAGE
    ###########################################################################################

    if not disable_servo:
      # Draw target at the center (origin) of the image
      cv2.circle(img_detection_region,(originX,originY),10,(255,0,255),-1)

    # Draw contour and hull points of the biggest convex hull,
    # but only if a blue tool was detected
    contour_color = (0, 0, 255)
    convex_hull_color = (0, 255, 0)

    # for i in range(len(contours)):
    #   cv2.drawContours(img_detection_region, contours, i, contour_color, 2, 8)

    if len(hull) > 1 and blueToolDetected: # if there is only one hull, it is the whole picture (default convex hull)
      # draw max convex hull object
      cv2.drawContours(img_detection_region, hull, maxHullIndex, convex_hull_color, 2, 8)

      # draw center of mass of convex hull
      cv2.circle(img_detection_region,(cmX,cmY),5,(0,0,255),-1)

      # draw orientation vectors
      a = self.toolOrientX
      b = self.toolOrientY
      cv2.arrowedLine(img_detection_region, (cmX,cmY), (a[0], a[1]), redColor, 2, 8, 0, 0.1)
      cv2.arrowedLine(img_detection_region, (cmX,cmY), (b[0], b[1]), greenColor, 2, 8, 0, 0.1)

      if not disable_servo:
        # Draw arrow from detected tool center of mass to center of the image
        # This arrow will also be used as the visual servoing command
        cv2.arrowedLine(img_detection_region, (cmX,cmY), (originX, originY), (0,255,255), 2, 8, 0, 0.1)

    if trocarDetected:
      for i in range(1, len(contoursGreen)):
        # draw the i-th contour
        cv2.drawContours(img_detection_region, contoursGreen, i, contour_color, 2, 8, hierarchy)

    # Draw Color Region Of Interest
    # cv2.polylines(cv_image, [colorROI], True, (0,255,0), thickness=3)

    # Draw Tool Region Of Interest
    cv2.polylines(cv_image, [toolROI], True, (255,255,255), thickness=2)

    # Draw grasp points and their triangle
    for point in reducedIntersectionPoints:
      cv2.circle(img_detection_region,(point[1],point[0]),7,(10,255,255),-1)
    grasp_triangle = np.flip(np.array(reducedIntersectionPoints)).reshape((-1,1,2))
    cv2.polylines(cv_image, [grasp_triangle], True, (0,255,255))

    # Draw tool detection message
    font                   = cv2.FONT_HERSHEY_SIMPLEX
    topLeftCorner = (50,50)
    fontScale              = 0.5
    fontThickness          = 1  # the 7th positional argument of cv2.putText is thickness, not lineType
    cv2.putText(
      cv_image,
      toolDetectionMsg, 
      topLeftCorner, 
      font, 
      fontScale,
      blueColor,
      fontThickness
    )

    # Draw trocar detection message
    position = (50,70)
    cv2.putText(
      cv_image,
      trocarDetectionMsg, 
      position, 
      font, 
      fontScale,
      greenColor,
      fontThickness
    )

    # Calculate and display FPS
    endOfFrameTime = time.time()
    seconds = endOfFrameTime - self.prevFrameTime
    self.prevFrameTime = endOfFrameTime
    fps = round(1/seconds, 2)
    fpsInfo = "FPS = {}".format(fps)
    position = (cols-200,50)
    cv2.putText(
      cv_image,
      fpsInfo, 
      position, 
      font, 
      fontScale,
      blueColor,
      fontThickness
    )

    cv2.imshow("OpenCV Image", cv_image)
    cv2.waitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
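# NOTE: the callback above flags its per-pixel cv2.pointPolygonTest loop as a large
# FPS cost. One common cheaper approach (a sketch, assuming only membership is
# needed and not the signed distance) rasterizes the contour once into a mask:
import cv2
import numpy as np

def pixels_inside_contour(contour, image_shape, step=10):
    # Fill the contour once, then sample every `step`-th pixel of the mask
    # instead of calling cv2.pointPolygonTest for each sampled point.
    mask = np.zeros(image_shape[:2], dtype=np.uint8)
    cv2.drawContours(mask, [contour], -1, 255, thickness=cv2.FILLED)
    rows, cols = np.nonzero(mask[::step, ::step])
    return np.stack([rows * step, cols * step], axis=1)  # (row, col) pairs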
예제 #43
0
def draw_edge_result(img, result, edge_thresh=0.5, keynode_thresh=0.5):
    """Draw text and their relationship on empty images.

    Args:
        img (np.ndarray): The original image.
        result (dict): The result of model forward_test, including:
            - img_metas (list[dict]): List of meta information dictionary.
            - nodes (Tensor): Node prediction with size:
                number_node * node_classes.
            - edges (Tensor): Edge prediction with size: number_edge * 2.
        edge_thresh (float): Score threshold for edge classification.
        keynode_thresh (float): Score threshold for node
            (``key``) classification.

    Returns:
        np.ndarray: The image with key, value and relation drawn on it.
    """

    h, w = img.shape[:2]

    vis_area_width = w // 3 * 2
    vis_area_height = h
    dist_key_to_value = vis_area_width // 2
    dist_pair_to_pair = 30

    bbox_x1 = dist_pair_to_pair
    bbox_y1 = 0

    new_w = vis_area_width
    new_h = vis_area_height
    pred_edge_img = np.ones((new_h, new_w, 3), dtype=np.uint8) * 255

    nodes = result['nodes'].detach().cpu()
    texts = result['img_metas'][0]['ori_texts']
    num_nodes = result['nodes'].size(0)
    edges = result['edges'].detach().cpu()[:, -1].view(num_nodes, num_nodes)

    # (i, j) will be a valid pair
    # either edge_score(node_i->node_j) > edge_thresh
    # or edge_score(node_j->node_i) > edge_thresh
    pairs = (torch.max(edges, edges.T) > edge_thresh).nonzero(as_tuple=True)
    pairs = (pairs[0].numpy().tolist(), pairs[1].numpy().tolist())

    # 1. "for n1, n2 in zip(*pairs) if n1 < n2":
    #     Only (n1, n2) will be included if n1 < n2 but not (n2, n1), to
    #     avoid duplication.
    # 2. "(n1, n2) if nodes[n1, 1] > nodes[n1, 2]":
    #     nodes[n1, 1] is the score that this node is predicted as key,
    #     nodes[n1, 2] is the score that this node is predicted as value.
    #     If nodes[n1, 1] > nodes[n1, 2], n1 will be the index of key,
    #     so that n2 will be the index of value.
    result_pairs = [(n1, n2) if nodes[n1, 1] > nodes[n1, 2] else (n2, n1)
                    for n1, n2 in zip(*pairs) if n1 < n2]

    result_pairs.sort()
    result_pairs_score = [
        torch.max(edges[n1, n2], edges[n2, n1]) for n1, n2 in result_pairs
    ]

    key_current_idx = -1
    pos_current = (-1, -1)
    newline_flag = False

    key_font_size = 15
    value_font_size = 15
    key_font_color = (0, 0, 0)
    value_font_color = (0, 0, 255)
    arrow_color = (0, 0, 255)
    score_color = (0, 255, 0)
    for pair, pair_score in zip(result_pairs, result_pairs_score):
        key_idx = pair[0]
        if nodes[key_idx, 1] < keynode_thresh:
            continue
        if key_idx != key_current_idx:
            # move y-coords down for a new key
            bbox_y1 += 10
            # enlarge blank area to show key-value info
            if newline_flag:
                bbox_x1 += vis_area_width
                tmp_img = np.ones(
                    (new_h, new_w + vis_area_width, 3), dtype=np.uint8) * 255
                tmp_img[:new_h, :new_w] = pred_edge_img
                pred_edge_img = tmp_img
                new_w += vis_area_width
                newline_flag = False
                bbox_y1 = 10
        key_text = texts[key_idx]
        key_pos = (bbox_x1, bbox_y1)
        value_idx = pair[1]
        value_text = texts[value_idx]
        value_pos = (bbox_x1 + dist_key_to_value, bbox_y1)
        if key_idx != key_current_idx:
            # draw text for a new key
            key_current_idx = key_idx
            pred_edge_img, text_sizes = draw_texts_by_pil(
                pred_edge_img, [key_text],
                draw_box=False,
                on_ori_img=True,
                font_size=key_font_size,
                fill_color=key_font_color,
                draw_pos=[key_pos],
                return_text_size=True)
            pos_right_bottom = (key_pos[0] + text_sizes[0][0],
                                key_pos[1] + text_sizes[0][1])
            pos_current = (pos_right_bottom[0] + 5, bbox_y1 + 10)
            pred_edge_img = cv2.arrowedLine(
                pred_edge_img, (pos_right_bottom[0] + 5, bbox_y1 + 10),
                (bbox_x1 + dist_key_to_value - 5, bbox_y1 + 10), arrow_color,
                1)
            score_pos_x = int(
                (pos_right_bottom[0] + bbox_x1 + dist_key_to_value) / 2.)
            score_pos_y = bbox_y1 + 10 - int(key_font_size * 0.3)
        else:
            # draw arrow from key to value
            if newline_flag:
                tmp_img = np.ones((new_h + dist_pair_to_pair, new_w, 3),
                                  dtype=np.uint8) * 255
                tmp_img[:new_h, :new_w] = pred_edge_img
                pred_edge_img = tmp_img
                new_h += dist_pair_to_pair
            pred_edge_img = cv2.arrowedLine(
                pred_edge_img, pos_current,
                (bbox_x1 + dist_key_to_value - 5, bbox_y1 + 10), arrow_color,
                1)
            score_pos_x = int(
                (pos_current[0] + bbox_x1 + dist_key_to_value - 5) / 2.)
            score_pos_y = int((pos_current[1] + bbox_y1 + 10) / 2.)
        # draw edge score
        cv2.putText(pred_edge_img, '{:.2f}'.format(pair_score),
                    (score_pos_x, score_pos_y), cv2.FONT_HERSHEY_COMPLEX, 0.4,
                    score_color)
        # draw text for value
        pred_edge_img = draw_texts_by_pil(pred_edge_img, [value_text],
                                          draw_box=False,
                                          on_ori_img=True,
                                          font_size=value_font_size,
                                          fill_color=value_font_color,
                                          draw_pos=[value_pos],
                                          return_text_size=False)
        bbox_y1 += dist_pair_to_pair
        if bbox_y1 + dist_pair_to_pair >= new_h:
            newline_flag = True

    return pred_edge_img
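# A tiny worked example (made-up scores) of the pair extraction above: the
# symmetric max keeps an edge if either direction clears the threshold, and
# the n1 < n2 filter removes duplicates:
import torch

edges = torch.tensor([[0.0, 0.9, 0.1],
                      [0.2, 0.0, 0.7],
                      [0.0, 0.1, 0.0]])
pairs = (torch.max(edges, edges.T) > 0.5).nonzero(as_tuple=True)
undirected = [(n1.item(), n2.item()) for n1, n2 in zip(*pairs) if n1 < n2]
print(undirected)  # [(0, 1), (1, 2)]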
예제 #44
0
    def interactive_mode(self):
        cv2.namedWindow("Build", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Build", self.image_size, self.image_size)
        pi2 = np.pi * 2
        img = self.prep_image()
        pos_start, pos_cen, pos_arrow = self.necessary_data()
        for frame, (x_start,y_start),arr_cen, arr_pos in zip(self.frame_data['o'][self.start_particle:self.number_test],\
                                                             pos_start[self.start_particle:self.number_test],\
                                                             pos_cen[self.start_particle:self.number_test],\
                                                             pos_arrow[self.start_particle:self.number_test]):
            temp_img = img[x_start:x_start + self.training_shape,
                           y_start:y_start + self.training_shape]
            resize_img = resize(temp_img, (self.training_shape*self.upscale_size, self.training_shape*self.upscale_size),\
                               mode = 'reflect', anti_aliasing = True)
            if np.isnan(arr_pos[0]):
                cv2.imshow("Build", resize_img)
            else:
                arrow_plot = cv2.arrowedLine(resize_img,
                                             (arr_cen[1], arr_cen[0]),
                                             (arr_pos[1], arr_pos[0]),
                                             (255, 0, 0), 2)
                cv2.imshow('Build', resize_img)
            print("detect or is: {}".format(frame))
            press = cv2.waitKey(0)
            # everything is good, store current information into training sets
            if press == 49:
                self.training_dict["training_x"].append(temp_img)
                self.training_dict["training_regress"].append(frame)
                self.training_dict["training_class"].append(1)
            # enter plot mode
            elif press == 50:
                points_store = []
                button_down = False
                cv2.imshow("Build", resize_img)

                def orientation_click(event, x, y, flags, param):
                    nonlocal button_down  # rebind the flag defined in interactive_mode, not a module global
                    if event == cv2.EVENT_LBUTTONUP and button_down:
                        button_down = False
                        points_store.append((x, y))
                        cv2.arrowedLine(resize_img, points_store[0],
                                        points_store[1], (255, 0, 0), 1)
                        cv2.imshow("Build", resize_img)

                    elif event == cv2.EVENT_MOUSEMOVE and button_down:
                        button_down = True
                        image = resize_img.copy()
                        cv2.arrowedLine(image, points_store[0], (x, y),
                                        (255, 0, 0), 1)
                        cv2.imshow("Build", image)

                    elif event == cv2.EVENT_LBUTTONDOWN and len(
                            points_store) < 2:
                        button_down = True
                        points_store.insert(0, (x, y))

                cv2.setMouseCallback('Build', orientation_click, points_store)
                enter = cv2.waitKey(0)
                orientation = np.arctan2(
                    points_store[1][0] - points_store[0][0],
                    points_store[1][1] - points_store[0][1])
                orientation = (orientation + pi2) % pi2
                print("Click orientation is: {}".format(orientation))
                if enter == 49:
                    self.training_dict["training_x"].append(temp_img)
                    self.training_dict["training_regress"].append(orientation)
                    self.training_dict["training_class"].append(1)
            # wrongly detected particles go to mode 3; with no orientation detected, label them class 0
            elif press == 51:
                self.training_dict["training_x"].append(temp_img)
                self.training_dict["training_regress"].append(np.nan)
                self.training_dict["training_class"].append(0)
            elif press == 27:
                break
        cv2.destroyAllWindows()
        for i in range(1, 5):
            cv2.waitKey(1)
        return
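# The mouse callback above needs `nonlocal` (the original said `global`) because
# assigning to button_down inside the nested function would otherwise create a
# fresh variable; a minimal illustration of the same pattern:
def make_counter():
    count = 0
    def bump():
        nonlocal count  # without this line, `count += 1` raises UnboundLocalError
        count += 1
        return count
    return bump

bump = make_counter()
assert bump() == 1 and bump() == 2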
예제 #45
0
            if last_center_point is None:
                last_center_point = center_point

            # find vector direction
            v1 = np.subtract(center_point, last_center_point)
            v1 = tuple(v1 * VECTOR_SIZE_NORMALIZE)

            currentMouse.update_history(v1)
            super = currentMouse.get_super_avg()

            super_end_point = tuple(np.add(center_point, super))

            vector_end_point = tuple(np.add(center_point, tuple(v1)))

            if np.linalg.norm(v1) > SHORT_MOVEMENT_THRESHOLD:
                cv2.arrowedLine(im_with_keypoints, center_point, vector_end_point, COLOR_GREEN)
            if np.linalg.norm(super) > GENERAL_MOVEMENT_THRESHOLD:
                cv2.arrowedLine(im_with_keypoints, center_point, super_end_point, COLOR_ORANGE, 2)

            last_center_point = center_point
            res = angle_between(v1, super)


            if not np.isnan(res) and (np.linalg.norm(v1) > SHORT_MOVEMENT_THRESHOLD and np.linalg.norm(super) > GENERAL_MOVEMENT_THRESHOLD):
                currentMouse.movement_sum += np.linalg.norm(v1)

                assert angle_between(v1, super) >= 0

                #currentMouse.angle_sum += angle_between(v1,super)
                #currentMouse.avg_angle = currentMouse.angle_sum/currentMouse.movement_sum
                currentMouse.data['x'].append(currentTime/(1000*60))
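# angle_between is used but not defined in this fragment. A common definition
# consistent with the NaN guard and the non-negativity assertion above (an
# assumption for illustration, not necessarily the author's helper):
import numpy as np

def angle_between(v1, v2):
    # Unsigned angle in radians between two vectors, in [0, pi];
    # NaN for a zero-length vector, matching the np.isnan(res) check.
    n1, n2 = np.linalg.norm(v1), np.linalg.norm(v2)
    if n1 == 0 or n2 == 0:
        return float('nan')
    cosang = np.clip(np.dot(v1, v2) / (n1 * n2), -1.0, 1.0)
    return float(np.arccos(cosang))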
예제 #46
0
def main():
    model.eval()
    ttime_all = []
    for inx in range(len(test_left_img)):
        idxname = test_left_img[inx].split('/')[-1].split('.')[0]
        print(test_left_img[inx])
        imgL_o = cv2.imread(test_left_img[inx])[:, :, ::-1]
        imgR_o = cv2.imread(test_right_img[inx])[:, :, ::-1]

        # for gray input images
        if len(imgL_o.shape) == 2:
            imgL_o = np.tile(imgL_o[:, :, np.newaxis], (1, 1, 3))
            imgR_o = np.tile(imgR_o[:, :, np.newaxis], (1, 1, 3))

        # resize
        maxh = imgL_o.shape[0] * args.testres
        maxw = imgL_o.shape[1] * args.testres
        max_h = int(maxh // 64 * 64)
        max_w = int(maxw // 64 * 64)
        if max_h < maxh: max_h += 64
        if max_w < maxw: max_w += 64

        input_size = imgL_o.shape
        imgL = cv2.resize(imgL_o, (max_w, max_h))
        imgR = cv2.resize(imgR_o, (max_w, max_h))
        imgL_noaug = torch.Tensor(imgL / 255.)[np.newaxis].float().cuda()

        # flip channel, subtract mean
        imgL = imgL[:, :, ::-1].copy() / 255. - np.asarray(mean_L).mean(0)[
            np.newaxis, np.newaxis, :]
        imgR = imgR[:, :, ::-1].copy() / 255. - np.asarray(mean_R).mean(0)[
            np.newaxis, np.newaxis, :]
        imgL = np.transpose(imgL, [2, 0, 1])[np.newaxis]
        imgR = np.transpose(imgR, [2, 0, 1])[np.newaxis]

        # modify module according to inputs
        from models.VCNplus import WarpModule, flow_reg
        for i in range(len(model.module.reg_modules)):
            model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))],
                            ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\
                            maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\
                            fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()
        for i in range(len(model.module.warp_modules)):
            model.module.warp_modules[i] = WarpModule(
                [1, max_w // (2**(6 - i)), max_h // (2**(6 - i))]).cuda()

        # get intrinsics
        if '2015' in args.dataset:
            from utils.util_flow import load_calib_cam_to_cam
            ints = load_calib_cam_to_cam(test_left_img[inx].replace(
                'image_2', 'calib_cam_to_cam')[:-7] + '.txt')
            K0 = ints['K_cam2']
            K1 = K0
            fl = K0[0, 0]
            cx = K0[0, 2]
            cy = K0[1, 2]
            bl = ints['b20'] - ints['b30']
            fl_next = fl
            intr_list = [
                torch.Tensor(inxx).cuda()
                for inxx in [[fl], [cx], [cy], [bl], [1], [0], [0], [1], [0],
                             [0]]
            ]
        elif 'sintel' in args.dataset and not 'test' in test_left_img[inx]:
            from utils.sintel_io import cam_read
            passname = test_left_img[inx].split('/')[-1].split('_')[-4]
            seqname1 = test_left_img[inx].split('/')[-1].split('_')[-3]
            seqname2 = test_left_img[inx].split('/')[-1].split('_')[-2]
            framename = int(
                test_left_img[inx].split('/')[-1].split('_')[-1].split('.')[0])
            #TODO add second camera
            K0, _ = cam_read(
                '/data/gengshay/tf_depth/sintel-data/training/camdata_left/%s_%s/frame_%04d.cam'
                % (seqname1, seqname2, framename + 1))
            K1, _ = cam_read(
                '/data/gengshay/tf_depth/sintel-data/training/camdata_left/%s_%s/frame_%04d.cam'
                % (seqname1, seqname2, framename + 2))
            fl = K0[0, 0]
            cx = K0[0, 2]
            cy = K0[1, 2]
            fl_next = K1[0, 0]
            bl = 0.1
            intr_list = [
                torch.Tensor(inxx).cuda()
                for inxx in [[fl], [cx], [cy], [bl], [1], [0], [0], [1], [0],
                             [0]]
            ]
        elif 'seq' in args.dataset:
            fl, cx, cy = seqcalib[inx]
            bl = 1
            fl_next = fl
            K0 = np.eye(3)
            K0[0, 0] = fl
            K0[1, 1] = fl
            K0[0, 2] = cx
            K0[1, 2] = cy
            K1 = K0
            intr_list = [
                torch.Tensor(inxx).cuda()
                for inxx in [[fl], [cx], [cy], [bl], [1], [0], [0], [1], [0],
                             [0]]
            ]
        else:
            print('NOT using given intrinsics')
            fl = min(input_size[0], input_size[1]) * 2
            fl_next = fl
            cx = input_size[1] / 2.
            cy = input_size[0] / 2.
            bl = 1
            K0 = np.eye(3)
            K0[0, 0] = fl
            K0[1, 1] = fl
            K0[0, 2] = cx
            K0[1, 2] = cy
            K1 = K0
            intr_list = [
                torch.Tensor(inxx).cuda()
                for inxx in [[fl], [cx], [cy], [bl], [1], [0], [0], [1], [0],
                             [0]]
            ]
        intr_list.append(torch.Tensor([input_size[1] / max_w
                                       ]).cuda())  # delta fx
        intr_list.append(torch.Tensor([input_size[0] / max_h
                                       ]).cuda())  # delta fy
        intr_list.append(torch.Tensor([fl_next]).cuda())

        disc_aux = [None, None, None, intr_list, imgL_noaug, None]

        if args.disp_path == '': disp_input = None
        else:
            try:
                disp_input = disparity_loader('%s/%s_disp.pfm' %
                                              (args.disp_path, idxname))
            except:
                disp_input = disparity_loader('%s/%s.png' %
                                              (args.disp_path, idxname))
            disp_input = torch.Tensor(disp_input.copy())[np.newaxis,
                                                         np.newaxis].cuda()

        # forward
        imgL = Variable(torch.FloatTensor(imgL).cuda())
        imgR = Variable(torch.FloatTensor(imgR).cuda())
        with torch.no_grad():
            imgLR = torch.cat([imgL, imgR], 0)
            model.eval()
            torch.cuda.synchronize()
            start_time = time.time()
            rts = model(imgLR, disc_aux, disp_input)
            torch.cuda.synchronize()
            ttime = (time.time() - start_time)
            print('time = %.2f' % (ttime * 1000))
            ttime_all.append(ttime)
            flow, occ, logmid, logexp, fgmask, heatmap, polarmask, disp = rts
            bbox = polarmask['bbox']
            polarmask = polarmask['mask']
            polarcontour = polarmask[:polarmask.shape[0] // 2]
            polarmask = polarmask[polarmask.shape[0] // 2:]

        # upsampling
        occ = cv2.resize(occ.data.cpu().numpy(),
                         (input_size[1], input_size[0]),
                         interpolation=cv2.INTER_LINEAR)
        logexp = cv2.resize(logexp.cpu().numpy(),
                            (input_size[1], input_size[0]),
                            interpolation=cv2.INTER_LINEAR)
        logmid = cv2.resize(logmid.cpu().numpy(),
                            (input_size[1], input_size[0]),
                            interpolation=cv2.INTER_LINEAR)
        fgmask = cv2.resize(fgmask.cpu().numpy(),
                            (input_size[1], input_size[0]),
                            interpolation=cv2.INTER_LINEAR)
        heatmap = cv2.resize(heatmap.cpu().numpy(),
                             (input_size[1], input_size[0]),
                             interpolation=cv2.INTER_LINEAR)
        polarcontour = cv2.resize(polarcontour, (input_size[1], input_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        polarmask = cv2.resize(polarmask, (input_size[1], input_size[0]),
                               interpolation=cv2.INTER_NEAREST).astype(int)
        polarmask[np.logical_and(fgmask > 0, polarmask == 0)] = -1
        if args.disp_path == '':
            disp = cv2.resize(disp.cpu().numpy(),
                              (input_size[1], input_size[0]),
                              interpolation=cv2.INTER_LINEAR)
        else:
            disp = np.asarray(disp_input.cpu())[0, 0]
        flow = torch.squeeze(flow).data.cpu().numpy()
        flow = np.concatenate([
            cv2.resize(flow[0],
                       (input_size[1], input_size[0]))[:, :, np.newaxis],
            cv2.resize(flow[1],
                       (input_size[1], input_size[0]))[:, :, np.newaxis]
        ], -1)
        flow[:, :, 0] *= imgL_o.shape[1] / max_w
        flow[:, :, 1] *= imgL_o.shape[0] / max_h
        flow = np.concatenate(
            (flow, np.ones([flow.shape[0], flow.shape[1], 1])), -1)
        bbox[:, 0] *= imgL_o.shape[1] / max_w
        bbox[:, 2] *= imgL_o.shape[1] / max_w
        bbox[:, 1] *= imgL_o.shape[0] / max_h
        bbox[:, 3] *= imgL_o.shape[0] / max_h

        # draw instance center and motion in 2D
        ins_center_vis = np.zeros(flow.shape[:2])
        for k in range(bbox.shape[0]):
            from utils.detlib import draw_umich_gaussian
            draw_umich_gaussian(ins_center_vis,
                                bbox[k, :4].reshape(2, 2).mean(0), 15)
        ins_center_vis = 256 * np.stack([
            ins_center_vis,
            np.zeros(ins_center_vis.shape),
            np.zeros(ins_center_vis.shape)
        ], -1)
        if args.refine:
            ## depth and scene flow estimation
            # save initial disp and flow
            init_disp = disp.copy()
            init_flow = flow.copy()
            init_logmid = logmid.copy()

            if args.mask_path == '':
                mask_input = polarmask
            else:
                mask_input = cv2.imread(
                    '%s/%s.png' % (args.mask_path, idxname), 0)
                if mask_input is None:
                    mask_input = cv2.imread(
                        '%s/%s.png' % (args.mask_path, idxname.split('_')[0]),
                        0)

            bgmask = (mask_input == 0)
            scene_type, T01_c, R01, RTs = ddlib.rb_fitting(
                bgmask,
                mask_input,
                disp,
                flow,
                occ,
                K0,
                K1,
                bl,
                parallax_th=4,
                mono=(args.sensor == 'mono'),
                sintel='Sintel' in idxname)
            print('camera trans: ')
            print(T01_c)
            disp, flow, disp1 = ddlib.mod_flow(bgmask,
                                               mask_input,
                                               disp,
                                               disp / np.exp(logmid),
                                               flow,
                                               occ,
                                               bl,
                                               K0,
                                               K1,
                                               scene_type,
                                               T01_c,
                                               R01,
                                               RTs,
                                               fgmask,
                                               mono=(args.sensor == 'mono'),
                                               sintel='Sintel' in idxname)
            logmid = np.clip(np.log(disp / disp1), -1, 1)

            # draw ego vehicle
            ct = [4 * input_size[0] // 5, input_size[1] // 2][::-1]
            cv2.circle(ins_center_vis,
                       tuple(ct),
                       radius=10,
                       color=(0, 255, 255),
                       thickness=10)
            obj_3d = K0[0, 0] * bl / np.median(
                disp[bgmask]) * np.linalg.inv(K0).dot(
                    np.hstack([ct, np.ones(1)]))
            obj_3d2 = obj_3d + (-R01.T.dot(T01_c))
            ed = K0.dot(obj_3d2)
            ed = (ed[:2] / ed[-1]).astype(int)
            if args.sensor == 'mono':
                direct = (ed - ct)
                direct = 50 * direct / (1e-9 + np.linalg.norm(direct))
            else:
                direct = (ed - ct)
            ed = (ct + direct).astype(int)
            if np.linalg.norm(direct) > 1:
                ins_center_vis = cv2.arrowedLine(
                    ins_center_vis,
                    tuple(ct),
                    tuple(ed), (0, 255, 255),
                    6,
                    tipLength=float(30. / np.linalg.norm(direct)))

            # draw each object
            for k in range(mask_input.max()):
                try:
                    obj_mask = mask_input == k + 1
                    if obj_mask.sum() == 0: continue
                    ct = np.asarray(
                        np.nonzero(obj_mask)).mean(1).astype(int)[::-1]  # Nx2
                    cv2.circle(ins_center_vis,
                               tuple(ct),
                               radius=5,
                               color=(255, 0, 0),
                               thickness=5)
                    if RTs[k] is not None:
                        #ins_center_vis[mask_input==k+1] = imgL_o[mask_input==k+1]
                        obj_3d = K0[0, 0] * bl / np.median(
                            disp[mask_input == k + 1]) * np.linalg.inv(K0).dot(
                                np.hstack([ct, np.ones(1)]))
                        obj_3d2 = obj_3d + (-RTs[k][0].T.dot(RTs[k][1]))
                        ed = K0.dot(obj_3d2)
                        ed = (ed[:2] / ed[-1]).astype(int)
                        if args.sensor == 'mono':
                            direct = (ed - ct)
                            direct = 50 * direct / (np.linalg.norm(direct) +
                                                    1e-9)
                        else:
                            direct = (ed - ct)
                        ed = (ct + direct).astype(int)
                        if np.linalg.norm(direct) > 1:
                            ins_center_vis = cv2.arrowedLine(
                                ins_center_vis,
                                tuple(ct),
                                tuple(ed), (255, 0, 0),
                                3,
                                tipLength=float(30. / np.linalg.norm(direct)))
                except:
                    pdb.set_trace()
        cv2.imwrite('%s/%s/mvis-%s.jpg' % (args.outdir, args.dataset, idxname),
                    ins_center_vis[:, :, ::-1])

        # save predictions
        with open('%s/%s/flo-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, flow[::-1].astype(np.float32))
        flowvis = point_vec(imgL_o, flow)
        cv2.imwrite(
            '%s/%s/visflo-%s.jpg' % (args.outdir, args.dataset, idxname),
            flowvis)
        imwarped = ddlib.warp_flow(imgR_o, flow[:, :, :2])
        cv2.imwrite('%s/%s/warp-%s.jpg' % (args.outdir, args.dataset, idxname),
                    imwarped[:, :, ::-1])
        cv2.imwrite(
            '%s/%s/warpt-%s.jpg' % (args.outdir, args.dataset, idxname),
            imgL_o[:, :, ::-1])
        cv2.imwrite(
            '%s/%s/warps-%s.jpg' % (args.outdir, args.dataset, idxname),
            imgR_o[:, :, ::-1])
        with open('%s/%s/occ-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, occ[::-1].astype(np.float32))
        with open('%s/%s/exp-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, logexp[::-1].astype(np.float32))
        with open('%s/%s/mid-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, logmid[::-1].astype(np.float32))
        with open('%s/%s/fg-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, fgmask[::-1].astype(np.float32))
        with open('%s/%s/hm-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, heatmap[::-1].astype(np.float32))
        with open('%s/%s/pm-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, polarmask[::-1].astype(np.float32))
        ddlib.write_calib(
            K0, bl, polarmask.shape, K0[0, 0] * bl / (np.median(disp) / 5),
            '%s/%s/calib-%s.txt' % (args.outdir, args.dataset, idxname))

        # submit to KITTI benchmark
        if 'test' in args.dataset:
            outdir = 'benchmark_output'
            # kitti scene flow
            import skimage.io
            skimage.io.imsave('%s/disp_0/%s.png' % (outdir, idxname),
                              (disp * 256).astype('uint16'))
            skimage.io.imsave('%s/disp_1/%s.png' % (outdir, idxname),
                              (disp1 * 256).astype('uint16'))
            flow[:, :, 2] = 1.
            write_flow('%s/flow/%s.png' % (outdir, idxname.split('.')[0]),
                       flow)

        # save visualizations
        with open('%s/%s/disp-%s.pfm' % (args.outdir, args.dataset, idxname),
                  'w') as f:
            save_pfm(f, disp[::-1].astype(np.float32))

        try:
            # point clouds
            from utils.fusion import pcwrite
            hp2d0 = np.concatenate(
                [
                    np.tile(
                        np.arange(0, input_size[1]).reshape(1, -1),
                        (input_size[0], 1)).astype(float)[None],  # 1,2,H,W
                    np.tile(
                        np.arange(0, input_size[0]).reshape(-1, 1),
                        (1, input_size[1])).astype(float)[None],
                    np.ones(input_size[:2])[None]
                ],
                0).reshape(3, -1)
            hp2d1 = hp2d0.copy()
            hp2d1[:2] += np.transpose(flow, [2, 0, 1])[:2].reshape(2, -1)
            p3d0 = (K0[0, 0] * bl /
                    disp.flatten()) * np.linalg.inv(K0).dot(hp2d0)
            p3d1 = (K0[0, 0] * bl /
                    disp1.flatten()) * np.linalg.inv(K1).dot(hp2d1)

            def write_pcs(points3d, imgL_o, mask_input, path):
                # remove some points
                points3d = points3d.T.reshape(input_size[:2] + (3, ))
                points3d[points3d[:, :,
                                  -1] > np.median(points3d[:, :, -1]) * 5] = 0
                #points3d[:2*input_size[0]//5] = 0. # KITTI
                points3d = np.concatenate([points3d, imgL_o], -1)
                validid = np.linalg.norm(points3d[:, :, :3], 2, -1) > 0
                bgidx = np.logical_and(validid, mask_input == 0)
                fgidx = np.logical_and(validid, mask_input > 0)
                pcwrite(path.replace('/pc', '/fgpc'), points3d[fgidx])
                pcwrite(path.replace('/pc', '/bgpc'), points3d[bgidx])
                pcwrite(path, points3d[validid])

            if inx == 0:
                write_pcs(p3d0,
                          imgL_o,
                          mask_input,
                          path='%s/%s/pc0-%s.ply' %
                          (args.outdir, args.dataset, idxname))
                write_pcs(p3d1,
                          imgL_o,
                          mask_input,
                          path='%s/%s/pc1-%s.ply' %
                          (args.outdir, args.dataset, idxname))
        except:
            pass
        torch.cuda.empty_cache()
    print(np.mean(ttime_all))
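# The resize step near the top of main() rounds each dimension up to a multiple
# of 64 before inference; the same computation as a small helper (a sketch):
def pad_to_multiple(size, base=64):
    # Floor to a multiple of `base`, then bump up one step if anything was lost,
    # mirroring the max_h / max_w logic above.
    rounded = int(size // base) * base
    return rounded if rounded >= size else rounded + base

assert pad_to_multiple(128) == 128 and pad_to_multiple(129) == 192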
예제 #47
0
            # Possible cases:
            # 'A' = new sector
            # 'B' = disc has not moved to another sector
            # 'C' = disc is out
            if((drawSectors==True) and (caso != 'C')):
                colored_sector(frame, pos_D1)

            if(caso == 'A'):
                if(pos_D1[0]<=pos_D0[0]): #current_sector[x] <= old_sector[x]

                    disc_advances = True

                    # If we wanted to draw an arrow showing where the disc moves:
                    pt1 = calcRobotPos_in_mm(pos_D0)
                    pt2 = calcRobotPos_in_mm(pos_D1)
                    cv2.arrowedLine(frame, pt1, pt2, (0, 100, 255), thickness=3)

            elif(caso == 'C'):
                pos_D0 = (-2,-2)
                disc_Out = True

        #--ROBOT SECTOR POSITION CONTROL ------------------------------------------------
        if (disc_advances) and (center_ROBOT is not None):

            caso, pos_R = update_actual_sector_position(
                                    center_ROBOT[0],center_ROBOT[1],pos_R)


            S_1 = calculoEstado(pos_D0, pos_D1, pos_R)
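# The sector checks above originally compared strings with `is` / `is not`,
# which tests object identity rather than equality (and raises a SyntaxWarning
# on Python 3.8+); a quick demonstration of the difference:
caso = ''.join(['A', 'B'])  # equal to 'AB', but a freshly built object
print(caso == 'AB')         # True  -> value comparison, what the code intends
print(caso is 'AB')         # False -> identity comparison, hence the bug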

예제 #48
0
import cv2 as cv

def draw_arrow(canvas,cord1,cord2):
    cv.arrowedLine(canvas,cord1,cord2,(0,255,0),10)
예제 #49
0
import numpy as np
import cv2

#img = cv2.imread('hello/lena.jpg', 1)
img= np.zeros([600,600,3], np.uint8)

img= cv2.rectangle(img, (0,0), (510,510), (0,0,255), -1)
img= cv2.line(img, (0,0), (510,255), (0,255,255), 10,)
img= cv2.arrowedLine(img, (0,0), (255,510), (255,255,0), 2)
img= cv2.rectangle(img, (255,255), (510,510), (0,255,0), 5)
img= cv2.circle(img, (255,255), 150, (150,0,255), 10)

font= cv2.FONT_HERSHEY_SCRIPT_COMPLEX
img= cv2.putText(img, "Sample", (150,255), font, 3, (255,0,0), 5, cv2.LINE_8)
#img= cv2.ellipse(img, (300,300), )

cv2.imshow('image', img)
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
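# The key check above originally used `and` instead of `&`. The usual idiom masks
# cv2.waitKey's return value down to its low byte before comparing:
import cv2

key = cv2.waitKey(1)  # -1 on timeout, otherwise a platform-dependent integer
if key != -1 and (key & 0xFF) == ord('q'):
    cv2.destroyAllWindows()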
예제 #50
0
            alpha = float(tmp[12])
            xmin = int(bbox2D[0])
            ymin = int(bbox2D[1])
            xmax = int(bbox2D[0]) + int(bbox2D[2])
            ymax = int(bbox2D[1]) + int(bbox2D[3])
            x = (bbox2D[0] + bbox2D[2]) / 2
            cv2.line(image, (xmin, ymin), (xmax, ymin), (0, 255, 0), 2, lineType=cv2.LINE_AA)
            cv2.line(image, (xmax, ymin), (xmax, ymax), (0, 255, 0), 2, lineType=cv2.LINE_AA)
            cv2.line(image, (xmin, ymax), (xmin, ymin), (0, 255, 0), 2, lineType=cv2.LINE_AA)
            cv2.line(image, (xmax, ymax), (xmin, ymax), (0, 255, 0), 2, lineType=cv2.LINE_AA)
            ct_x = xmin + bbox2D[2] / 2
            ct_y = ymin + bbox2D[3] / 2
            cv2.circle(image, (int(ct_x), int(ct_y)), 10, (0, 255, 0), -1)
            end_x = ct_x + np.cos(rotation_y)*100
            end_y = ct_y - np.sin(rotation_y)*100
            cv2.arrowedLine(image, (int(ct_x),int(ct_y)), (int(end_x), int(end_y)), (0,255,0), 2) # green
            #alpha = rot_y2alpha(rotation_y, x, calib[0, 2], calib[0, 0])
            alpha_x = ct_x + np.cos(alpha)*100
            alpha_y = ct_y - np.sin(alpha)*100
            cv2.arrowedLine(image, (int(ct_x), int(ct_y)), (int(alpha_x), int(alpha_y)), (255,0,0), 2) # blue

            # rotation_y = -rotation_y
            theta = RotationMatrixToeulerAngles(Rtilt)
            box_3d = compute_box_3d_world(dim, location, rotation_y, theta)

            # print(cat_id, location, dim, rotation_y, bbox2D, alpha)
            # put the axis-swapped values into ann
            sun_permutation = [0, 2, 1]
            # added 2019-12-26
            print("Rtilt")
            print(Rtilt)
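# The end_x/end_y and alpha_x/alpha_y computations above negate the sine term
# because image rows grow downward; the same idea as a reusable helper (a sketch):
import numpy as np

def angle_to_arrow_end(ct_x, ct_y, angle, length=100):
    # Endpoint of an arrow of the given length starting at (ct_x, ct_y).
    return int(ct_x + np.cos(angle) * length), int(ct_y - np.sin(angle) * length)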
예제 #51
0
import cv2
import numpy as np

#img = cv2.imread('lena.jpg', 1)
# create an empty image with NumPy
img = np.zeros([512, 512, 3], np.uint8)
# the color argument is ordered blue, green, red (BGR)
# to look up color codes, search for an RGB color picker
img = cv2.line(img, (0, 0), (255, 255), (147, 96, 44), 3)
img = cv2.arrowedLine(img, (0, 255), (255, 255), (147, 96, 44), 3)
# a thickness of -1 fills the rectangle
# the first point is the top-left vertex
# the second point is the bottom-right vertex
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 0, 255), 1)
img = cv2.circle(img, (447, 63), 63, (0, 255, 0), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
# draw text on an image
img = cv2.putText(img, 'Hola', (10, 500), font, 4, (255, 255, 255), 10, cv2.LINE_AA)
cv2.imshow('image', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
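# As the comments above note, OpenCV orders channels blue, green, red. Converting
# to RGB for display libraries that expect it is just a channel reversal:
import numpy as np

bgr = np.zeros((2, 2, 3), np.uint8)
bgr[..., 0] = 255      # blue channel in OpenCV's B, G, R ordering
rgb = bgr[:, :, ::-1]  # reversed to R, G, B ordering
assert rgb[0, 0, 2] == 255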
예제 #52
0
                                          minArea=2000,
                                          filter=4,
                                          cThr=[50, 50],
                                          draw=False)

        if len(conts) != 0:
            for obj in conts2:
                cv2.polylines(imgC, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utilis.reorder(obj[2])
                nW = round((utilis.findDist(nPoints[0][0] // scale,
                                            nPoints[1][0] // scale) / 10), 1)
                nH = round((utilis.findDist(nPoints[0][0] // scale,
                                            nPoints[2][0] // scale) / 10), 1)

                cv2.arrowedLine(imgC, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgC, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgC, '{}cm'.format(nW), (x + 30, y - 10),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255),
                            2)
                cv2.putText(imgC, '{}cm'.format(nH), (x - 70, y + h // 2),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255),
                            2)

        cv2.imshow("cont in cont", imgC)

    if cv2.waitKey(20) & 0xFF == ord('q'):
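# utilis.reorder and utilis.findDist come from the author's helper module, which
# is not shown here; a plausible stand-in for the distance helper (hypothetical,
# for illustration only):
import math

def findDist(pt1, pt2):
    # Euclidean distance between two (x, y) points, in pixels.
    return math.hypot(pt2[0] - pt1[0], pt2[1] - pt1[1])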
예제 #53
0
    def find_train(self, mat, color_splits):
        lab_split = color_splits['lab']
        yuv_split = color_splits['yuv']
        hls_split = color_splits['hls']
        
        train_yuv_uthreshed = cv2.inRange(yuv_split[1], self.options['train_yuv_u_min'], self.options['train_yuv_u_max'])
        train_lab_athreshed = cv2.inRange(lab_split[1], self.options['train_lab_a_min'], self.options['train_lab_a_max'])
        train_hls_hthreshed = cv2.inRange(hls_split[0], self.options['train_hls_h_min'], self.options['train_hls_h_max'])
        train_threshed = train_yuv_uthreshed & train_lab_athreshed & train_hls_hthreshed
        
        train_eroded = cv2.erode(train_threshed, np.ones((10, 10)))
        train_dilated = cv2.dilate(train_eroded, np.ones((100, 100)))

        if self.options['debugging']:
            self.post('train_lab_athreshed', train_lab_athreshed)
            self.post('train_yuv_uthreshed', train_yuv_uthreshed)
            self.post('train_hls_hthreshed', train_hls_hthreshed)
            self.post('train_threshed', train_threshed)
            self.post('train_eroded', train_eroded.copy())
            self.post('train_dilated', train_dilated.copy())

        _, train_threshed_contours, train_threshed_hierarchy = cv2.findContours(train_eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        if train_threshed_hierarchy is None:
            print('Cannot find any train contours in the picture')
            shm.recovery_results.train_heuristic_score.set(0)
            return
        
        _, train_dilated_contours, train_dilated_hierarchy = cv2.findContours(train_dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        
        train_dilated_contour_areas = [[cv2.contourArea(c), i, cv2.boundingRect(c), c]
                                       for i, c in enumerate(train_dilated_contours)]
        train_bounding_contour = max(train_dilated_contour_areas, key=lambda x: x[0])

        train_threshed_contours = [c for c in train_threshed_contours
                                   if point_in_rect(center(cv2.boundingRect(c)), train_bounding_contour[2])]

        
        if self.options['debugging']:
            all_contours_drawing = np.copy(mat)
            cv2.drawContours(all_contours_drawing, [c for c in train_threshed_contours if cv2.contourArea(c)], -1,
                (255, 255, 0), 2)
            cv2.drawContours(all_contours_drawing, [c for c in train_dilated_contours if cv2.contourArea(c)], -1,
                (255, 0, 0), 2)
            self.post('train_threshed_contours', all_contours_drawing)

        train_threshed_contour_areas = [[cv2.contourArea(c), i, cv2.boundingRect(c), c]
                                        for i, c in enumerate(train_threshed_contours)]

        train_threshed_contour_areas.sort(key=lambda x: x[0], reverse=True)
        train_threshed_contour_areas = train_threshed_contour_areas[:4]
        
        train_threshed_contour_areas_by_x = sorted(train_threshed_contour_areas, key=lambda x: center(x[2])[0])
        biggest_block_index_by_x, biggest_block = max(enumerate(train_threshed_contour_areas_by_x), key=lambda x: x[1][0])
        biggest_block_rotated_rect = list(cv2.minAreaRect(biggest_block[3]))

        if biggest_block_rotated_rect[1][0] >  biggest_block_rotated_rect[1][1]:
            biggest_block_rotated_rect[1] = biggest_block_rotated_rect[1][1], biggest_block_rotated_rect[1][0]
            biggest_block_rotated_rect[2] += 90
    
        train_threshed_contour_areas_by_y = sorted(train_threshed_contour_areas, key=lambda x: center(x[2])[1])
        biggest_block_index_by_y, _ = max(enumerate(train_threshed_contour_areas_by_y), key=lambda x: x[1][0])
        
        vertical = False
        
        if len(train_threshed_contour_areas) == 4:
            if biggest_block_index_by_x > 1:
                biggest_block_rotated_rect[2] += 180
                
            if biggest_block_index_by_y > 1:
                vertical = True
        elif len(train_threshed_contour_areas) == 3:
            if biggest_block_index_by_x == 2:
                biggest_block_rotated_rect[2] += 180
            elif biggest_block_index_by_x == 1:
                if train_threshed_contour_areas_by_x[0][0] < train_threshed_contour_areas_by_x[2][0]:
                    biggest_block_rotated_rect[2] += 180
                    
            if biggest_block_index_by_y == 2:
                vertical = True
            elif biggest_block_index_by_y == 1:
                if train_threshed_contour_areas_by_y[0][0] < train_threshed_contour_areas_by_y[2][0]:
                    vertical = True
        elif len(train_threshed_contour_areas) == 2:
            if biggest_block_index_by_x == 1:
                biggest_block_rotated_rect[2] += 180
                
            if biggest_block_index_by_y == 1:
                vertical = True

        biggest_block_rotated_rect[2] %= 360
        biggest_block_rotated_rect[2] = 360 - biggest_block_rotated_rect[2]

        if 65 < biggest_block_rotated_rect[2] < 115 and not vertical:
            biggest_block_rotated_rect[2] += 180
        elif 245 < biggest_block_rotated_rect[2] < 294 and vertical:
            biggest_block_rotated_rect[2] -= 180
        
        
        if self.options['debugging']:
            arrow_distance = 200
            angle = 360 - biggest_block_rotated_rect[2]
            arrow_x_dist = int(arrow_distance * cos(radians(angle)))
            arrow_y_dist = int(arrow_distance * sin(radians(angle)))
            
            arrow_start = tuple(map(int, biggest_block_rotated_rect[0]))
            arrow_end = (arrow_start[0] + arrow_x_dist, arrow_start[1] + arrow_y_dist)
            
            cv2.arrowedLine(all_contours_drawing, arrow_start, arrow_end, (255, 0, 0), 4)
    
        shm.recovery_results.train_center_x.set(int(biggest_block_rotated_rect[0][0]))
        shm.recovery_results.train_center_y.set(int(biggest_block_rotated_rect[0][1]))
        shm.recovery_results.train_angle.set(biggest_block_rotated_rect[2])
        shm.recovery_results.train_heuristic_score.set(train_bounding_contour[0])
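# The angle bookkeeping above starts from cv2.minAreaRect, which returns
# ((cx, cy), (w, h), angle); a minimal sketch of the width/height swap the
# snippet performs so that the angle always refers to the long side:
import cv2
import numpy as np

pts = np.array([[0, 0], [40, 0], [40, 10], [0, 10]], dtype=np.float32)
(cx, cy), (w, h), angle = cv2.minAreaRect(pts)
if w > h:  # same normalization as biggest_block_rotated_rect above
    w, h = h, w
    angle += 90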
예제 #54
0
if matches_to_use <= MIN_MATCHES:
    print("Insufficient point number")
    exit()

# apply ransac
src_pts = np.float32([kp1[match.queryIdx].pt for match in matches])
dst_pts = np.float32([kp2[match.trainIdx].pt for match in matches])
F, mask = cv2.findFundamentalMat(src_pts.reshape(-1, 1, 2),
                                 dst_pts.reshape(-1, 1, 2), cv2.RANSAC,
                                 RANSAC_REPROJ, RANSAC_ACCURACY)
match_mask = mask.ravel().tolist()

# draw matches
params = dict(matchesMask=match_mask)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, **params)
# filter points
good_src = [src_pts[i] for i in range(len(src_pts)) if match_mask[i] == 1]
good_dst = [dst_pts[i] for i in range(len(dst_pts)) if match_mask[i] == 1]
print("Found {} good matches".format(len(good_src)))

# print arrows
arrows = img1.copy()
for i in range(len(good_src)):
    # print(good_src[i])
    # OpenCV drawing functions need integer pixel coordinates
    cv2.arrowedLine(arrows, tuple(np.int32(good_src[i])), tuple(np.int32(good_dst[i])),
                    (0, 100, 255))
cv2.imshow('matches', arrows)
cv2.imshow('origin', img1)
cv2.waitKey()
cv2.destroyAllWindows()
cap.release()
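# Equivalently, the inlier filtering above can use boolean-mask indexing on the
# NumPy arrays directly (made-up values for illustration):
import numpy as np

mask = np.array([[1], [0], [1]], dtype=np.uint8)  # shape as returned by findFundamentalMat
src_pts = np.float32([[0, 0], [5, 5], [9, 1]])
inliers = src_pts[mask.ravel() == 1]              # keeps rows 0 and 2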
예제 #55
0
    if ok:
        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))

        # note: colors are blue, green, red (lmao why)

        # rectangle around target (blue)
        cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)

        # from this information we can then derive distance from center and angle of correction using some fancy math.
        target_center = (int(p1[0] + (bbox[2] / 2.0)),
                         int(p1[1] + (bbox[3] / 2.0)))

        # target correction arrow (green)
        cv2.arrowedLine(image, video_center, target_center, (0, 255, 0), 3)

        # target reticule (red)
        cv2.circle(image,
                   target_center,
                   radius, (0, 0, 255),
                   thickness=2,
                   lineType=8,
                   shift=0)

        cv2.putText(image, "Tracking Successful", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
    else:
        # Tracking failure
        cv2.putText(image, "Tracking failure detected", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
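
A minimal sketch of the correction math the comment above alludes to (the helper name and conventions are assumptions, not part of the original):

import math

def correction(video_center, target_center):
    # Pixel offset of the target from the frame center
    dx = target_center[0] - video_center[0]
    dy = target_center[1] - video_center[1]
    distance = math.hypot(dx, dy)             # pixels from center
    angle = math.degrees(math.atan2(dy, dx))  # 0 deg = right; y grows downward
    return distance, angle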
예제 #56
0
def main(yolo):
    selectingObject = False  # use the detector rather than manual selection
    initTracking = True
    onTracking = False
    duration = 0.01
    pts = deque(maxlen=50)
    bx2 = [0, 0, 0, 0]
    tracker = KCFTracker(True, True, True)
    # Kalman filter initialization
    P = np.diag([3000.0, 3000.0, 3000.0, 3000.0])  # initial state covariance
    I = np.eye(4)
    H = np.matrix([[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])  # measure velocities only
    ra = 0.15  # measurement noise, vendor-supplied
    R = np.matrix([[ra, 0.0], [0.0, ra]])
    noise_ax = 0.3  # process noise (acceleration), x
    noise_ay = 0.3  # process noise (acceleration), y
    cv2.namedWindow('tracking')
    #cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('./oc1.mp4')
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if (selectingObject):
            continue
        elif (initTracking):
            image = Image.fromarray(frame[..., ::-1])  # BGR to RGB
            box, class_names = yolo.detect_image(image)  # detect with YOLO
            ix0 = int(box[0][0])
            iy = int(box[0][1])
            w = int(box[0][2])
            h = int(box[0][3])
            tracker.init([ix0, iy, w, h], frame)
            # center of the detected box
            ixx0 = int((box[0][0]) + (box[0][2]) / 2)
            iyy0 = int((box[0][1]) + (box[0][3]) / 2)

            # GH4 camera (alternative calibration)
            '''
            D0 = 2 * (355.2 / w)   # meters, forward distance
            x_cen = 640
            xd0 = ixx0 - x_cen
            # geometric similarity
            X0 = D0 / 887.9 * xd0  # meters, lateral offset
            '''
            # Xiaomi camera
            D0 = (1978.9 / h)  # meters, forward distance
            x_cen = 703
            xd0 = ixx0 - x_cen
            # geometric similarity
            X0 = xd0 * (1.54 / h)  # meters, lateral offset

            # DJI camera (alternative calibration)
            '''
            D0 = (3006.5 / h)  # meters, forward distance
            x_cen = 1361.8
            xd0 = ixx0 - x_cen
            # geometric similarity
            X0 = xd0 * (1.75 / h)  # meters, lateral offset
            '''
            state = np.matrix([[X0, D0, 0.0, 0.0]]).T  # [X, D, Vx, Vd]
            state_2D = np.matrix([[ixx0, iyy0, 0.0, 0.0]]).T  # pixel-space state
            initTracking = False
            onTracking = True
        elif (onTracking):
            t0 = time()
            boundingbox = tracker.update(frame)
            x = boundingbox[0]
            y = boundingbox[1]
            w = boundingbox[2]
            h = boundingbox[3]
            cx = int(x + w / 2)
            cy = int(y + h / 2)
            boundingbox = list(map(int, boundingbox))
            x1 = boundingbox[0]
            y1 = boundingbox[1]
            w1 = boundingbox[2]
            h1 = boundingbox[3]
            ix = int((boundingbox[0]) + (boundingbox[2]) / 2)
            iy = int((boundingbox[1]) + (boundingbox[3]) / 2)
            ht = boundingbox[3]
            wt = boundingbox[2]
            # GH4 camera (alternative calibration)
            '''
            D = 2 * (355.2 / wt)  # meters, forward distance
            x_cen = 640
            xd = ix - x_cen
            # geometric similarity
            X = D / 887.9 * xd    # meters, lateral offset
            '''
            # Xiaomi camera
            D = (1978.9 / ht)  # meters, forward distance
            x_cen = 703
            xd = ix - x_cen  # lateral pixel offset of the current box center
            # geometric similarity
            X = xd * (1.52 / ht)  # meters, lateral offset
            # DJI camera (alternative calibration)
            '''
            D = (3006.5 / ht)  # meters, forward distance
            x_cen = 1361.8
            xd = ix - x_cen
            # geometric similarity
            X = xd * (1.75 / ht)  # meters, lateral offset
            '''
            td = time()
            dt = td - t0  # time step between filter updates
            # State transition for a constant-velocity model
            F = np.matrix([[1.0, 0.0, dt, 0.0], [0.0, 1.0, 0.0, dt],
                           [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
            dt_2 = dt * dt
            dt_3 = dt_2 * dt
            dt_4 = dt_3 * dt
            # Process-noise covariance for the constant-velocity model
            Q = np.matrix(
                [[0.25 * dt_4 * noise_ax, 0, 0.5 * dt_3 * noise_ax, 0],
                 [0, 0.25 * dt_4 * noise_ay, 0, 0.5 * dt_3 * noise_ay],
                 [0.5 * dt_3 * noise_ax, 0, dt_2 * noise_ax, 0],
                 [0, 0.5 * dt_3 * noise_ay, 0, dt_2 * noise_ay]])
            # Finite-difference velocity measurements (metric and pixel space)
            dX = X - state[0][0]
            dD = D - state[1][0]
            dVx = int(dX / dt)
            dVd = int(dD / dt)
            dX2 = cx - state_2D[0][0]
            dD2 = cy - state_2D[1][0]
            dVx2 = int(dX2 / dt)
            dVd2 = int(dD2 / dt)
            state = F * state  # Project the state ahead
            state_2D = F * state_2D
            P = F * P * F.T + Q  # Project the error covariance ahead
            # Measurement Update (Correction)
            # ==============================
            S = H * P * H.T + R
            K = (P * H.T) * np.linalg.pinv(S)
            # Update the estimate via z
            # Ideally these velocities would come from a dedicated sensor;
            # lacking one, they are approximated from the change in X and D
            # computed from the video, divided by the corresponding dt.
            Z = np.matrix([[float('%.2f' % dVx)], [float('%.2f' % dVd)]])
            Z2 = np.matrix([[float('%.2f' % dVx2)], [float('%.2f' % dVd2)]])
            ykf = Z - (H * state)
            y2kf = Z2 - (H * state_2D)
            state = state + (K * ykf)
            state_2D = state_2D + (K * y2kf)
            # Update the error covariance
            P = (I - (K * H)) * P
            # Draw the overlay and annotate the tracked object's state
            # Pick the dominant motion direction from the filtered pixel-space
            # velocity, then set the ghost-box offset and the arrow endpoints.
            vx2d = int(state_2D[2][0])
            vy2d = int(state_2D[3][0])
            if abs(vx2d) - abs(vy2d) > 29:
                bx2[0] = int(x + w / 15)
                bx2[1] = int(y - h / 15)
                bx2[2] = int(w)
                bx2[3] = int(h)
                if vx2d > 0:  # moving right
                    arrow_start = (int((x1 + bx2[0]) / 2 + w1),
                                   int((y1 + bx2[1]) / 2 + h1))
                    arrow_end = (arrow_start[0] + 50, arrow_start[1])
                else:  # moving left
                    arrow_start = (int((x1 + bx2[0]) / 2),
                                   int((y1 + bx2[1]) / 2 + h1))
                    arrow_end = (arrow_start[0] - 50, arrow_start[1])
            else:
                bx2[0] = int(x + w / 5)
                bx2[1] = int(y - h / 5)
                bx2[2] = int(w)
                bx2[3] = int(h)
                if vy2d < 0:  # moving away from the camera
                    arrow_start = (int(bx2[0] + bx2[2] / 2), bx2[1] + bx2[3])
                    arrow_end = (arrow_start[0] + 50, arrow_start[1] - 50)
                else:  # moving toward the camera
                    arrow_start = (int(x1 + w1 / 2), y1 + h1)
                    arrow_end = (arrow_start[0] - 50, arrow_start[1] + 50)

            # Tracked box and its offset "ghost" box (red)
            cv2.rectangle(frame, (x1, y1), (x1 + w1, y1 + h1), (0, 0, 255), 2)
            cv2.rectangle(frame, (bx2[0], bx2[1]),
                          (bx2[0] + bx2[2], bx2[1] + bx2[3]), (0, 0, 255), 2)
            # Edges joining the corresponding corners (magenta)
            cv2.line(frame, (x1, y1), (bx2[0], bx2[1]), (255, 0, 255), 2)
            cv2.line(frame, (x1 + w1, y1), (bx2[0] + bx2[2], bx2[1]),
                     (255, 0, 255), 2)
            cv2.line(frame, (x1, y1 + h1), (bx2[0], bx2[1] + bx2[3]),
                     (255, 0, 255), 2)
            cv2.line(frame, (x1 + w1, y1 + h1),
                     (bx2[0] + bx2[2], bx2[1] + bx2[3]), (255, 0, 255), 2)
            # Diagonals of both boxes
            cv2.line(frame, (x1, y1), (x1 + w1, y1 + h1), (255, 255, 255), 1)
            cv2.line(frame, (bx2[0], bx2[1]),
                     (bx2[0] + bx2[2], bx2[1] + bx2[3]), (100, 100, 50), 1)
            cv2.line(frame, (x1 + w1, y1), (x1, y1 + h1), (255, 255, 0), 1)
            cv2.line(frame, (bx2[0] + bx2[2], bx2[1]),
                     (bx2[0], bx2[1] + bx2[3]), (100, 75, 50), 1)
            # Direction arrow (red)
            cv2.arrowedLine(frame, arrow_start, arrow_end, (0, 0, 255), 3)

            center = (int(cx), int(cy))
            pts.appendleft(center)
            for i in range(1, len(pts)):
                if pts[i - 1] is None or pts[i] is None:
                    continue
                cv2.line(frame, (pts[i - 1]), (pts[i]), (0, 255, 0), 2)
            t1 = time()
            duration = 0.8 * duration + 0.2 * (t1 - t0)  # smoothed frame time
            cv2.putText(frame, 'FPS: {:.1f}'.format(1 / duration), (8, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            cv2.putText(
                frame, 'Pedestrian status camera-coor X,D: '
                '{:.2f}, {:.2f}'.format(state[0, 0], state[1, 0]), (8, 40),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            cv2.putText(
                frame, 'Pedestrian status camera-coor Vx,Vd: '
                '{:.2f}, {:.2f}'.format(state[2, 0], state[3, 0]), (8, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        cv2.imshow('tracking', frame)
        c = cv2.waitKey(20) & 0xFF
        if c == 27 or c == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
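
The predict/update cycle buried in the loop above can be distilled into a standalone helper. A minimal sketch using the same np.matrix conventions (the function name is an assumption, not part of the original):

import numpy as np

def kf_step(state, P, z, F, Q, H, R, I):
    # Predict: project the state and its covariance ahead
    state = F * state
    P = F * P * F.T + Q
    # Update: correct with the measurement z
    S = H * P * H.T + R
    K = (P * H.T) * np.linalg.pinv(S)
    y = z - H * state
    state = state + K * y
    P = (I - K * H) * P
    return state, P

Called once per frame, the loop body reduces to state, P = kf_step(state, P, Z, F, Q, H, R, I).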
예제 #57
0
    def compute_optical_flow(self,
                             frame,
                             color=[0, 0, 255],
                             region=None,
                             ang_vel=[0., 0., 0.],
                             average=False):
        # Zero out the mask
        mask = np.zeros_like(frame)

        # Extract region values (fall back to the full frame)
        region_str = str(region)
        if region is None:
            region_x = 0
            region_y = 0
            region_width = self.default_size[0]
            region_height = self.default_size[1]
        else:
            region_x = int(region[0])
            region_y = int(region[1])
            region_width = int(region[2])
            region_height = int(region[3])

        # Initialize previous-frame data for this region, if needed
        if region_str not in self.regions_frame_buffer:
            self.regions_frame_buffer[region_str] = FrameBuffer(
                self.buffer_size)
            self.regions_frame_buffer[region_str].fill(
                get_gray(frame, img_type='rgba'))
            self.regions_p0[region_str] = get_grid([region_x, region_y],
                                                   region_width, region_height,
                                                   self.num_points)

        # Get appropriate data for the region
        p0 = self.regions_p0[region_str]
        prev_gray = self.regions_frame_buffer[region_str].pop()

        # Get gray images
        gray = get_gray(frame, img_type='rgba')

        # Calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, gray, p0, None,
                                               **self.lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        self.regions_frame_buffer[region_str].add_frame(gray)

        # Get static optical flow
        u = (good_new - good_old) / self.dt

        # Cancel out flow from rotational motion
        u_stable = self.remove_rotation(good_old, u, ang_vel)

        scale = 1.0 / self.buffer_size
        if average:
            u_stable = np.average(u_stable, 0)
            region_center = np.array(
                [region_x + region_width / 2, region_y + region_height / 2])
            a, b = region_center.ravel()
            c, d = region_center + scale * u_stable
            mask = cv2.arrowedLine(mask, (int(a), int(b)), (int(c), int(d)),
                                   color, 2)
            mask = cv2.rectangle(
                mask, (region_x, region_y),
                (region_x + region_width, region_y + region_height), color)
            frame = cv2.circle(frame, (int(a), int(b)), 3, color, -1)
        else:
            for pt, vec in zip(good_old, u_stable):
                a, b = pt.ravel()
                c, d = pt + scale * vec
                # OpenCV drawing calls need integer pixel coordinates
                a, b, c, d = int(a), int(b), int(c), int(d)
                mask = cv2.arrowedLine(mask, (a, b), (c, d), color, 1)
                frame = cv2.circle(frame, (a, b), 2, color, -1)

        if not self.display_init:
            self.display = cv2.add(frame, mask)
            self.display_init = True
        else:
            self.display = cv2.add(self.display, mask)

        if average:
            return u_stable
        return [u_stable, good_old]
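
get_grid is a project-specific helper not shown here. A plausible stand-in (an assumption, not the original implementation) that returns an evenly spaced grid of float32 points shaped for cv2.calcOpticalFlowPyrLK:

import numpy as np

def get_grid(origin, width, height, n):
    # n x n points spread over the region, shaped (N, 1, 2) for OpenCV
    xs = np.linspace(origin[0], origin[0] + width, n)
    ys = np.linspace(origin[1], origin[1] + height, n)
    grid = np.array([(x, y) for y in ys for x in xs], dtype=np.float32)
    return grid.reshape(-1, 1, 2)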
예제 #58
0
import numpy as np
import cv2

# Read the image; flag 1 loads it in color as stored, 0 as grayscale, -1 unchanged (including the alpha channel)
img = cv2.imread("./images/pienza-town-italy-1080x720.jpg", 1)

# Drawing a straight line
img = cv2.line(img, (0, 0), (255, 255), (255, 0, 0), 10)

# Drawing an arrowed line
img = cv2.arrowedLine(img, (0, 200), (255, 200), (255, 255, 100), 10)

# Drawing a rectangle
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)

# Drawing a circle
img = cv2.circle(img, (447, 103), 63, (0, 0, 255), 0)

# Drawing an ellipse
img = cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)

# Drawing a polygon
pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
img = cv2.polylines(img, [pts], True, (0, 255, 255))

# Adding Text to Images
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, 'OpenCV By Noy', (10, 500), font, 4, (255, 255, 255), 2,
                  cv2.LINE_AA)
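
The snippet draws on img but never displays or saves the result; a typical tail, assuming a local display is available, might be:

cv2.imshow('drawing demo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()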
예제 #59
0
• color: for BGR, pass a tuple such as (255, 0, 0) for blue; the first value
   is the blue channel, the second the green channel, the third the red
   channel. For grayscale images, just pass the scalar gray value.
• thickness: line thickness. If -1 is given for a closed shape, the shape is
   filled. The default is 1.
• linetype: type of line: 8-connected, anti-aliased, etc. The default is
   8-connected; cv2.LINE_AA draws anti-aliased lines, which look much
   smoother.

'''

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

# Draw a diagonal blue line with thickness of 5 px
cv2.line(img, pt1=(0, 0), pt2=(511, 511), color=(255, 0, 0), thickness=5)
# cv2.polylines() can draw several line segments at once: put the lines in a
# list and pass the list to the function. Each line is drawn independently,
# which is faster than calling cv2.line() once per segment.
# cv2.polylines(img, pts, isClosed, color, thickness=None, lineType=None, shift=None)
cv2.arrowedLine(img, pt1=(21, 13), pt2=(151, 401), color=(255, 0, 0), thickness=5)

cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)

cv2.circle(img, center=(447, 63), radius=63, color=(0, 0, 255), thickness=-1)  # thickness=-1 fills the circle

# The arguments are the center point, the lengths of the major and minor axes,
# and the ellipse's counter-clockwise rotation angle.
# startAngle and endAngle give the clockwise start and end of the elliptic
# arc; 0 and 360 draw the complete ellipse.
cv2.ellipse(img, center=(256, 256), axes=(100, 50), angle=0, startAngle=0, endAngle=180, color=255,
            thickness=-1)

pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
# The first reshape argument is -1: that dimension's length is inferred from
# the remaining dimensions.
# Note: if the third argument is False, the polygon is left open (the first
# and last points are not joined).
cv2.polylines(img, [pts], True, (0, 255, 255))
                                                    filter=4,
                                                    cThresh=[50, 50],
                                                    draw=False)
        if len(Contours1) != 0:
            for obj in Contours2:
                cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
                newPoints = utils.reorder(obj[2])
                # Divide the pixel coordinates by the scale, then the distance by 10 to get centimeters
                newWidth = round((utils.findDist(
                    newPoints[0][0] // scale, newPoints[1][0] // scale) / 10),
                                 1)
                newHeight = round((utils.findDist(
                    newPoints[0][0] // scale, newPoints[2][0] // scale) / 10),
                                  1)
                cv2.arrowedLine(imgContours2,
                                (newPoints[0][0][0], newPoints[0][0][1]),
                                (newPoints[1][0][0], newPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2,
                                (newPoints[0][0][0], newPoints[0][0][1]),
                                (newPoints[2][0][0], newPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgContours2, '{}cm'.format(newWidth),
                            (x + 30, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                            1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(newHeight),
                            (x - 70, y + h // 2),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255),
                            2)

        cv2.imshow('A4', imgContours2)
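
For reference, the positional arguments in the two arrowedLine calls above follow cv2.arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength); a minimal standalone illustration:

import numpy as np
import cv2

canvas = np.zeros((200, 200, 3), np.uint8)
# 3 px magenta arrow, 8-connected line, no fractional bits, 5% tip length
cv2.arrowedLine(canvas, (20, 100), (180, 100), (255, 0, 255), 3, cv2.LINE_8, 0, 0.05)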