def red_detect(frame):
    # Detect the red/orange color range and annotate the frame.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = (0, 230, 150)
    upper = (30, 255, 255)
    red = cv2.inRange(hsv, lower, upper)
    kernel = np.ones((5, 5), "uint8")
    red = cv2.dilate(red, kernel)
    res = cv2.bitwise_and(frame, frame, mask=red)
    # Note: this 3-tuple unpacking matches OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy).
    (ret, contours, hierarchy) = cv2.findContours(
        red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    x = 0
    y = 0
    w = 0
    h = 0
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 100):
            x, y, w, h = cv2.boundingRect(contour)
            frame = cv2.rectangle(
                frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(frame, "RED color", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))
    cv2.drawMarker(frame, (480, 350), (255, 255, 0),
                   markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
    cv2.drawMarker(frame, ((x + w//2), (y + h//2)), (255, 255, 0),
                   markerType=cv2.MARKER_SQUARE, markerSize=5, thickness=10)
    cv2.arrowedLine(frame, (480, 350), ((x + w//2), (y + h//2)), (255, 0, 0), 5)
    cv2.rectangle(frame, (330, 200), (630, 500), (0, 255, 0), 1)
    return frame, x, y, w, h  # Return the annotated frame and the bounding box (x, y, w, h)
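# A minimal usage sketch (assumed, not part of the original snippet): feed webcam frames
# through red_detect() and display the annotated result. The camera index and window
# name below are placeholder choices.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame, x, y, w, h = red_detect(frame)
    cv2.imshow("red_detect", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()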
def main(anchorFrame, targetFrame, outfile="OUTPUT", saveOutput=False, blockSize=8):
    """
    :param anchorFrame: file path of the I-frame, or the I-frame itself
    :param targetFrame: file path of the current frame, or the current frame itself
    :return: the target frame with motion vectors drawn on it
    """
    editedFrame = copy.copy(targetFrame)
    anchorFrame, targetFrame = preprocess(
        anchorFrame, targetFrame, blockSize)  # accepts either a frame or a file path and returns frames
    hSegments, wSegments = segmentImage(anchorFrame, blockSize)
    vectors = blockSearchBody(anchorFrame, targetFrame, blockSize)

    bcount = 0
    for y in range(0, int(hSegments * blockSize), blockSize):
        for x in range(0, int(wSegments * blockSize), blockSize):
            if (x, y) != vectors[bcount]:
                # print((x, y))
                # print(vectors[bcount])
                p = vectors[bcount]
                cv2.arrowedLine(
                    editedFrame,
                    (p[0] + int(blockSize / 2), p[1] + int(blockSize / 2)),
                    (x + int(blockSize / 2), y + int(blockSize / 2)),
                    (0, 255, 0), 1)
            bcount = bcount + 1
    return editedFrame
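# A hedged usage sketch (not from the original): overlay motion vectors for two consecutive
# frames loaded from disk. The file names are placeholders, and the helpers
# preprocess/segmentImage/blockSearchBody are assumed to be defined elsewhere in the project.
anchor = cv2.imread("frame_000.png")
target = cv2.imread("frame_001.png")
overlay = main(anchor, target, blockSize=8)
cv2.imshow("motion vectors", overlay)
cv2.waitKey(0)
cv2.destroyAllWindows()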
def click_event(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 3, (0, 0, 255), -1)  # Draw a circle at the clicked point
        point_coords.append((x, y))
        if len(point_coords) >= 2:
            # Draw an arrowed line from the previous point to the latest point
            cv2.arrowedLine(img, point_coords[-2], point_coords[-1], (0, 255, 0), 2)
        cv2.imshow('Window1', img)
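# A minimal setup sketch (assumed): the callback above relies on the globals `img` and
# `point_coords` and must be registered on the window with cv2.setMouseCallback.
# The image file name is a placeholder.
import cv2

img = cv2.imread('lena.jpg', 1)
point_coords = []
cv2.namedWindow('Window1')
cv2.setMouseCallback('Window1', click_event)
cv2.imshow('Window1', img)
cv2.waitKey(0)
cv2.destroyAllWindows()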
def draw_path(image, route, index, stations):
    paths_color = [
        (10, 164, 62),   # green
        (235, 23, 23),   # red
        (8, 117, 191),   # blue
        (254, 205, 0)    # yellow
    ]
    route_color = paths_color[index]
    if len(route['route']) > 1:
        for point_index, station_index in enumerate(route['route'][:-1]):
            station = stations[station_index]
            next_station_index = route['route'][point_index + 1]
            next_station = stations[next_station_index]
            pt1 = (station['centroid'][0], station['centroid'][1])
            pt2 = (next_station['centroid'][0], next_station['centroid'][1])
            cv2.arrowedLine(image, pt1, pt2, route_color, 3)
import numpy as np
import cv2

# img = cv2.imread("Screenshot (123).png", 1)
img = np.zeros([512, 512, 3], np.uint8)

img = cv2.line(img, (350, 350), (700, 350), (90, 15, 50), 10)
img = cv2.arrowedLine(img, (700, 350), (1050, 350), (50, 15, 90), 10)
img = cv2.rectangle(img, (350, 355), (700, 700), (150, 0, 135), -1)
img = cv2.circle(img, (500, 500), 100, (150, 0, 135), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, "OpenCV", (500, 350), font, 3, (255, 0, 130), 5, cv2.LINE_AA)

cv2.imshow('Screenshot', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import cv2

image = cv2.imread('lena.jpg', 1)

image = cv2.line(image, (0, 0), (255, 255), (0, 0, 255), 2)
image = cv2.arrowedLine(image, (0, 0), (255, 255), (0, 255, 255), 2)
image = cv2.rectangle(image, (25, 70), (200, 170), (0, 255, 0), 5)
image = cv2.circle(image, (447, 63), 63, (255, 0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
image = cv2.putText(image, "OpenCV", (10, 500), font, 5, (255, 255, 255), 5, cv2.LINE_AA)

cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_little_map(lm, points_2d, motion, id, armor_color):
    '''
    Display the mini-map and mark the targets on it.
    :param points_2d: 2D coordinates of the points to mark
    :return: the annotated mini-map image
    '''
    Red = 1
    Blue = 2
    Others = 3
    ratio = 0.5
    arrow_scale = 10
    cur_pic = lm.pic.copy()

    # Reference point, i.e. the camera position
    offset = (int(1 / 2 * lm.map_width), int(0.98 * lm.map_height))
    cv.circle(cur_pic, offset, 7, (0, 255, 0), 3)

    # Scaled size of the mini-map
    width = lm.get_width() * ratio
    height = lm.get_height() * ratio
    motion = np.array(motion)
    # print(width, height)
    # print("show_little_map : ")
    for i, point in enumerate(points_2d):
        # print("points_2d :", i, " \n", points_2d)
        # print("motion : \n", motion[i])
        # Current motion trend
        cur_motion = np.array(motion[i] * arrow_scale, dtype=int)
        # Current position
        cur_position = (point[0], point[1])
        '''A proper projective transform needs the exact camera height and camera
        parameters, so it is not used for now.
        # scale_x = point[0]/(lm.count_width)*(lm.map_width)
        # scale_y = point[1]/(lm.count_height)*(lm.map_height)
        #
        # cur_position = (int(offset[0] + scale_x), int(offset[1] - scale_y))
        # print("scale : (", cur_position[0], ",", cur_position[1], ")")
        # print("point : (", point)
        # motion_direction = (point[0]+cur_motion[0], point[1]+cur_motion[1])
        '''
        # Motion direction
        motion_direction = (cur_position[0] + cur_motion[0],
                            cur_position[1] + cur_motion[1])
        # Draw the ID label
        label = '{}{:d}'.format("", id[i])
        cv.putText(cur_pic, label, (cur_position[0] + 10, cur_position[1] + 10),
                   cv.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
        color = armor_color[i]
        if (color == Red):
            circle_color = (0, 0, 255)
        elif (color == Blue):
            circle_color = (255, 0, 0)
        else:
            circle_color = (255, 255, 255)
        # Draw the current position and the motion trend
        cv.circle(cur_pic, cur_position, 10, circle_color, 2)
        cv.arrowedLine(cur_pic, cur_position, motion_direction,
                       (0, 255, 0), 5, 8, 0, 0.3)
    cur_pic = cv.resize(cur_pic, (int(width), int(height)),
                        interpolation=cv.INTER_AREA)
    return cur_pic
# Make different shapes on an image
import cv2

img = cv2.imread('lena.jpg', 1)

img = cv2.line(img, (0, 0), (160, 160), (0, 255, 0), 10)  # Make a line
img = cv2.arrowedLine(img, (0, 0), (100, 100), (0, 0, 255), 10)  # Make an arrowed line
img = cv2.rectangle(img, (180, 180), (400, 400), (255, 0, 0), 7)  # Produce a rectangle
img = cv2.circle(img, (290, 290), 100, (255, 255, 0), -1)  # Produce a circle
# Note: passing thickness=-1 fills the whole shape, as with the circle above

# Writing text on the image
img = cv2.putText(img, "HOTT Sensation!!", (10, 450),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

cv2.imshow('window1', img)
if cv2.waitKey(0) == 27:
    cv2.destroyAllWindows()
n_frames = file_size // (width * height * 2)
prevFrame = None

f = open(yuv_filename, 'rb')
yuv = np.frombuffer(f.read(width * height * 2),
                    dtype=np.uint8).reshape(height, width, 2)
prevFrame = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_YUYV)
yuv = np.frombuffer(f.read(width * height * 2),
                    dtype=np.uint8).reshape(height, width, 2)
frame = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_YUYV)
f.close()

bcount = 0
blockSize = 8
# 'vectors' is assumed to be computed beforehand (e.g. by a block-matching search);
# the loops cover a 1280x720 frame tiled into 8x8 blocks (160x90 blocks).
for y in range(0, int(90 * 8), 8):
    for x in range(0, int(160 * 8), 8):
        if (x, y) != (vectors[bcount][0], vectors[bcount][1]):
            p = vectors[bcount]
            cv2.arrowedLine(
                frame,
                (p[0] + int(blockSize / 2), p[1] + int(blockSize / 2)),
                (x + int(blockSize / 2), y + int(blockSize / 2)),
                (0, 255, 0), 1)
        bcount = bcount + 1

cv2.imshow("Vektori", frame)
cv2.waitKey()

# Convert YUV420 to grayscale
# old_gray = cv2.cvtColor(old_yuv, cv2.COLOR_YUV2GRAY_I420)
# cv2.imshow('frame_gs', old_gray)
# cv2.waitKey()
imgContours2, conts2 = utlis.getContours(imgWarp, minArea=2000, filter=4,
                                         cThr=[50, 50], draw=False)
if len(conts) != 0:
    for obj in conts2:
        cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
        nPoints = utlis.reorder(obj[2])
        nW = round((utlis.findDis(nPoints[0][0] // scale, nPoints[1][0] // scale) / 10), 1)
        nH = round((utlis.findDis(nPoints[0][0] // scale, nPoints[2][0] // scale) / 10), 1)
        cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                        (nPoints[1][0][0], nPoints[1][0][1]),
                        (255, 0, 255), 3, 8, 0, 0.05)
        cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                        (nPoints[2][0][0], nPoints[2][0][1]),
                        (255, 0, 255), 3, 8, 0, 0.05)
        x, y, w, h = obj[3]
        cv2.putText(imgContours2, '{}cm'.format(nW), (x + 30, y - 10),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
        cv2.putText(imgContours2, '{}cm'.format(nH), (x - 70, y + h // 2),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
    cv2.imshow('Size Of Image', imgContours2)

img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
def draw_detected_objects(frame, detected_objects: List[DetectedObject]):
    cv2.putText(frame, "Number of Detected Objects: " + str(len(detected_objects)),
                (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 0, 255], 2)

    for detected_object in detected_objects:
        if detected_object.distance.measured:
            color = [255, 0, 0]
        elif detected_object.object_type == DetectedObjectType.SquareTimber:
            color = [255, 0, 255]
        else:
            color = [0, 255, 0]

        # draw bounding box
        min_point = (int(detected_object.bounding_box.min_x),
                     int(detected_object.bounding_box.min_y))
        max_point = (int(detected_object.bounding_box.max_x),
                     int(detected_object.bounding_box.max_y))
        cv2.rectangle(frame, min_point, max_point, color, 1)
        cv2.putText(frame,
                    _determine_object_type_string_representation(detected_object.object_type),
                    (min_point[0], min_point[1] - 45),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        cv2.putText(frame, str(detected_object.distance),
                    (min_point[0], min_point[1] - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        cv2.putText(frame, "Probability [" + str(detected_object.probability) + "]",
                    (min_point[0], min_point[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        for relative_object in detected_object.relative_detected_objects_from_relative_type(
                RelativeObjectType.IN_FRONT):
            obj_bbox = detected_object.bounding_box
            rel_obj_bbox = relative_object.bounding_box
            start_point = (int(obj_bbox.center_x()), int(obj_bbox.center_y()))
            end_point = (int(rel_obj_bbox.center_x()), int(rel_obj_bbox.center_y()))
            frame = cv2.arrowedLine(frame, start_point, end_point, [30, 30, 160], 2)
            cv2.putText(frame, "IN_FRONT_OF", (end_point[0], end_point[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 0, 255], 2)

        for relative_object in detected_object.relative_detected_objects_from_relative_type(
                RelativeObjectType.BEHIND):
            obj_bbox = detected_object.bounding_box
            rel_obj_bbox = relative_object.bounding_box
            start_point = (int(obj_bbox.center_x()), int(obj_bbox.center_y()))
            end_point = (int(rel_obj_bbox.center_x()), int(rel_obj_bbox.center_y()))
            frame = cv2.arrowedLine(frame, start_point, end_point, [30, 30, 160], 2)
            cv2.putText(frame, "BEHIND", (end_point[0], end_point[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, [60, 0, 255], 2)

        for relative_object in detected_object.relative_detected_objects_from_relative_type(
                RelativeObjectType.RIGHT):
            obj_bbox = detected_object.bounding_box
            rel_obj_bbox = relative_object.bounding_box
            start_point = (int(obj_bbox.max_x), int(obj_bbox.min_y + obj_bbox.height / 4))
            end_point = (int(rel_obj_bbox.min_x), int(rel_obj_bbox.min_y + rel_obj_bbox.height / 4))
            frame = cv2.arrowedLine(frame, start_point, end_point, [30, 30, 160], 2)
            cv2.putText(frame, "RIGHT", (end_point[0], end_point[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 0, 255], 2)

        for relative_object in detected_object.relative_detected_objects_from_relative_type(
                RelativeObjectType.LEFT):
            obj_bbox = detected_object.bounding_box
            rel_obj_bbox = relative_object.bounding_box
            start_point = (int(obj_bbox.min_x), int(obj_bbox.min_y + obj_bbox.height / 4))
            end_point = (int(rel_obj_bbox.max_x), int(rel_obj_bbox.min_y + rel_obj_bbox.height / 4))
            frame = cv2.arrowedLine(frame, start_point, end_point, [30, 30, 160], 2)
            cv2.putText(frame, "LEFT", (end_point[0], end_point[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 0, 255], 2)
def main():
    parser = argparse.ArgumentParser(
        description="combine two images with an arrow in between. "
                    "Spacing and color are chosen automatically and are meant to look nice"
    )
    parser.add_argument(
        "image_file_1", action="store", type=str,
        help="the image file on the left"
    )
    parser.add_argument(
        "image_file_2", action="store", type=str,
        help="the image file on the right"
    )
    parser.add_argument(
        "--output", "-o", action="store", required=False, type=str,
        help="the output image file (e.g. out.jpg, out.png). If omitted, "
             "<img1>-<img2>.png is generated in the current directory",
    )
    parser.add_argument(
        "--scale", "-s", required=False, default=1.0, action="store", type=float,
        help="the scale of the generated image: 1 for no scaling, 0.5 for half the size, etc.",
    )
    argv = parser.parse_args()

    img1 = cv2.imread(argv.image_file_1, 1)
    img2 = cv2.imread(argv.image_file_2, 1)

    s_height = min(img1.shape[0], img2.shape[0])
    b_height = max(img1.shape[0], img2.shape[0])
    s_width = min(img1.shape[1], img2.shape[1])
    b_width = max(img1.shape[1], img2.shape[1])

    frame = np.full((b_height, 2 * s_width + b_width, 3), 255, dtype=np.uint8)
    place_on_top(img1, frame, [(b_height - img1.shape[0]) // 2, 0])
    place_on_top(
        img2, frame, [(b_height - img2.shape[0]) // 2, s_width + img1.shape[1]]
    )

    m1 = cv2.mean(img1)
    m2 = cv2.mean(img2)
    mean_color = []
    for i in range(3):
        mean_color.append(int(m1[i] + m2[i]) // 2)

    cv2.arrowedLine(
        frame,
        (img1.shape[1] + s_width // 5, b_height // 2),
        (img1.shape[1] + s_width - s_width // 5, b_height // 2),
        mean_color,
        8,
        tipLength=0.6,
    )

    assert argv.scale > 0, "scale has to be a positive float"
    frame = cv2.resize(
        frame, (int(frame.shape[1] * argv.scale), int(frame.shape[0] * argv.scale))
    )

    outname = argv.output
    if argv.output is not None:
        try:
            cv2.imwrite(argv.output, frame)
        except cv2.error as e:
            print(e, file=stderr)
            print("Failed to save the image")
            print("Did you forget to specify the image format in the output file name?")
    else:
        default_name = (
            path.splitext(path.basename(argv.image_file_1))[0]
            + "-"
            + path.splitext(path.basename(argv.image_file_2))[0]
            + ".png"
        )
        outname = default_name
        cv2.imwrite(default_name, frame)

    cv2.imshow(outname, frame)
    while cv2.getWindowProperty(outname, cv2.WND_PROP_VISIBLE) == 1:
        if cv2.waitKey(50) != -1:
            break
    cv2.destroyAllWindows()
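# A hedged usage note (not part of the original snippet): the function above expects
# argparse, cv2, numpy (np), sys.stderr, os.path and a place_on_top() helper to be
# imported/defined at module level. A hypothetical invocation, with placeholder names:
#   python combine_with_arrow.py left.png right.png -o combined.png --scale 0.5
if __name__ == "__main__":
    main()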
import numpy as np
import cv2

img = cv2.imread('lena.png', -1)
img = np.zeros([512, 512, 3], np.uint8)  # Black image (overwrites the loaded one)

# Line
img = cv2.line(img, (0, 0), (255, 255), (255, 0, 0), 2)
# Arrow
img = cv2.arrowedLine(img, (0, 255), (255, 255), (0, 255, 0), 2)
# Rectangle
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 0, 255), 2)
# Circle
img = cv2.circle(img, (447, 63), 63, (255, 255, 255), 2)
# Text
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, 'Texto', (10, 500), font, 4, (0, 255, 0), 3, cv2.LINE_AA)

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()