def of_dataset(folder="testset", model=None, view=False):
    '''measure the error across the given dataset: it compares the measured
    points with the annotated ground truth; optionally you can [view] the results'''
    assert model is not None

    # load face and landmark detectors
    utils.load_shape_predictor(model)
    # utils.init_face_detector(True, 150)

    # init average error
    err = 0
    num = 0

    for img, lmarks, path in utils.ibug_dataset(folder):
        # detections
        face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
        measured = utils.detect_landmarks(img, face)

        # get error
        num += 1
        err += normalized_root_mean_square(lmarks, measured)

        # results:
        if view is True:
            utils.draw_rect(img, face, color=Colors.yellow)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.draw_points(img, measured, color=Colors.red)
            utils.show_image(utils.show_properly(utils.crop_image(img, face)))

        print(err, num, err / num)

    print("average NRMS Error for {} is {}".format(folder, err / num))
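# Hypothetical helper (not shown above): of_dataset() relies on
# normalized_root_mean_square(), which the snippet does not define. This is a
# minimal sketch of one common definition for landmark evaluation, the mean
# point-to-point error normalised by the inter-ocular distance, assuming the
# landmarks are (x, y) pairs in the 68-point iBUG markup (outer eye corners at
# indices 36 and 45). The real project may normalise differently.
import numpy as np

def normalized_root_mean_square(ground_truth, measured):
    gt = np.asarray(ground_truth, dtype=np.float64)
    pred = np.asarray(measured, dtype=np.float64)

    # mean Euclidean distance between corresponding points
    point_errors = np.linalg.norm(gt - pred, axis=1)

    # inter-ocular distance used as the normalisation factor
    interocular = np.linalg.norm(gt[36] - gt[45])

    return point_errors.mean() / interocular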
def draw(self):
    if self.should_render:
        if self.images is not None:
            utils.draw_image(self.images[self.image_state], self.x, self.y)
        else:
            utils.draw_rect(self.color, self.x, self.y, self.width, self.height)
def draw(self):
    super().draw()
    self.cook_meter.draw()
    self.batter_selector.draw()
    if self.selected_batter is not constants.DUMP_BATTER:
        utils.draw_rect(
            colors.black,
            self.batter_selector.x - constants.BATTER_OUTLINE_WIDTH,
            self.batter_selector.y
            + constants.BATTER_ICON_SIZE * BATTER_NAMES.index(self.selected_batter)
            - constants.BATTER_OUTLINE_WIDTH,
            constants.BATTER_ICON_SIZE + constants.BATTER_OUTLINE_WIDTH * 2,
            constants.BATTER_ICON_SIZE + constants.BATTER_OUTLINE_WIDTH * 2,
            outline_width=constants.BATTER_OUTLINE_WIDTH)
def draw_face(image):
    '''store the detected face box and draw its rectangle on the image'''
    global boxes

    # detect faces
    faces = utils.detect_faces(image)

    if len(faces) > 0:
        face = faces[0]
        utils.draw_rect(image, face)
        boxes.append(face)
def average_hardware(self):
    """With the scene already drawn to the main sample FBO, use OpenGL to
    get an average of the values of every pixel.

    """
    # Generate a mipmap of the sample buffer - the 4x4 level will contain
    # the information we need to work out the incident light.
    glBindTexture(GL_TEXTURE_2D, self.sample_tex.id)
    glGenerateMipmapEXT(GL_TEXTURE_2D)

    # Draw the sample buffer to a 4x4 rect on the other buffer. Mip-mapping
    # will mean it contains the average of the full sample buffer.
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, self.sample_fbo_b)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glViewport(0, 0, 4, 4)
    glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0)
    glBindTexture(GL_TEXTURE_2D, self.sample_tex.id)
    utils.draw_rect()

    # The target texture now contains a tiny 4x4 hemicube in the corner.
    # Read the values back.
    pixel_data = (GLubyte * (4 * 4 * 4))(0)
    glReadPixels(0, 0, 4, 4, GL_RGBA, GL_UNSIGNED_BYTE, pixel_data)

    # Reset the state
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0)

    # Average the RGB values for the cubemap (so ignore the corner pixels)
    red_value = 0
    green_value = 0
    blue_value = 0
    for y in xrange(4):
        for x in xrange(4):
            if y in (0, 3) and x in (0, 3):
                # Ignore corner pixels
                continue
            pixel_index = y * 4 + x
            pixel_index *= 4  # 4 channels
            red_value += pixel_data[pixel_index]
            green_value += pixel_data[pixel_index + 1]
            blue_value += pixel_data[pixel_index + 2]

    # We've sampled 12 pixels. Divide by 12 to get the mean, and by 255
    # to normalise.
    red_value /= 12.0 * 255.0
    green_value /= 12.0 * 255.0
    blue_value /= 12.0 * 255.0
    return (red_value, green_value, blue_value)
image = cv2.imread(img_file, 0)
if image is None:
    print('No Image found')
    break

roi_new, roi_points_new = ut.lk_wrapper_v2(image, template, roi, roi_points)

# templ_out = cv2.cvtColor(template, cv2.COLOR_GRAY2BGR)
# templ_out = cv2.rectangle(templ_out, tuple(roi[0]), tuple(roi[1]), (0, 250, 0), 2, cv2.LINE_AA)
# templ_out = draw_rect(templ_out, roi_points)

image_out = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
image_out = cv2.rectangle(image_out, tuple(roi_new[0]), tuple(roi_new[1]), (0, 250, 0), 2, cv2.LINE_AA)
image_out = ut.draw_rect(image_out, roi_points_new)

plt.imshow(image_out)
plt.show()

frame += 1
frame_str = str(frame).zfill(4)

roi = roi_new
roi_points = roi_points_new
# roi_points = rect_corners(roi_new)
template = image.copy()
import cv2
import os
from time import sleep

import utils as ut

video = cv2.VideoCapture(0)

while True:
    ret, img = video.read()
    faces, gray_img = ut.face_detection(img)

    for face in faces:
        (x, y, w, h) = face
        print("Face detected!\nPosition: ({}, {})\tSize: ({}px, {}px)\n".format(x, y, w, h))
        ut.draw_rect(img, face)
        ut.put_text(img, "Person", x, y)

    resized_img = cv2.resize(img, (1000, 700))
    cv2.imshow('Video', resized_img)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
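# Hypothetical helpers (not shown above): the webcam loop relies on
# ut.face_detection(), ut.draw_rect() and ut.put_text(). A minimal sketch of
# what such helpers could look like with OpenCV's bundled Haar cascade; the
# actual utils module may be implemented differently.
import cv2

_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def face_detection(img):
    """Return (faces, gray_img), with faces as (x, y, w, h) tuples."""
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = _cascade.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)
    return faces, gray_img

def draw_rect(img, face, color=(0, 255, 0), thickness=2):
    """Draw a bounding box for a single (x, y, w, h) face."""
    x, y, w, h = face
    cv2.rectangle(img, (x, y), (x + w, y + h), color, thickness)

def put_text(img, text, x, y, color=(0, 255, 0)):
    """Label the face just above its bounding box."""
    cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)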
def draw(self):
    super().draw()
    if self.cook_time < constants.MAX_COOK_TIME:
        utils.draw_rect(
            colors.gray,
            self.x + 1,
            self.y + self.height - 2,
            self.width - 2,
            -(self.height - int(((self.cook_time + 1) / constants.MAX_COOK_TIME) * self.height)))
        utils.draw_small_text(str(self.cook_time), self.x, self.y)
# sentence = '一二手房车'
sentence = '新年快乐'
num = len(sentence)
width = 100  # width of a single character cell: 100*100
# all_width = width * num  # total width
center = (20, 0)

# counter = 0
for i, word in enumerate(sentence):
    # turtle.clear()
    # x_pos = width * (i - num / 2)
    x_pos = 0 + center[0]
    y_pos = width * (i - num / 2 + 1) + center[1]
    # draw_rect(x=x_pos, y=0, width=width)
    draw_rect(x=x_pos, y=y_pos, width=width)
    print('----------', word)

    strokes = data[word]
    for st in strokes:
        # the character has to be rotated: swap the x/y axes when mapping the points
        print('--')
        for p1, p2 in zip(st[0:-1], st[1:]):
            # x1 = x_pos + p1['x'] / 10
            # y1 = y_pos - p1['y'] / 10
            x1 = x_pos + p1['y'] / 10
            y1 = y_pos + p1['x'] / 10 - width
            # x2 = x_pos + p2['x'] / 10
            # y2 = y_pos - p2['y'] / 10
            x2 = x_pos + p2['y'] / 10
def sample(self, position, heading, pitch):
    """Return the RGB value of the incident light at the given position.

    Renders the scene to a cubemap and gets the average of the pixels.

    """
    # Bind the main, full-size FBO
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, self.sample_fbo)
    for buffer_id in GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT:
        glClear(buffer_id)

    # Draw each face of the cube map
    for setup in self.view_setups:
        # Setup matrix
        glViewport(*setup["viewport"])
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(90.0, 1.0, 0.001, 100.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        glEnable(GL_DEPTH_TEST)
        glRotatef(90.0, 0.0, 1.0, 0.0)
        glRotatef(-90.0, 1.0, 0.0, 0.0)
        glRotatef(utils.rad_to_deg(pitch) + setup["pitch"], 0.0, 1.0, 0.0)
        glRotatef(utils.rad_to_deg(heading) + setup["heading"], 0.0, 0.0, -1.0)
        glTranslatef(-position[0], -position[1], -position[2])

        # Draw the scene
        self.render_func()

    # Draw multiplier map on top. First, set the matrix
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glViewport(0, 0, self.sample_size, self.sample_size)
    glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0)

    # Setup the state
    glDisable(GL_DEPTH_TEST)
    glColor4f(1.0, 1.0, 1.0, 1.0)
    glEnable(GL_BLEND)
    glBlendFunc(GL_ZERO, GL_SRC_COLOR)
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, self.multiplier_map.id)

    # Draw the map
    utils.draw_rect()

    # Reset the state
    glDisable(GL_BLEND)
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0)

    # Get the average value of all the pixels in the sample
    if self.average_method == HARDWARE:
        sample_average = self.average_hardware()
    elif self.average_method == SOFTWARE:
        sample_average = self.average_software()
    else:
        raise ValueError("Unknown sample method %s" % self.average_method)

    # Divide by a constant otherwise the compensation map won't give you
    # the full range. TODO: generate the constant
    incident_light = [val / 0.40751633986928104 for val in sample_average]
    return incident_light
def display_seg(image_path, gt_bbox):
    # fig, axes = plt.subplots(2, 3, figsize=(8, 6), frameon=False)
    # xs = [0.25, 0.8]
    # bbox_agg = set()
    # for ax, x in zip(axes[:, 0], xs):
    #     bboxes, img = selective_search_bboxwh(image_path, sigma=x)
    #     for bbox in bboxes:
    #         bbox_agg.add(bbox)
    #         color = np.random.rand(3,)
    #         draw_rect(ax, img, bbox, edgecolor=color, linewidth=1)
    #         ax.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, c=color, marker='o')
    #     ax.set_xlabel(str(len(bboxes)))
    #     ax.set_ylabel(str(x))
    #     x, y, w, h = gt_bbox[0], gt_bbox[1], gt_bbox[2] - gt_bbox[0], gt_bbox[3] - gt_bbox[1]
    #     draw_rect(ax, img, (x, y, w, h), edgecolor='green', linewidth=2)

    # xs = [500, 1500]
    # for ax, x in zip(axes[:, 1], xs):
    #     bboxes, img = selective_search_bboxwh(image_path, scale=x)
    #     for bbox in bboxes:
    #         bbox_agg.add(bbox)
    #         color = np.random.rand(3,)
    #         draw_rect(ax, img, bbox, edgecolor=color, linewidth=1)
    #         ax.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, c=color, marker='o')
    #     ax.set_xlabel(str(len(bboxes)))
    #     ax.set_ylabel(str(x))
    #     x, y, w, h = gt_bbox[0], gt_bbox[1], gt_bbox[2] - gt_bbox[0], gt_bbox[3] - gt_bbox[1]
    #     draw_rect(ax, img, (x, y, w, h), edgecolor='green', linewidth=2)

    # xs = [2, 25]
    # for ax, x in zip(axes[:, 2], xs):
    #     bboxes, img = selective_search_bboxwh(image_path, rescale=x)
    #     ax.imshow(img)
    #     for bbox in bboxes:
    #         bbox_agg.add(bbox)
    #         color = np.random.rand(3,)
    #         draw_rect(ax, img, bbox, edgecolor=color, linewidth=1)
    #         ax.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, c=color, marker='o')
    #     ax.set_xlabel(str(len(bboxes)))
    #     ax.set_ylabel(str(x))
    #     x, y, w, h = gt_bbox[0], gt_bbox[1], gt_bbox[2] - gt_bbox[0], gt_bbox[3] - gt_bbox[1]
    #     # draw_rect(ax, img, (x, y, w, h), edgecolor='green', linewidth=2)

    # image = skimage.io.imread(image_path)
    # width, height = image.shape[0], image.shape[1]
    # bbox_agg = set()
    # [bbox_agg.add(y) for x in [0.25, 0.8] for y in selective_search_bboxwh(image, sigma=x)]
    # [bbox_agg.add(y) for x in [500, 1500] for y in selective_search_bboxwh(image, scale=x)]
    # [bbox_agg.add(y) for x in [2, 25] for y in selective_search_bboxwh(image, rescale=x)]

    # fig2, axes = plt.subplots(2, 4, figsize=(8, 6), frameon=False)
    # xs = [-0.3, -0.4, -0.5, -0.6]
    # img1 = Image.open(image_path)
    # img2 = Image.open(image_path)
    # for ax1, ax2, z in zip(axes[0], axes[1], xs):
    #     bboxes_clustered = []
    #     cluster_colors = []
    #     X = np.array([[bb[0] / width, bb[1] / height, bb[2] / width, bb[3] / height] for bb in bbox_agg])
    #     bbox_colors = np.zeros((len(X), 3))
    #     af = AffinityPropagation(preference=z).fit(X)
    #     labels = af.labels_
    #     for cluster in np.unique(labels):
    #         bboxes_cluster = X[labels == cluster]
    #         cluster_color = np.random.rand(3,)
    #         bbox_colors[labels == cluster] = cluster_color
    #         km = KMeans(n_clusters=1).fit(bboxes_cluster)
    #         closest, _ = pairwise_distances_argmin_min(km.cluster_centers_, bboxes_cluster)
    #         centroid = km.cluster_centers_[0]
    #         bboxes_clustered.append((centroid[0] * width, centroid[1] * height, centroid[2] * width, centroid[3] * height))
    #         cluster_colors.append(cluster_color)
    #
    #     x, y, w, h = gt_bbox[0], gt_bbox[1], gt_bbox[2] - gt_bbox[0], gt_bbox[3] - gt_bbox[1]
    #     bboxA = (x, y, w, h)
    #     count = 0
    #     draw_rect(ax1, img1, (x, y, w, h), edgecolor='green', linewidth=2)
    #     draw_rect(ax2, img2, (x, y, w, h), edgecolor='green', linewidth=2)
    #     for i, bbox in enumerate(bboxes_clustered):
    #         iou = bb_intersection_over_union(bboxA, bbox)
    #         draw_rect(ax1, img1, bbox, edgecolor='red', linewidth=1, text=str(iou * 100)[:2])
    #         ax1.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, c=cluster_colors[i], marker='o')
    #         if iou > 0.5:
    #             count += 1
    #             draw_rect(ax2, img2, bbox, edgecolor='red', linewidth=1, text=str(iou * 100)[:2].lstrip('0'))
    #             ax2.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, c=cluster_colors[i], marker='o')
    #     ax1.set_xlabel(str(len(bboxes_clustered)))
    #     ax1.set_ylabel(str(z))
    #     ax2.set_xlabel(str(count))
    #     ax2.set_ylabel(str(z))

    image = skimage.io.imread(image_path)
    width, height = image.shape[1], image.shape[0]
    fig2, axes = plt.subplots(1, 4, figsize=(16, 8), frameon=False)

    x, y, w, h = gt_bbox[0], gt_bbox[1], gt_bbox[2] - gt_bbox[0], gt_bbox[3] - gt_bbox[1]
    bboxA = (x, y, w, h)

    bboxes1 = cluster_bboxes(selective_search_bbox_fast(image, int((width * height) / 50)),
                             width, height, preference=-0.25, fast=True)
    bboxes2 = cluster_bboxes(selective_search_bbox_fast(image, int((width * height) / 50)),
                             width, height, preference=-0.3, fast=True)
    bboxes3 = cluster_bboxes(selective_search_bbox_fast(image, int((width * height) / 50)),
                             width, height, preference=-0.35, fast=True)
    bboxes4 = cluster_bboxes(selective_search_bbox_fast(image, int((width * height) / 50)),
                             width, height, preference=-0.4, fast=True)
    bb = [bboxes1, bboxes2, bboxes3, bboxes4]

    for ax, bboxes in zip(axes, bb):
        # cl_bb = cluster_bboxes(bboxes, width, height)
        ious = []
        for bbox in bboxes:
            ious.append(bb_intersection_over_union(bboxA, bbox))
            draw_rect(ax, image, bbox, edgecolor='red', linewidth=1)
            ax.plot(bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2, marker='o')
        draw_rect(ax, image, (x, y, w, h), edgecolor='green', linewidth=2)
        ii = ['%.1f' % (iou * 100) for iou in sorted(ious, reverse=True)]
        ax.set_xlabel('{}: \n{}'.format(len(bboxes), '\n'.join(ii)))

    plt.show()
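# Hypothetical helper (not shown above): display_seg() scores every clustered
# proposal with bb_intersection_over_union(). A minimal sketch assuming both
# boxes are given as (x, y, w, h), which is how bboxA is built above; the
# project's actual helper may expect corner (x1, y1, x2, y2) format instead.
def bb_intersection_over_union(box_a, box_b):
    ax, ay, aw, ah = box_a
    bx, by, bw, bh = box_b

    # intersection rectangle (clamped to zero when the boxes do not overlap)
    ix1, iy1 = max(ax, bx), max(ay, by)
    ix2, iy2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)

    union = aw * ah + bw * bh - inter
    return inter / union if union > 0 else 0.0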
               100 + center_point[1])
sleep(2)

# # sentence = '中央经济工作会议精神出炉'
# sentence = '一二手房车'
sentence = '新年快乐'
num = len(sentence)
width = 100  # width of a single character cell: 100*100
# all_width = width * num  # total width

# counter = 0
for i, word in enumerate(sentence):
    # turtle.clear()
    x_pos = width * (i - num / 2) + center_point[0]
    draw_rect(x=x_pos, y=0 + center_point[1], width=width)
    print('----------', word)

    strokes = data[word]
    for st in strokes:
        print('--')
        turtle.goto(x_pos + st[0]['x'] / 10, -st[0]['y'] / 10 + center_point[1])
        turtle.down()
        for po in st:
            print(x_pos + po['x'], po['y'] + center_point[1])
            turtle.goto(x_pos + po['x'] / 10, -po['y'] / 10 + center_point[1])
        turtle.up()

    # counter += 1
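# Hypothetical helper (not shown above): both stroke-drawing scripts call
# draw_rect(x=..., y=..., width=...) before rendering a character. A minimal
# turtle-based sketch that outlines the square cell a character is drawn into,
# assuming (x, y) is one corner of the cell; the real helper may use a
# different anchor point or pen settings.
import turtle

def draw_rect(x, y, width):
    turtle.up()
    turtle.goto(x, y)
    turtle.down()
    for _ in range(4):  # trace the four sides of the square cell
        turtle.forward(width)
        turtle.left(90)
    turtle.up()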
def build_trainset_auto(src="dataset", dst="trainset", debug=False):
    '''build a trainset automatically from an ibug-like dataset; the images
    are taken from the [src] folder and saved to the [dst] folder'''
    utils.init_face_detector(True, 150)
    quality = [int(cv2.IMWRITE_JPEG_QUALITY), 50]

    # file count for naming
    count = int(utils.count_files_inside(dst) / 8)

    for img, lmarks, path in utils.ibug_dataset(src):
        h, w = img.shape[:2]

        # crop a bigger region around the landmarks
        region = utils.points_region(lmarks)
        scaled = region.scale(1.8, 1.8).ensure(w, h)
        img = utils.crop_image(img, scaled)

        # detect faces
        face = utils.prominent_face(utils.detect_faces(img))

        # if the cnn detector fails, try with dlib
        if face is None:
            faces = utils.detect_faces(img, detector="dlib")

            # ..if dlib also fails, take the region around the landmarks
            if not faces:
                face = region.copy()
            else:
                face = utils.prominent_face(faces)

        # adjust the landmarks to the scaled (cropped) region
        lmarks = adjust_landmarks(scaled, lmarks)

        # augmentations
        i = 0
        for image, landmarks, box in augment_data(img, face, lmarks):
            i = i + 1

            if debug:
                utils.draw_rect(image, box, color=Colors.yellow)
                utils.draw_points(image, landmarks, color=Colors.purple)
                name = f"image{i}"
                utils.show_image(show_properly(image), window=name)
            else:
                # save annotation and image
                ipath = os.path.join(dst, f"face{count}_{i}.jpg")
                apath = os.path.join(dst, f"face{count}_{i}.ann")
                cv2.imwrite(ipath, image, quality)
                Annotation(apath, box.as_list()[:4], landmarks).save()

        if debug:
            utils.draw_rect(img, face, color=Colors.red)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.show_image(show_properly(img))
        else:
            # save image and annotation
            ipath = os.path.join(dst, f"face{count}.jpg")
            apath = os.path.join(dst, f"face{count}.ann")
            cv2.imwrite(ipath, img, quality)
            Annotation(apath, face.as_list()[:4], lmarks).save()

        count = count + 1

        # info
        print("{} processed: {}\r".format(count, ipath))
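# Hypothetical helper (not shown above): build_trainset_auto() calls
# adjust_landmarks(scaled, lmarks) after cropping the image to the scaled
# region. A minimal sketch under the assumption that it only translates every
# landmark into the cropped region's coordinate frame and that the region
# object exposes its top-left corner as .left and .top; the real Region
# interface may differ.
def adjust_landmarks(region, landmarks):
    return [(px - region.left, py - region.top) for (px, py) in landmarks]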