def face_detection_image(net, image_name):
    imgs = cv2.imread(image_name)
    rows, cols, ch = imgs.shape
    # scale factors of the detection pyramid
    scales = caculate_scales(imgs)
    total_boxes = []
    for scale in scales:
        w, h = int(rows * scale), int(cols * scale)
        scale_img = tf.resize(imgs, (w, h))
        # scale_img = cv2.resize(imgs, (w, h)) / 255.0
        # reshape the network's 'data' input to the scaled image size
        net.blobs["data"].reshape(1, channel, w, h)
        # set up the input transformer
        transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
        transformer.set_transpose("data", (2, 0, 1))
        transformer.set_channel_swap("data", (2, 1, 0))
        transformer.set_raw_scale("data", raw_scale)
        # one forward pass; collect boxes from the face-probability heat map
        out = net.forward_all(data=np.asarray([transformer.preprocess("data", scale_img)]))
        boxes = generateBoundingBox(out["prob"][0, map_idx], scale)
        if boxes:
            total_boxes.extend(boxes)
    # non-maximum suppression over the boxes from all scales
    boxes_nms = np.array(total_boxes)
    true_boxes = nms_max(boxes_nms, overlapThresh=0.3)
    true_boxes = nms_average(np.array(true_boxes), overlapThresh=0.07)
    # draw the surviving boxes and save the result image
    img_cv = render_result.read_image(image_name)
    result = img_cv
    for box in true_boxes:
        result = render_result.draw_rectangle(result, (int(box[0]), int(box[1]), int(box[2]), int(box[3])), (0, 0, 255))
    render_result.save_2_file(result, "result/" + image_name.split("/")[-1])
    return true_boxes
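# generateBoundingBox is used by every detector in this file but is not shown here.
# A minimal sketch of what such a helper typically does for a fully-convolutional
# detector: every heat-map cell above a threshold is mapped back to a window in the
# un-scaled image and scored with its probability, giving [x1, y1, x2, y2, score]
# rows. The window size (227), stride (32) and threshold (0.85) below are
# assumptions for illustration, not values taken from this repository.
import numpy as np  # the snippets above assume numpy is already imported as np

def generateBoundingBox(prob_map, scale, window=227, stride=32, threshold=0.85):
    boxes = []
    for (y, x), prob in np.ndenumerate(prob_map):
        if prob < threshold:
            continue
        # map the heat-map cell back to coordinates in the original image
        x1 = float(x * stride) / scale
        y1 = float(y * stride) / scale
        x2 = float(x * stride + window) / scale
        y2 = float(y * stride + window) / scale
        boxes.append([x1, y1, x2, y2, prob])
    return boxes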
def detectfast(self, image, minComponentSize):
    # check if the image exists
    if os.path.exists(image):
        # load the image
        img = caffe.io.load_image(image)
        # compute scale
        scale = math.ceil(227.0 / int(minComponentSize))
        # scale the image
        ims = caffe.io.resize_image(img, (int(img.shape[0] * scale), int(img.shape[1] * scale)))
        # load the transformer
        self.transformer = loadTransformer(self.net_full_conv, self.meanfile, (ims.shape[0], ims.shape[1]))
        # send image for detection
        out = self.net_full_conv.forward_all(data=np.asarray([self.transformer.preprocess('data', ims)]))
        # extract the heat map
        outprob = out['prob'][0, 1]
        # generate bounding boxes
        boxes = generateBoundingBox(outprob, scale)
        # convert boxes to np array for nms
        boxes_nms = np.array(boxes)
        # perform non-maximum suppression
        true_boxes = nms_max(boxes_nms, overlapThresh=0.3)
        # validate the boxes for boundary conditions
        true_boxes = validateBoxes(true_boxes, [img.shape[0], img.shape[1]])
        # cluster the boxes
        clusters = clusterBoxes(true_boxes)
        fCluster = getAvgClusterBoxes(clusters)
        # perform validation again
        nBoxes = validateBoxes(fCluster, [img.shape[0], img.shape[1]])
        # get enclosing boxes
        nBoxes = enclosingBoxes(nBoxes)
        return nBoxes
    else:
        print("Image not found")
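# nms_max is called throughout this file with a (N x 5) array of
# [x1, y1, x2, y2, score] rows and an overlap threshold. A sketch of the usual
# greedy max-score non-maximum suppression, written as an assumption about what
# the shared helper does (it reuses np from above):
def nms_max(boxes, overlapThresh=0.3):
    if len(boxes) == 0:
        return []
    boxes = boxes.astype("float")
    x1, y1, x2, y2, scores = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    area = (x2 - x1) * (y2 - y1)
    order = np.argsort(scores)          # ascending: the best box is last
    keep = []
    while len(order) > 0:
        i = order[-1]                   # highest remaining score
        keep.append(i)
        # overlap (IoU) of the best box with every other remaining box
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0, xx2 - xx1)
        h = np.maximum(0, yy2 - yy1)
        overlap = (w * h) / (area[order[:-1]] + area[i] - w * h)
        # keep the best box, drop everything that overlaps it too much
        order = order[:-1][overlap <= overlapThresh]
    return boxes[keep].tolist()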
def face_detection_image(net, net_vf, image_name):
    '''
    Detect faces in a single image.
    '''
    scales = []
    imgs = skimage.io.imread(image_name)
    if imgs.ndim == 3:
        rows, cols, ch = imgs.shape
    else:
        rows, cols = imgs.shape
    # compute the scale factors of the detection pyramid
    min_side = rows if rows <= cols else cols
    max_side = rows if rows >= cols else cols
    # up-scaling factors
    delim = 2500 / max_side
    while delim >= 1:
        scales.append(delim)
        delim = delim - 0.5
    # down-scaling factors
    min_side = min_side * factor
    factor_count = 1
    while min_side >= face_w:
        scale = pow(factor, factor_count)
        scales.append(scale)
        min_side = min_side * factor
        factor_count += 1
    # =========================
    # scales.append(1)
    total_boxes = []
    ### for displaying the heat maps
    num_scale = len(scales)
    s1 = int(np.sqrt(num_scale)) + 1
    tt = 1
    plt.subplot(s1, s1 + 1, tt)
    plt.axis('off')
    plt.title("Input Image")
    im = caffe.io.load_image(image_name)
    plt.imshow(im)
    # ============
    for scale in scales:
        w, h = int(rows * scale), int(cols * scale)
        scale_img = tf.resize(imgs, (w, h))
        # reshape the network's 'data' input to the scaled image size
        net.blobs['data'].reshape(1, channel, w, h)
        # set up the input transformer
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        # transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
        transformer.set_transpose('data', (2, 0, 1))
        transformer.set_channel_swap('data', (2, 1, 0))
        transformer.set_raw_scale('data', raw_scale)
        # one forward pass
        out = net.forward_all(data=np.asarray([transformer.preprocess('data', scale_img)]))
        ### plot the heat map for this scale
        tt = tt + 1
        plt.subplot(s1, s1 + 1, tt)
        plt.axis('off')
        plt.title("scale: " + "%.2f" % scale)
        plt.imshow(out['prob'][0, map_idx])
        # ===========
        boxes = generateBoundingBox(out['prob'][0, map_idx], scale)
        if boxes:
            total_boxes.extend(boxes)
    # non-maximum suppression
    boxes_nms = np.array(total_boxes)
    true_boxes1 = nms_max(boxes_nms, overlapThresh=0.3)
    true_boxes = nms_average(np.array(true_boxes1), overlapThresh=0.07)
    # ===================
    plt.savefig('heatmap/' + image_name.split('/')[-1])
    # draw the detected face boxes on the image
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(imgs)
    for box in true_boxes:
        im_crop = im[int(box[0]):int(box[2]), int(box[1]):int(box[3]), :]
        if im_crop.shape[0] == 0 or im_crop.shape[1] == 0:
            continue
        if re_verify(net_vf, im_crop) == True:
            rect = mpatches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                                      fill=False, edgecolor='red', linewidth=1)
            ax.text(box[0], box[1] + 20, "{0:.3f}".format(box[4]), color='white', fontsize=6)
            ax.add_patch(rect)
    plt.savefig('result/' + image_name.split('/')[-1])
    plt.close()
    return out['prob'][0, map_idx]
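# re_verify passes each candidate crop through the second verification network
# (net_vf) and keeps the box only if it still looks like a face. A hedged sketch
# of a plausible implementation; the 0.5 threshold, the assumption that class
# index 1 is the face class, and reuse of the module-level raw_scale are all
# assumptions, not code from this repository.
def re_verify(net_vf, im_crop, threshold=0.5):
    transformer = caffe.io.Transformer({'data': net_vf.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', raw_scale)
    # preprocess() resizes the crop to the verification net's fixed input size
    out = net_vf.forward_all(data=np.asarray([transformer.preprocess('data', im_crop)]))
    # out['prob'] has shape (1, num_classes); index 1 assumed to be "face"
    return out['prob'][0][1] > threshold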
def detect(self, image, minComponentSize):
    """Performs detection of component (slower)

    Works with images of smaller size for the detector to process

    Parameters
    ----------
    image : full path to the image (string)
    minComponentSize : the size of the least sized component class

    Returns
    -------
    nBoxes : a list of bounding boxes [x, y, w, h] of size (n x 4)
    """
    # check if the image exists:
    if os.path.exists(image):
        # load the image
        img = caffe.io.load_image(image)
        # compute scale
        scale = math.ceil(227.0 / int(minComponentSize))
        # scale the image
        ims = caffe.io.resize_image(img, (int(img.shape[0] * scale), int(img.shape[1] * scale)))
        # sliding window
        total_boxes = []
        print("Computing bounding boxes")
        start1 = time.time()
        for (x, y, imw) in sliding_window(ims, (515, 515), (minComponentSize, minComponentSize)):
            # pass the image window to the classifier network
            out = self.net_full_conv.forward_all(data=np.asarray([self.transformer.preprocess('data', imw)]))
            # get the probability matrix associated with class 1 (IC here)
            outprob = out['prob'][0, 1]
            # generate the bounding boxes based on the heat map
            boxes = generateBoundingBox(outprob, 1)
            # convert boxes to np array for nms
            boxes_nms = np.array(boxes)
            # perform non-maximum suppression
            true_boxes = nms_max(boxes_nms, overlapThresh=cfg.overlapThresh)
            # if there are any boxes returned, scale them back to the original image
            if np.any(true_boxes):
                # plotHeatmap(scaleImg, outprob, "Heat Map")
                scaled_boxes = copy.deepcopy(true_boxes)
                for i, box in enumerate(scaled_boxes):
                    scaled_boxes[i][0] += x
                    scaled_boxes[i][1] += y
                    scaled_boxes[i][2] += x
                    scaled_boxes[i][3] += y
                # drawBoundingBoxes(ims, scaled_boxes, "Scaled Image")
                for i, box in enumerate(scaled_boxes):
                    true_boxes[i][0] = scaled_boxes[i][0] / scale
                    true_boxes[i][1] = scaled_boxes[i][1] / scale
                    true_boxes[i][2] = scaled_boxes[i][2] / scale
                    true_boxes[i][3] = scaled_boxes[i][3] / scale
                total_boxes.extend(true_boxes)
        end1 = time.time()
        # print("Total time taken for this image: %f" % (end1 - start1))
        print("Performing Non-Maxima Suppression")
        # perform nms for the entire set of boxes
        boxes_nms = np.array(total_boxes)
        true_boxes = nms_max(boxes_nms, overlapThresh=0.2)
        print("Clustering bounding boxes")
        # cluster the overlapping bounding boxes
        finalBoxes = ClusterBoundingBoxes(true_boxes)
        # clusters = clusterBoxes(true_boxes)
        # fCluster = getAvgClusterBoxes(clusters)
        # print("Finding enclosing boxes")
        # group enclosed boxes
        # finalBoxes = enclosingBoxes(fCluster)
        return finalBoxes
        # draw the bounding boxes on top of the image
        # drawBoundingBoxes(im, finalBoxes, "Final Image")
    else:
        print("Image not found")
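# sliding_window yields (x, y, window) tuples so the fully-convolutional net can
# be run on fixed-size crops of a large image, as in the loop above. Sketch under
# the assumption that the second argument is the window size and the third the
# step size (the call above passes a 515x515 window stepped by minComponentSize);
# the real helper may order its arguments differently.
def sliding_window(image, windowSize, stepSize):
    win_h, win_w = int(windowSize[0]), int(windowSize[1])
    step_y, step_x = int(stepSize[0]), int(stepSize[1])
    for y in range(0, image.shape[0], step_y):
        for x in range(0, image.shape[1], step_x):
            # crop a window; windows at the border may be smaller than windowSize
            yield (x, y, image[y:y + win_h, x:x + win_w])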
def face_detection_image(net, net_vf, info):
    '''
    Detect faces in a single image.
    '''
    image_name = '/home/mjd/FaceDetection_CNN/aflw/data/' + info[1]
    scales = []
    imgs = skimage.io.imread(image_name)
    if imgs.ndim == 3:
        rows, cols, ch = imgs.shape
    elif imgs.ndim == 0:
        # 09437 channel is 0
        return 0
    else:
        rows, cols = imgs.shape
        # grey image
        imgs = skimage.color.gray2rgb(imgs)
    # compute the scale factors of the detection pyramid
    min_side = rows if rows <= cols else cols
    max_side = rows if rows >= cols else cols
    # up-scaling factors
    delim = 2500 / max_side
    while delim >= 1:
        scales.append(delim)
        delim = delim - 0.5
    # down-scaling factors
    min_side = min_side * factor
    factor_count = 1
    while min_side >= face_w:
        scale = pow(factor, factor_count)
        scales.append(scale)
        min_side = min_side * factor
        factor_count += 1
    # =========================
    # scales.append(1)
    total_boxes = []
    ### for displaying the heat maps
    num_scale = len(scales)
    s1 = int(np.sqrt(num_scale)) + 1
    tt = 1
    plt.subplot(s1, s1 + 1, tt)
    plt.axis('off')
    plt.title("Input Image")
    im = caffe.io.load_image(image_name)
    plt.imshow(im)
    # ============
    for scale in scales:
        w, h = int(rows * scale), int(cols * scale)
        scale_img = tf.resize(imgs, (w, h))
        # reshape the network's 'data' input to the scaled image size
        net.blobs['data'].reshape(1, channel, w, h)
        # set up the input transformer
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        # transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
        transformer.set_transpose('data', (2, 0, 1))
        transformer.set_channel_swap('data', (2, 1, 0))
        transformer.set_raw_scale('data', raw_scale)
        # one forward pass
        out = net.forward_all(data=np.asarray([transformer.preprocess('data', scale_img)]))
        ### plot the heat map for this scale
        tt = tt + 1
        plt.subplot(s1, s1 + 1, tt)
        plt.axis('off')
        plt.title("scale: " + "%.2f" % scale)
        heatmap = plt.imshow(out['prob'][0, map_idx])
        plt.colorbar(heatmap)  # add a color bar
        # ===========
        boxes = generateBoundingBox(out['prob'][0, map_idx], scale)
        if boxes:
            total_boxes.extend(boxes)
    # non-maximum suppression
    boxes_nms = np.array(total_boxes)
    true_boxes1 = nms_max(boxes_nms, overlapThresh=0.2)  # parameters set according to experiment
    true_boxes = nms_average(np.array(true_boxes1), overlapThresh=0.03)
    # ===================
    plt.savefig('heatmap/' + image_name.split('/')[-1])
    # draw the detected face boxes on the image
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(imgs)
    # draw the ground-truth rectangle
    rect = mpatches.Rectangle((float(info[2]), float(info[3])), float(info[4]), float(info[5]),
                              fill=False, edgecolor='blue', linewidth=1)
    ax.add_patch(rect)
    # count correct detection boxes
    result = []
    box_num = 0
    correct_num = 0
    for box in true_boxes:
        # check whether the detected rect matches the ground truth (IoU overlap)
        xx1 = np.maximum(float(info[2]), box[0])
        yy1 = np.maximum(float(info[3]), box[1])
        xx2 = np.minimum(float(info[2]) + float(info[4]), box[2])
        yy2 = np.minimum(float(info[3]) + float(info[5]), box[3])
        w = np.maximum(0, xx2 - xx1)
        h = np.maximum(0, yy2 - yy1)
        area_box = (box[2] - box[0]) * (box[3] - box[1])
        area_gt = float(info[4]) * float(info[5])
        overlap = (w * h) / (area_gt + area_box - w * h)
        box_num = box_num + 1
        if overlap > 0.3:
            correct_num = correct_num + 1
        im_crop = im[int(box[0]):int(box[2]), int(box[1]):int(box[3]), :]
        if im_crop.shape[0] == 0 or im_crop.shape[1] == 0:
            continue
        if True:  # re_verify(net_vf, im_crop) == True:
            rect = mpatches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                                      fill=False, edgecolor='red', linewidth=1)
            ax.text(box[0], box[1] + 20, "{0:.3f}".format(box[4]), color='white', fontsize=6)
            ax.add_patch(rect)
    plt.savefig('result/' + image_name.split('/')[-1])
    plt.close()
    result.append(box_num)
    result.append(correct_num)
    result.append(out['prob'][0, map_idx])
    return result
def face_detection_image(net, net_vf, image_name):
    '''
    Detect faces in a single image.
    '''
    scales = []
    imgs = skimage.io.imread(image_name)
    if imgs.ndim == 3:
        # print imgs.shape
        rows, cols, ch = imgs.shape
    else:
        rows, cols = imgs.shape
    # compute the scale factors of the detection pyramid
    min_this = rows if rows <= cols else cols
    max_this = rows if rows >= cols else cols
    # up-scaling factors
    delim = 2500 / max_this
    while delim >= 1:
        scales.append(delim)
        delim = delim - 0.5
    # down-scaling factors
    min_this = min_this * factor
    factor_count = 1
    while min_this >= face_w:
        scale = pow(factor, factor_count)
        scales.append(scale)
        min_this = min_this * factor
        factor_count += 1
    # =========================
    # scales.append(1)
    total_boxes = []
    ### for displaying the heat maps
    num_scale = len(scales)
    s1 = int(np.sqrt(num_scale)) + 1
    tt = 1
    plt.subplot(s1, s1 + 1, tt)
    plt.axis('off')
    plt.title("Input Image")
    im = caffe.io.load_image(image_name)
    plt.imshow(im)
    # ============
    for scale in scales:
        try:
            w, h = int(rows * scale), int(cols * scale)
            scale_img = tf.resize(imgs, (w, h))
            # print scale_img.shape
            # reshape the network's 'data' input to the scaled image size
            net.blobs['data'].reshape(1, channel, w, h)
            # set up the input transformer
            transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
            # transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
            transformer.set_transpose('data', (2, 0, 1))
            transformer.set_channel_swap('data', (2, 1, 0))
            transformer.set_raw_scale('data', raw_scale)
            # one forward pass
            out = net.forward_all(data=np.asarray([transformer.preprocess('data', scale_img)]))
            # print out['prob'][0,0].shape
            ### plot the heat map for this scale
            tt = tt + 1
            plt.subplot(s1, s1 + 1, tt)
            plt.axis('off')
            plt.title("scale: " + "%.2f" % scale)
            plt.imshow(out['prob'][0, map_idx])
            # print out['prob'][0,0]
            # ===========
            boxes = generateBoundingBox(out['prob'][0, map_idx], scale)
            if boxes:
                total_boxes.extend(boxes)
        except Exception:
            continue
    # non-maximum suppression
    boxes_nms = np.array(total_boxes)
    true_boxes1 = nms_max(boxes_nms, overlapThresh=0.3)
    true_boxes = nms_average(np.array(true_boxes1), overlapThresh=0.07)
    # ===================
    plt.savefig('/home/test/tangyudi/faceDetection_alfw/heatmap/' + image_name.split('/')[-1])
    # draw the detected face boxes on the image
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(imgs)
    for box in true_boxes:
        im_crop = im[int(box[0]):int(box[2]), int(box[1]):int(box[3]), :]
        if im_crop.shape[0] == 0 or im_crop.shape[1] == 0:
            continue
        # if re_verify(net_vf, im_crop) == True:
        rect = mpatches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                                  fill=False, edgecolor='red', linewidth=1)
        ax.text(box[0], box[1] + 20, "{0:.3f}".format(box[4]), color='white', fontsize=6)
        ax.add_patch(rect)
    plt.savefig('/home/test/tangyudi/faceDetection_alfw/result/' + image_name.split('/')[-1])
    plt.close()
    return out['prob'][0, map_idx]
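# nms_average is applied after nms_max in the snippets above; instead of keeping
# only the top-scoring box it merges each group of overlapping boxes into one
# averaged box. A sketch, assuming [x1, y1, x2, y2, score] rows and score-weighted
# averaging; the project's own helper may combine the boxes differently.
def nms_average(boxes, overlapThresh=0.07):
    if len(boxes) == 0:
        return []
    boxes = boxes.astype("float")
    x1, y1, x2, y2, scores = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    area = (x2 - x1) * (y2 - y1)
    order = np.argsort(scores)
    merged = []
    while len(order) > 0:
        i = order[-1]  # highest remaining score
        # overlap (IoU) of the best box with every remaining box, itself included
        xx1 = np.maximum(x1[i], x1[order])
        yy1 = np.maximum(y1[i], y1[order])
        xx2 = np.minimum(x2[i], x2[order])
        yy2 = np.minimum(y2[i], y2[order])
        w = np.maximum(0, xx2 - xx1)
        h = np.maximum(0, yy2 - yy1)
        overlap = (w * h) / (area[order] + area[i] - w * h)
        group = order[overlap > overlapThresh]   # includes i itself (overlap == 1)
        weights = scores[group] / scores[group].sum()
        # replace the whole group by one score-weighted average box
        merged.append([np.dot(weights, x1[group]), np.dot(weights, y1[group]),
                       np.dot(weights, x2[group]), np.dot(weights, y2[group]),
                       scores[group].max()])
        order = order[overlap <= overlapThresh]
    return merged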
'''fig = plt.figure()
im2 = plt.imshow(out['prob'][0, 1], cmap=plt.get_cmap('jet'))
pos = fig.add_axes([0.93, 0.21, 0.02, 0.56])
fig.colorbar(im2, cax=pos)
plt.savefig("Results\\Heatmaps\\" + str(name))'''
# bounding boxes around the predicted face locations
bboxes = makeBoundingBoxes(out['prob'][0, 1], sf)
if bboxes:
    total_bboxes.extend(bboxes)
# the following scheme is NMS_AVERAGE applied on top of NMS_MAX; it performs
# better than the NMS_MAX scheme alone
all_bboxes = np.array(total_bboxes)
preserved_bboxes = nms_max(all_bboxes, OVERLAPPING_THRESH)
preserved_bboxes = nms_average(np.array(preserved_bboxes), CLUSTER_SCORE_THRESH)
'''# the following scheme is NMS_MAX only
all_bboxes = np.array(total_bboxes)
preserved_bboxes = nms_max(all_bboxes, OVERLAPPING_THRESH)'''
# write the image name onto the opened .txt file
resultData.write(thisImgPath + " \n")
# use the Draw method of the ImageDraw object
draw = ImageDraw.Draw(img)
'''###
box_counter = 0
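# The fragment above breaks off after creating the ImageDraw handle. A hedged
# sketch of how the preserved boxes would typically be drawn with PIL and logged
# to the open result file; the [x1, y1, x2, y2, score] ordering and the output
# format are assumptions, not this project's actual code.
for box_counter, box in enumerate(preserved_bboxes):
    # ImageDraw.rectangle expects [(x1, y1), (x2, y2)]
    draw.rectangle([(box[0], box[1]), (box[2], box[3])], outline="red")
    resultData.write("%d %d %d %d %.3f\n" % (box[0], box[1], box[2], box[3], box[4]))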
def face_detection_image(net, net_vf, image_name):
    '''
    Detect faces in a single image.
    '''
    if _DEBUG != True:
        pdb.set_trace()
    scales = []
    imgs = skimage.io.imread(image_name)
    if imgs.ndim == 3:
        rows, cols, ch = imgs.shape
    elif imgs.ndim == 0:
        # 09437 channel is 0
        return 0
    else:
        rows, cols = imgs.shape
        # grey image
        imgs = skimage.color.gray2rgb(imgs)
    # compute the scale factors of the detection pyramid
    min_side = rows if rows <= cols else cols
    max_side = rows if rows >= cols else cols
    # up-scaling factors
    delim = 2500 / max_side
    while delim >= 1:
        scales.append(delim)
        delim = delim - 0.5
    # down-scaling factors
    min_side = min_side * factor
    factor_count = 1
    while min_side >= face_w:
        scale = pow(factor, factor_count)
        scales.append(scale)
        min_side = min_side * factor
        factor_count += 1
    # =========================
    # scales.append(1)
    total_boxes = []
    ### for displaying the heat maps
    num_scale = len(scales)
    s1 = int(np.sqrt(num_scale)) + 1
    tt = 1
    plt.subplot(s1, s1 + 1, tt)
    plt.axis('off')
    plt.title("Input Image")
    im = caffe.io.load_image(image_name)
    plt.imshow(im)
    # ============
    for scale in scales:
        w, h = int(rows * scale), int(cols * scale)
        scale_img = tf.resize(imgs, (w, h))
        # reshape the network's 'data' input to the scaled image size
        net.blobs['data'].reshape(1, channel, w, h)
        # set up the input transformer
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        # transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
        transformer.set_transpose('data', (2, 0, 1))
        # if imgs.ndim == 3:  # some pictures are in black and white
        transformer.set_channel_swap('data', (2, 1, 0))
        transformer.set_raw_scale('data', raw_scale)
        # one forward pass
        out = net.forward_all(data=np.asarray([transformer.preprocess('data', scale_img)]))
        ### plot the heat map for this scale
        tt = tt + 1
        plt.subplot(s1, s1 + 1, tt)
        plt.axis('off')
        plt.title("scale: " + "%.2f" % scale)
        heatmap = plt.imshow(out['prob'][0, map_idx])
        plt.colorbar(heatmap)  # add a color bar
        # ===========
        boxes = generateBoundingBox(out['prob'][0, map_idx], scale)
        if boxes:
            total_boxes.extend(boxes)
    # non-maximum suppression
    info = image_name.split('/')
    info1 = info[8].split('.')
    boxes_total = np.array(total_boxes)
    draw_boxes(boxes_total, imgs, im, info1[0] + 'total.jpg')
    # find the top probability values in each heat map
    # idxs = np.argsort(boxes_total[:, 4])
    # last = len(idxs) - 1
    # i = idxs[last]
    # write them to a file
    # fileWriter = open('./result/max_probs.txt', 'a+')
    # fileWriter.write(image_name)
    # for i in idxs[last - 10:last]:
    #     fileWriter.write(' ')
    #     fileWriter.write(str(np.asscalar(boxes_total[i, 4])))
    # fileWriter.write('\n')
    # fileWriter.close()
    # compare several NMS parameter settings and save one result image for each
    true_boxes_max = nms_max(boxes_total, 0.2)
    # draw_boxes(true_boxes_max, imgs, im, info1[0] + 'true_boxes_max.jpg')
    # true_boxes_avg = nms_average(boxes_total, 0.2)
    # draw_boxes(true_boxes_avg, imgs, im, info1[0] + 'true_boxes_avg.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.05)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_1.jpg')
    # true_boxes_avg_max = nms_max(np.array(true_boxes_avg), 0.07)
    # draw_boxes(true_boxes_avg_max, imgs, im, info1[0] + 'true_boxes_avg_max.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.07)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_2.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.1)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_3.jpg')
    true_boxes_max = nms_max(boxes_total, 0.3)
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.05)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_4.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.07)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_5.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.1)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_6.jpg')
    true_boxes_max = nms_max(boxes_total, 0.4)
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.05)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_7.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.07)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_8.jpg')
    true_boxes_max_avg = nms_average(np.array(true_boxes_max), 0.1)
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_9.jpg')
    # ===================
    # plt.savefig('heatmap/' + image_name.split('/')[-1])
    plt.close()
    return out['prob'][0, map_idx]
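# draw_boxes is called repeatedly above with (boxes, imgs, im, filename) to save
# an annotated copy of the image for each NMS setting. A sketch using the same
# matplotlib/mpatches pattern as the other snippets in this file; the 'result/'
# output directory and the use of box[4] as the score label are assumptions, and
# the im argument (the caffe-loaded image) is unused in this sketch.
def draw_boxes(boxes, imgs, im, filename):
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(imgs)
    for box in boxes:
        rect = mpatches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                                  fill=False, edgecolor='red', linewidth=1)
        ax.text(box[0], box[1] + 20, "{0:.3f}".format(box[4]), color='white', fontsize=6)
        ax.add_patch(rect)
    plt.savefig('result/' + filename)
    plt.close(fig)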