def DispImg(self):
    # Convert BGR (OpenCV) to RGB before detection and display.
    img = cv2.cvtColor(self.Image, cv2.COLOR_BGR2RGB)
    results = dn.detect_np(self.net, self.meta, img)
    # Render the (Chinese) labels onto the image.
    img = dn.chinese_to_img(self.font, img, results)
    qimg = qimage2ndarray.array2qimage(img)
    # QPixmap has no QImage constructor; fromImage is required.
    self.DispLb.setPixmap(QPixmap.fromImage(qimg))
    self.DispLb.show()
def detect_object(self, event):
    ret, frame = self.cap.read()
    if not ret:
        return
    r = detect_np(self.net, self.meta, frame)
    result_im = check_detected_result_image(frame, r)
    cv2.imshow('Image', result_im)
    cv2.waitKey(1)

    # A frame is flagged when any detection with an obstacle label covers
    # more than obstacle_ratio of the frame area.
    flg = False
    for det in r:  # each detection: (label, confidence, (cx, cy, w, h))
        if det[0] in self.obstacle_label:
            ratio = det[2][2] * det[2][3] / self.imsize
            if ratio > self.obstacle_ratio:
                flg = True
                break

    # Debounce the per-frame flag with a saturating counter so a single
    # noisy frame cannot toggle the obstacle state.
    if flg:
        self.obstacle_cnt = min(self.obstacle_cnt + 1, self.obstacle_cancel_frame_th)
    else:
        self.obstacle_cnt = max(self.obstacle_cnt - 1, 0)
    if self.obstacle_cnt > self.obstacle_frame_th:
        self.obstacle_flg = True
    elif self.obstacle_cnt <= 0:
        self.obstacle_flg = False

    self.object_detect_publisher.publish(self.obstacle_flg)
    print(self.obstacle_cnt)
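# The saturating counter above is a simple temporal hysteresis: the obstacle
# flag asserts only after the condition has held for several consecutive
# frames, and releases only after it has been absent for about as long. A
# minimal standalone sketch of the same idea (class and threshold names are
# illustrative, not from the original code):
class ObstacleDebouncer:
    def __init__(self, on_threshold=5, saturation=10):
        self.on_threshold = on_threshold  # frames needed before asserting
        self.saturation = saturation      # counter ceiling; also release span
        self.count = 0
        self.flag = False

    def update(self, detected):
        # Count up on detection frames, down otherwise, clamped to [0, saturation].
        if detected:
            self.count = min(self.count + 1, self.saturation)
        else:
            self.count = max(self.count - 1, 0)
        if self.count > self.on_threshold:
            self.flag = True
        elif self.count <= 0:
            self.flag = False
        return self.flag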
def detect(net, meta, cam_param, im, isshow=False, issave=False,
           thresh=.3, hier_thresh=.5, nms=.45, name=None, output_dir=None):
    print "detection ============================="
    start_t = time.time()
    r = dn.detect_np(net, meta, im)
    # Convert raw detections to positions using the camera parameters.
    bdict = allposition(r, cam_param)
    end_t = time.time()
    print name, "detection result:", bdict
    if isshow or issave:
        image = im.copy()
        for i in r:
            # Each detection is (label, confidence, (cx, cy, w, h)).
            sx1, sy1, sx2, sy2 = i[2]
            sx1 = sx1 - sx2 / 2  # box center -> top-left corner
            sy1 = sy1 - sy2 / 2
            cv2.rectangle(image, (int(sx1), int(sy1)),
                          (int(sx1 + sx2), int(sy1 + sy2)), (0, 255, 0), 3)
            label = i[0] + "-" + str(round(i[1], 2))
            # Draw the label above the box, or inside it near the top edge.
            if sy1 > 10:
                cv2.putText(image, label, (int(sx1), int(sy1 - 6)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0))
            else:
                cv2.putText(image, label, (int(sx1), int(sy1 + 15)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0))
        if isshow:
            cv2.imshow("pre", image)
            cv2.waitKey(1)
        if issave:
            if name is None:
                name = 'test'
            if output_dir is None:
                output_dir = "./run_pic"
            t = str(end_t - start_t)
            cv2.imwrite(os.path.join(output_dir, "pre-" + name + "-" + t + ".jpg"), image)
    return bdict
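# The center-to-corner conversion above reappears in almost every snippet in
# this collection. A small helper distilling it (a sketch; it assumes the
# (label, confidence, (cx, cy, w, h)) tuple format that detect_np returns here):
def box_to_corners(det):
    cx, cy, w, h = det[2]
    x1, y1 = int(cx - w / 2), int(cy - h / 2)  # top-left corner
    return x1, y1, x1 + int(w), y1 + int(h)    # (x1, y1, x2, y2)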
def detect(cfg_file, weight_file, video_file):
    dn.set_gpu(0)
    net = dn.load_net(cfg_file, weight_file, 0)
    meta = dn.load_meta("cfg/coco.data")
    print("video: %s" % video_file)

    cap = cv2.VideoCapture(video_file)
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    terminate, is_paused = False, False
    idx = 0
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video_writer = cv2.VideoWriter("gray_processed.mp4", fourcc, fps, (width, height))

    while not terminate and cap.isOpened():
        idx += 1
        print("processing: %.4f%%" % (idx * 100.0 / video_len))
        if not is_paused:
            ret, frame = cap.read()
            if not ret:
                break
            boxes = dn.detect_np(net, meta, frame)
            new_frame = my_plot(frame.copy(), boxes)
            video_writer.write(new_frame)
        # Interactive preview, disabled for headless batch runs:
        # cv2.imshow('image', new_frame)
        # key = cv2.waitKey(1)
        # if key & 255 == 27:    # ESC: terminate
        #     terminate = True
        # elif key & 255 == 32:  # space: toggle pause
        #     is_paused = not is_paused
        # elif key & 255 == 115: # 's': step one frame while paused
        #     ret, frame = cap.read()
        #     if not ret:
        #         break
        #     boxes = dn.detect_np(net, meta, frame)
        #     is_paused = True

    cap.release()
    video_writer.release()
def callback(ch, method, properties, body):
    mdata = pickle.loads(body)
    print('working on count:', mdata['count'])
    result = darknet.detect_np(net, meta, mdata['buff'])
    print(mdata['count'], result)

    # Keep the highest confidence (as an integer percentage) per animal.
    dic = {'bear': 0, 'zebra': 0}
    for ob in result:
        ani = ob[0].decode()
        if ani in dic and dic[ani] < ob[1] * 100:
            dic[ani] = int(ob[1] * 100)

    sqlhead = ("INSERT INTO anitag "
               "(BEAR, ZEBRA) "
               "VALUES (%(bear)s, %(zebra)s)")
    print(dic)
    cur.execute(sqlhead, dic)
    cnx.commit()
def detect_targets(self, frame):
    '''
    Detects targets in a frame using Darknet.
    Returns a list of detections and appends detections to a history of detections.

    frame: The 2D image to run detections on.
    '''
    frame_size = (frame.shape[1], frame.shape[0])
    frame = cv2.resize(frame, self.net_size)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # For use in debug. If use_temporal is "fake", appends black masks to the
    # 4th and 5th channels. Otherwise, if use_temporal is True, appends masks
    # from past frames to the image before passing it through the darknet detector.
    if self.use_temporal == "fake":
        augmented_frame = np.stack(
            (frame[:, :, 0],
             frame[:, :, 1],
             frame[:, :, 2],
             np.zeros((frame.shape[0], frame.shape[1]), dtype="uint8"),
             np.zeros((frame.shape[0], frame.shape[1]), dtype="uint8")),
            axis=-1)
    elif self.use_temporal:
        augmented_frame = np.stack(
            (frame[:, :, 0],
             frame[:, :, 1],
             frame[:, :, 2],
             self.make_dets_frame_mask('red', frame.shape),
             self.make_dets_frame_mask('blue', frame.shape)),
            axis=-1)

    result = dn.detect_np(self.net, self.meta,
                          augmented_frame if self.use_temporal else frame)

    # Push the newest detections onto the front of the history, capped at
    # PAST_FRAME_NUM frames.
    self.last_dets.insert(0, [
        DetectedTarget(result_tuple, self.net_size, frame_size)
        for result_tuple in result
    ])
    if len(self.last_dets) > self.PAST_FRAME_NUM:
        self.last_dets.pop()
    return self.last_dets[0]
def callback(ch, method, properties, body):
    '''
    Read the frames queued in RabbitMQ, run darknet to identify animals,
    and save the results to a MySQL database.
    '''
    mdata = pickle.loads(body)
    result = darknet.detect_np(net, meta, mdata['buff'])

    # Keep the maximum confidence per animal type in this frame.
    dic = {}
    for ob in result:
        ani = ob[0].decode()
        if ani in aniset:
            dic[ani] = max(int(ob[1] * 100), dic.get(ani, 0))
    if not dic:
        return

    # Save animal type, timestamp and confidence; a parameterized query is
    # used instead of string formatting to avoid SQL injection.
    for i in dic:
        cur.execute("INSERT INTO anitag (object, timestamp, confid) "
                    "VALUES (%s, %s, %s)",
                    (i, mdata['timestamp'], dic[i]))
    cnx.commit()
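# For context, a minimal sketch of how a callback like the two above is
# typically registered with RabbitMQ; the queue name and connection
# parameters are assumptions, and the sketch assumes pika 1.x:
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='frames')
channel.basic_consume(queue='frames', on_message_callback=callback, auto_ack=True)
channel.start_consuming()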
def detect(net, meta, cam_param, im, isshow=True, issave=False,
           thresh=.3, hier_thresh=.5, nms=.45, name=None):
    r = dn.detect_np(net, meta, im)
    bdict = allposition(r, cam_param)
    if isshow or issave:
        for i in r:
            sx1, sy1, sx2, sy2 = i[2]
            sx1 = sx1 - sx2 / 2  # box center -> top-left corner
            sy1 = sy1 - sy2 / 2
            cv2.rectangle(im, (int(sx1), int(sy1)),
                          (int(sx1 + sx2), int(sy1 + sy2)), (0, 255, 0), 3)
            label = i[0] + "-" + str(round(i[1], 2))
            if sy1 > 10:
                cv2.putText(im, label, (int(sx1), int(sy1 - 6)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0))
            else:
                cv2.putText(im, label, (int(sx1), int(sy1 + 15)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0))
        if isshow:
            cv2.imshow("pre", im)
            cv2.waitKey(1)
        if issave:
            if name is None:
                name = str(time.time())
            cv2.imwrite("pre-" + name + ".jpg", im)
    return bdict
img_name = args.img_name.encode("utf-8")
print('read img_name:', args.img_name)
img = cv2.imread(args.img_name)

start_time = time.time()
results = darknet.detect_np(net, meta, img, thresh=.3, hier_thresh=.3, nms=.45)
end_time = time.time()
print('\tElapsed Time: {:.3f} sec'.format(end_time - start_time))

min_car_height = 90
frontcars = []
for i, obj in enumerate(results):
    cat, score, bounds = obj
    x, y, w, h = bounds  # (x, y) is the box center
    cv2.rectangle(img, (int(x - w / 2), int(y - h / 2)),
                  (int(x + w / 2), int(y + h / 2)), (255, 0, 0), 2)
def process_d2box(b):
    # The function header and the first unpacking lines were missing from the
    # source; they are reconstructed here from the call site below, where b is
    # a (label, confidence, (x, y, w, h)) detection with (x, y) the box center.
    mess = b[0]
    confidence = b[1]
    box = b[2]
    x = box[0]
    y = box[1]
    w = box[2]
    h = box[3]
    left = float(x) - 0.5 * float(w)
    top = float(y) - 0.5 * float(h)
    right = float(x) + 0.5 * float(w)
    bot = float(y) + 0.5 * float(h)
    return (left, top, right, bot, mess, confidence)

# Darknet ####################### for python3 ##########################
net = dn.load_net("detector2/yolov3.cfg".encode("ascii"),
                  "detector2/yolov3.weights".encode("ascii"), 0)
meta = dn.load_meta("detector2/coco.data".encode("ascii"))

cv2.namedWindow('YOLOV3')
img = cv2.imread('dog.jpg')
# Arguments: net, meta, image, thresh, hier_thresh, nms
d2_boxes = dn.detect_np(net, meta, img, 0.55, 0.55, 0.45)
for b in d2_boxes:
    boxResults = process_d2box(b)
    if boxResults is None:
        continue
    left, top, right, bot, mess, confidence = boxResults
    cv2.rectangle(img, (int(left), int(top)), (int(right), int(bot)), (255, 0, 0), 2)
    cv2.putText(img, str(mess) + ":" + str('%.1f' % (float(confidence) * 100)) + "%",
                (int(left), int(top) - 8), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
cv2.imshow('YOLOV3', img)
cv2.waitKey()
cv2.destroyAllWindows()
def run(self):
    self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
    self.meta = dn.load_meta(self.dataFile)

    cv2.namedWindow("Vision")

    # Find the "names" entry in the .data file and build a name -> class-index map.
    dataFile = open(self.dataFile, "r")
    line = dataFile.readline()
    namesFile = None
    while line:
        line = line.replace(" ", "").split("=")
        if line[0] == "names":
            print "Names file:", line
            namesFile = open(line[1].replace("\n", ""), "r")
            break
        line = dataFile.readline()
    dataFile.close()

    if namesFile is not None:
        line = namesFile.readline()
        index = 0
        while line:
            self.detectionDictionary[line.replace("\n", "")] = index
            index += 1
            line = namesFile.readline()
        namesFile.close()

    # Pick the image source: PointGrey camera, video file, or still picture.
    if not self.useVideo and not self.usePicture:
        bus = pc2.BusManager()
        self.activeCamera = pc2.Camera()
        self.activeCamera.connect(bus.getCameraFromIndex(0))
        self.activeCamera.startCapture()
    elif self.useVideo:
        self.activeCamera = cv2.VideoCapture(self.videoPath)
    elif self.usePicture:
        self.sourceImg = cv2.imread(self.picturePath)

    while self.running:
        if not self.useVideo and not self.usePicture:
            self.image = self.activeCamera.retrieveBuffer()
            self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
            img = np.array(self.image.getData(), dtype="uint8").reshape(
                (self.image.getRows(), self.image.getCols(), 3))
            img = cv2.flip(img, -1)
        elif self.useVideo:
            t, img = self.activeCamera.read()
        else:
            img = copy.copy(self.sourceImg)

        w, h, c = img.shape
        detections = dn.detect_np(self.net, self.meta, img, thresh=.1, hier_thresh=.1)
        newDetections = []
        self.imgPoints = []
        self.srcPoints = []
        for detection in detections:
            fixedClassNumber = self.detectionDictionary[detection[0]]
            newDetections.append([fixedClassNumber, detection[1], detection[2]])
            if detection[0] in self.torpedoDictionary:
                # Use the hole's center as a 2D-3D correspondence for solvePnP.
                self.imgPoints.append((detection[2][0], detection[2][1]))
                self.srcPoints.append(self.torpedoSrcPoints[
                    self.torpedoDictionary[detection[0]]])
                print "Detection", detection[0]
                if detection[0] == "Top Right Hole":
                    pixelError = 20
                    x, y, width, height = detection[2]
                    # Pad the detection box by pixelError on each side and
                    # clamp to the frame (the lower corner is assumed to be
                    # center minus half size; the source computed x1 == x2).
                    x1 = max(x - (.5 * width) - pixelError, 0)
                    y1 = max(y - (.5 * height) - pixelError, 0)
                    x2 = min(x + (.5 * width) + pixelError, w - 1)
                    y2 = min(y + (.5 * height) + pixelError, h - 1)
                    print "Sub img", x1, y1, x2, y2
                    subImg = img[int(y1):int(y2), int(x1):int(x2)]
                    while True:
                        cv2.imshow("Sub", subImg)
                        if cv2.waitKey(1) == ord("q"):
                            break
            elif detection[0] == "Corner" and False:  # branch disabled in source
                print "Found a corner"
                pixelError = 20
                # Match this corner against each torpedo board's box corners
                # and add it as an extra solvePnP correspondence.
                for secondDet in detections:
                    if secondDet[0] not in self.torpedoDictionary:
                        continue
                    index = None
                    left = secondDet[2][0] - (.5 * secondDet[2][2])
                    right = secondDet[2][0] + (.5 * secondDet[2][2])
                    top = secondDet[2][1] - (.5 * secondDet[2][3])
                    bottom = secondDet[2][1] + (.5 * secondDet[2][3])
                    cx, cy = detection[2][0], detection[2][1]
                    if left - pixelError <= cx <= left + pixelError:
                        if top - pixelError < cy <= top + pixelError:
                            print "In top left corner of", secondDet[0]
                            cv2.circle(img, (int(cx), int(cy)), 10, (255, 0, 0), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 6
                    elif right - pixelError <= cx <= right + pixelError:
                        if top - pixelError < cy <= top + pixelError:
                            print "In top right corner of", secondDet[0]
                            cv2.circle(img, (int(cx), int(cy)), 10, (0, 255, 0), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 3
                    if left - pixelError <= cx <= left + pixelError:
                        if bottom - pixelError < cy <= bottom + pixelError:
                            print "In bottom left corner of", secondDet[0]
                            cv2.circle(img, (int(cx), int(cy)), 10, (0, 0, 255), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 5
                    elif right - pixelError <= cx <= right + pixelError:
                        if bottom - pixelError < cy <= bottom + pixelError:
                            print "In bottom right corner of", secondDet[0]
                            cv2.circle(img, (int(cx), int(cy)), 10, (255, 0, 255), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 4
                    if index is not None:
                        index += 3
                        self.srcPoints.append(self.torpedoSrcPoints[index])
                        self.imgPoints.append((int(cx), int(cy)))

        if len(self.imgPoints) >= 4:
            print "Solving..."
            rvec, tvec = cv2.solvePnP(
                np.array(self.srcPoints).astype("float32"),
                np.array(self.imgPoints).astype("float32"),
                np.array(self.camMat).astype("float32"),
                np.zeros((4, 1)))[-2:]
            # Project two points along the board's normal and draw it.
            (pose1, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 0.1)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            (pose, jacobian) = cv2.projectPoints(
                np.array([(0, 0, 12.0)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])),
                     (int(pose[0][0][0]), int(pose[0][0][1])), (255, 0, 0), 2)
            print "Rvec:", rvec, "\n"
            print "Tvec:", tvec

        for detection in detections:
            loc = detection[2]
            cv2.rectangle(img,
                          (int(loc[0] - (.5 * loc[2])), int(loc[1] - (.5 * loc[3]))),
                          (int(loc[0] + (.5 * loc[2])), int(loc[1] + (.5 * loc[3]))),
                          (0, 0, 255))
        self.getList.append(newDetections)

        cv2.imshow("Vision", img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.running = False

    self.activeCamera.stopCapture()
def run(self):
    if not onLinux:
        return
    self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
    self.meta = dn.load_meta(self.dataFile)

    # Find the "names" entry in the .data file and build a name -> class-index map.
    dataFile = open(self.dataFile, "r")
    line = dataFile.readline()
    namesFile = None
    while line:
        line = line.replace(" ", "").split("=")
        if line[0] == "names":
            print "Names file:", line
            namesFile = open(line[1].replace("\n", ""), "r")
            break
        line = dataFile.readline()
    dataFile.close()

    if namesFile is not None:
        line = namesFile.readline()
        index = 0
        while line:
            self.detectionDictionary[line.replace("\n", "")] = index
            index += 1
            line = namesFile.readline()
        namesFile.close()

    if useVideo:
        video = cv2.VideoCapture(videoFile)
    else:
        bus = pc2.BusManager()
        self.activeCamera = pc2.Camera()
        self.frontCamera = pc2.Camera()
        self.botCamera = pc2.Camera()
        self.frontCamera.connect(bus.getCameraFromIndex(1))
        self.frontCamera.startCapture()
        time.sleep(1)
        self.botCamera.connect(bus.getCameraFromIndex(0))
        self.botCamera.startCapture()
        self.activeCamera = self.frontCamera

    while self.running:
        if not self.useVideo:
            self.image = self.activeCamera.retrieveBuffer()
            self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
            img = np.array(self.image.getData(), dtype="uint8").reshape(
                (self.image.getRows(), self.image.getCols(), 3))
            img = cv2.flip(img, -1)
        else:
            t, img = self.activeCamera.read()

        detections = dn.detect_np(self.net, self.meta, img, thresh=.1, hier_thresh=.1)
        newDetections = []
        self.imgPoints = []
        self.srcPoints = []
        for detection in detections:
            fixedClassNumber = self.detectionDictionary[detection[0]]
            cv2.putText(img, detection[0],
                        (int(detection[2][0] - (.5 * detection[2][2])),
                         int(detection[2][1] - (.5 * detection[2][3]))),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2)
            newDetections.append([fixedClassNumber, detection[1], detection[2]])
            if detection[0] in self.torpedoDictionary:
                self.imgPoints.append((detection[2][0], detection[2][1]))
                self.srcPoints.append(self.torpedoSrcPoints[
                    self.torpedoDictionary[detection[0]]])
            elif detection[0] == "Corner":
                pixelError = 20
                # Match the corner to a torpedo board's box corners and add it
                # as an extra solvePnP correspondence.
                for secondDet in detections:
                    if secondDet[0] not in self.torpedoDictionary:
                        continue
                    index = None
                    left = secondDet[2][0] - (.5 * secondDet[2][2])
                    right = secondDet[2][0] + (.5 * secondDet[2][2])
                    top = secondDet[2][1] - (.5 * secondDet[2][3])
                    bottom = secondDet[2][1] + (.5 * secondDet[2][3])
                    cx, cy = detection[2][0], detection[2][1]
                    if left - pixelError <= cx <= left + pixelError:
                        if top - pixelError < cy <= top + pixelError:
                            # Top left corner.
                            cv2.circle(img, (int(cx), int(cy)), 10, (255, 0, 0), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 9
                    elif right - pixelError <= cx <= right + pixelError:
                        if top - pixelError < cy <= top + pixelError:
                            # Top right corner.
                            cv2.circle(img, (int(cx), int(cy)), 10, (0, 255, 0), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 6
                    if left - pixelError <= cx <= left + pixelError:
                        if bottom - pixelError < cy <= bottom + pixelError:
                            # Bottom left corner.
                            cv2.circle(img, (int(cx), int(cy)), 10, (0, 0, 255), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 8
                    elif right - pixelError <= cx <= right + pixelError:
                        if bottom - pixelError < cy <= bottom + pixelError:
                            # Bottom right corner.
                            cv2.circle(img, (int(cx), int(cy)), 10, (255, 0, 255), -1)
                            index = self.torpedoDictionary[secondDet[0]] + 7
                    if index is not None:
                        self.srcPoints.append(self.torpedoSrcPoints[index])
                        self.imgPoints.append((int(cx), int(cy)))

        if len(self.imgPoints) >= 4:
            rvec, tvec = cv2.solvePnP(
                np.array(self.srcPoints).astype("float32"),
                np.array(self.imgPoints).astype("float32"),
                np.array(self.camMat).astype("float32"),
                np.zeros((4, 1)))[-2:]
            (pose1, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 0.1)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            (pose, jacobian) = cv2.projectPoints(
                np.array([(0, 0, 12.0)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])),
                     (int(pose[0][0][0]), int(pose[0][0][1])), (255, 0, 0), 2)

        for detection in detections:
            loc = detection[2]
            cv2.rectangle(img,
                          (int(loc[0] - (.5 * loc[2])), int(loc[1] - (.5 * loc[3]))),
                          (int(loc[0] + (.5 * loc[2])), int(loc[1] + (.5 * loc[3]))),
                          (0, 0, 255))
        self.getList.append(newDetections)

        cv2.imshow("Vision", img)
        cv2.waitKey(1)  # needed for imshow to actually refresh the window

    self.activeCamera.stopCapture()
dn.set_gpu(0)
net = dn.load_net("6_15_test.cfg", "6_15_test_40000.weights", 0)
meta = dn.load_meta("test.data")

# Grab frames from a PointGrey camera and run detection on each.
bus = pc2.BusManager()
cam = pc2.Camera()
cam.connect(bus.getCameraFromIndex(0))
cam.startCapture()

startTime = time.time()
while True:
    print "Time was", time.time() - startTime
    startTime = time.time()
    image = cam.retrieveBuffer()
    image = image.convert(pc2.PIXEL_FORMAT.BGR)
    img = np.array(image.getData(), dtype="uint8").reshape(
        (image.getRows(), image.getCols(), 3))
    detections = dn.detect_np(net, meta, img)
    for detection in detections:
        loc = detection[2]  # (cx, cy, w, h)
        cv2.rectangle(img,
                      (int(loc[0] - (.5 * loc[2])), int(loc[1] - (.5 * loc[3]))),
                      (int(loc[0] + (.5 * loc[2])), int(loc[1] + (.5 * loc[3]))),
                      (0, 0, 255))
    cv2.imshow("Test", img)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break
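# Distilled from the snippets above: the common detect_np pattern as one
# self-contained sketch. The file paths are placeholders; it assumes the
# darknet python binding used throughout this collection, where load_net and
# load_meta take byte-string paths and detect_np returns
# (label, confidence, (cx, cy, w, h)) tuples.
import cv2
import darknet as dn

net = dn.load_net(b"cfg/yolov3.cfg", b"yolov3.weights", 0)
meta = dn.load_meta(b"cfg/coco.data")
img = cv2.imread("dog.jpg")

for label, confidence, (cx, cy, w, h) in dn.detect_np(net, meta, img):
    # Some bindings return the label as bytes (see the .decode() calls above).
    name = label.decode() if isinstance(label, bytes) else label
    x1, y1 = int(cx - w / 2), int(cy - h / 2)
    cv2.rectangle(img, (x1, y1), (x1 + int(w), y1 + int(h)), (0, 255, 0), 2)
    cv2.putText(img, "%s %.2f" % (name, confidence), (x1, max(y1 - 6, 10)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
cv2.imwrite("out.jpg", img)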