Example #1
def detect_image(integer,lokl,eventol,evento2l,the_queuel,the_final_queuel,threshl,hier_threshl,darknet_pathl, data_filel, cfg_filel, weight_filel):
    # Inputs:
    # =======
    # integer (int) index of this detector process, used only in the log messages below.
    # lokl (multiprocessing.Lock) shared lock (not used in this snippet).
    # eventol (multiprocessing.Event) set by the producer once it has finished feeding frames.
    # evento2l (multiprocessing.Event) set by this process when detection is finished.
    # the_queuel (multiprocessing.JoinableQueue) the queue where the images wait to be processed.
    # the_final_queuel (multiprocessing.Queue) the queue where detection results are placed.
    # hier_threshl (float) hierarchical threshold passed through to pyyolo.detect.
    # threshl (float) minimum probability for a detection to be accepted.
    # darknet_pathl (str) path to the darknet folder.
    # data_filel (str) path to the data file.
    # cfg_filel (str) path to the configuration file.
    # weight_filel (str) path to the weights file.
    # Returns:
    # ========
    # outputs (list) each entry is a dictionary with keys=['right','left','top','bottom','class','prob']; the list is put on the_final_queuel as [outputs, frame_id, img_rgb] rather than returned.
    # Load YOLO weight
    pyyolo.init(darknet_pathl, data_filel, cfg_filel, weight_filel)  # load the darknet model into memory
    #while True:
    while the_queuel.qsize() > 0 or not eventol.is_set():
        from_queue = the_queuel.get()  # every item in the_queuel is a list of the form [w, h, c, data, frame_id, img_rgb]
        outpyyolo=pyyolo.detect(from_queue[0], from_queue[1], from_queue[2], from_queue[3], threshl, hier_threshl)
        the_queuel.task_done()
        the_final_queuel.put([outpyyolo,from_queue[4],from_queue[5]])
    pyyolo.cleanup()
    print("tamagno de la cola cuando termino el proceso detector ",integer ," es ", the_queuel.qsize())
    print("El proceso detector",integer,"ve que evento1 termino? ",eventol.is_set())
    if evento2l.is_set()==False:
        evento2l.set()
def end_classification():
    pyyolo.cleanup()


####### USED FOR TESTING ########
# fullpath = "/home/carlos/vrlserver/videos/raw/cam1/recording_6"
# for item in range(22,23):
#     ir = np.load(fullpath + "/ir_full_vid/ir_frame_" + str(item) + ".npy")
#     depth = np.load(fullpath + "/depth_full_vid/depth_frame_" + str(item) + ".npy")
#     rgb = np.load(fullpath + "/rgb_full_vid/rgb_frame_" + str(item) + ".npy")
#     interaction_detector(rgb,depth,ir)
# end_classification()
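
Usage sketch (not part of the original example): wiring detect_image up as a worker process with a single frame on the queue. The queue and event names, the inline preprocessing, the thresholds, and every path below are placeholder assumptions.

import multiprocessing as mp

import cv2
import numpy as np

if __name__ == '__main__':
    frame_queue = mp.JoinableQueue()      # detect_image calls task_done(), so a JoinableQueue is required
    result_queue = mp.Queue()
    producer_done = mp.Event()            # "eventol": producer has finished feeding frames
    detector_done = mp.Event()            # "evento2l": set by the detector when it exits
    lock = mp.Lock()

    # Flatten one BGR frame into the [w, h, c, data, frame_id, img_rgb] layout the worker expects.
    img = cv2.imread('frame.jpg')                  # placeholder input image
    chw = img.transpose(2, 0, 1)                   # HWC -> CHW
    c, h, w = chw.shape
    data = np.ascontiguousarray(chw.ravel() / 255.0, dtype=np.float32)
    frame_queue.put([w, h, c, data, 0, img])
    producer_done.set()

    # All model paths are placeholders; point them at your own darknet build and weights.
    p = mp.Process(target=detect_image,
                   args=(1, lock, producer_done, detector_done, frame_queue, result_queue,
                         0.24, 0.5, './darknet', 'cfg/coco.data', 'cfg/yolov2.cfg', 'yolov2.weights'))
    p.start()
    detections, frame_id, frame = result_queue.get()
    print(frame_id, detections)
    p.join()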
Example #3
def detect_yolo_pyyolo(img_arr, url='', classes=constants.hls_yolo_categories):
    # from file
    print('----- test original C using a file')
    hash = hashlib.sha1()
    hash.update(str(time.time()))
    img_filename = hash.hexdigest()[:10] + 'pyyolo.jpg'
    #  img_filename = 'incoming.jpg'
    cv2.imwrite(img_filename, img_arr)

    outputs = pyyolo.test(img_filename, thresh, hier_thresh)
    relevant_bboxes = []
    for output in outputs:
        print(output)
        label = output['class']
        xmin = output['left']
        ymin = output['top']
        xmax = output['right']
        ymax = output['bottom']
        item = {
            'object': label,
            'bbox': [xmin, ymin, xmax, ymax],
            'confidence': '>' + str(thresh)
        }
        #            item = {'object':label,'bbox':[xmin,ymin,xmax,ymax],'confidence':round(float(confidence),3)}

        relevant_bboxes.append(item)

# not sure what the difference is between the second method and the first (see the note after this example)

# camera
# print('----- test python API using a file')
# i = 1
# while i < 2:
#     # ret_val, img = cam.read()
#     img = cv2.imread(filename)
#     img = img.transpose(2,0,1)
#     c, h, w = img.shape[0], img.shape[1], img.shape[2]
#     # print w, h, c
#     data = img.ravel()/255.0
#     data = np.ascontiguousarray(data, dtype=np.float32)
#     outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
#     for output in outputs:
#         print(output)
#     i = i + 1
# free model
    pyyolo.cleanup()
    return relevant_bboxes
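
A note on the comment above: judging from how the two entry points are used across this page, pyyolo.test hands a filename to darknet's own C image loader, while pyyolo.detect takes a flattened float32 array prepared in Python. A side-by-side sketch (paths, thresholds, and the trailing argument to test are assumptions borrowed from Example #8):

import cv2
import numpy as np
import pyyolo

pyyolo.init('./darknet', 'cfg/coco.data', 'cfg/yolov2.cfg', 'yolov2.weights')  # placeholder paths

# 1) pyyolo.test: darknet loads and decodes the image file itself (the "original C" path).
#    (Example #3 calls it with three arguments, Example #8 with four; the build in use decides.)
detections_from_file = pyyolo.test('frame.jpg', 0.24, 0.5, 0)

# 2) pyyolo.detect: the caller decodes and flattens the image in Python first.
img = cv2.imread('frame.jpg').transpose(2, 0, 1)   # HWC -> CHW
c, h, w = img.shape
data = np.ascontiguousarray(img.ravel() / 255.0, dtype=np.float32)
detections_from_array = pyyolo.detect(w, h, c, data, 0.24, 0.5)

print(detections_from_file)
print(detections_from_array)
pyyolo.cleanup()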
Example #4
def detection_yolo(datacfg, cfgfile, weightfile, imgfile):
    pyyolo.init(datacfg, cfgfile, weightfile)
    # image
    catograys = {}

    img = cv2.imread(imgfile)

    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    #print c, h, w
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    #print outputs
    res = convertoutputyolo(outputs, w, h)
    pyyolo.cleanup()
    return res
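
convertoutputyolo is not shown in this listing; the sketch below is one hypothetical version, assuming it only repackages pyyolo's pixel-coordinate dictionaries into (label, probability, normalized box) tuples. The real helper in the source project may differ.

# Hypothetical helper (not from the original project): repackage pyyolo's output
# dicts into (label, probability, normalized [x_center, y_center, width, height]).
def convertoutputyolo(outputs, w, h):
    res = []
    for output in outputs:
        box_w = output['right'] - output['left']
        box_h = output['bottom'] - output['top']
        x_center = output['left'] + box_w / 2.0
        y_center = output['top'] + box_h / 2.0
        res.append((output['class'], output['prob'],
                    [x_center / w, y_center / h, box_w / float(w), box_h / float(h)]))
    return res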
Example #5
def main(args):
    darknet_path = './darknet'
    datacfg = 'cfg/coco.data'
    cfgfile = 'cfg/yolov2.cfg'
    weightfile = '../yolov2.weights'
    filename = darknet_path + '/data/person.jpg'

    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    '''Initializes and cleans up the ROS node.'''
    ir = image_retriever()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down ROS Image feature detector module"

    # free model
    pyyolo.cleanup()
Example #6
def main(args):
    rp = rospkg.RosPack()

    darknet_path = '/home/nvidia/pyyolo/darknet'
    datacfg = os.path.join(rp.get_path(NAME), 'data/cfg', 'coco.data')
    cfgfile = os.path.join(rp.get_path(NAME), 'data/cfg', 'yolov2.cfg')
    weightfile = os.path.join(rp.get_path(NAME), 'data', 'yolov2.weights')

    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    '''Initializes and cleans up the ROS node.'''
    ir = Detector()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down ROS Image feature detector module"

    # free model
    pyyolo.cleanup()
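
The image_retriever / Detector classes constructed in Examples #5 and #6 are not shown either; one minimal version of such a node might look like the sketch below. The topic name, encoding, thresholds, and the use of cv_bridge are assumptions, and pyyolo.init is expected to have been called by main() as above.

import numpy as np
import pyyolo
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image


class Detector(object):
    def __init__(self):
        rospy.init_node('pyyolo_detector', anonymous=True)
        self.bridge = CvBridge()
        # Topic name is a placeholder; pyyolo.init() is assumed to have run already.
        self.sub = rospy.Subscriber('/camera/image_raw', Image, self.callback, queue_size=1)

    def callback(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        chw = img.transpose(2, 0, 1)                    # HWC -> CHW, as in the other examples
        c, h, w = chw.shape
        data = np.ascontiguousarray(chw.ravel() / 255.0, dtype=np.float32)
        for output in pyyolo.detect(w, h, c, data, 0.24, 0.5):
            rospy.loginfo(output)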
def GigeStreamer(cam_id):
    camera_id = cam_id
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    Aravis.enable_interface(camera_id)  # using arv-fake-gv-camera-0.6
    camera = Aravis.Camera.new(None)
    stream = camera.create_stream(None, None)
    payload = camera.get_payload()

    for i in range(0, 50):
        stream.push_buffer(Aravis.Buffer.new_allocate(payload))

    print("Starting acquisition")
    camera.start_acquisition()
    while True:
        buffer = stream.try_pop_buffer()
        print(buffer)
        if buffer:
            frame = convert(buffer)  # convert() (defined elsewhere) turns the Aravis buffer into a numpy image
            stream.push_buffer(buffer)  #push buffer back into stream
            cv2.imshow("frame", frame)

            # img = cv2.imread(filename)
            img = frame.transpose(2, 0, 1)  # img = img.transpose(2,0,1)
            c, h, w = img.shape[0], img.shape[1], img.shape[2]
            data = img.ravel() / 255.0
            data = np.ascontiguousarray(data, dtype=np.float32)
            # perform_recognition()
            outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
            for output in outputs:
                print(output)

            ch = cv2.waitKey(1) & 0xFF
            if ch == 27 or ch == ord('q'):
                break
            elif ch == ord('s'):
                cv2.imwrite("imagename.png", frame)
    camera.stop_acquisition()
    pyyolo.cleanup()
Example #8
File: example.py Project: fyang26/pyyolo
#    ret_val = cv2.imwrite(filename,img)
#    print(ret_val)

pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)

# from file
print('----- test original C using a file')
outputs = pyyolo.test(filename, thresh, hier_thresh, 0)
for output in outputs:
    print(output)

# camera
print('----- test python API using a file')
i = 1
while i < 2:
    # ret_val, img = cam.read()
    img = cv2.imread(filename)
    img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print w, h, c
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    for output in outputs:
        print(output)
    i = i + 1

# free model
pyyolo.cleanup()
Example #9
def clean_up():
    pyyolo.cleanup()
Example #10
File: example.py Project: Roboy/vision
cam = cv2.VideoCapture(filename)
ret_val, img = cam.read()
print(ret_val)
if not ret_val:
	sys.exit()
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
outVideo = cv2.VideoWriter('outputRoboySkyfall.mp4',fourcc, 20.0, (800,533))
 
print('----- test python API using a file')
while True:
	ok, img = cam.read()
	if not ok:
		sys.exit()
	frame = imutils.resize(img, width=800)
	img = img.transpose(2, 0, 1)
	c, h, w = img.shape[0], img.shape[1], img.shape[2]
	print(w, h, c)
	data = img.ravel()/255.0
	data = np.ascontiguousarray(data, dtype=np.float32)
	outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)	
	for output in outputs:
		print(output)
		p1 = (output['left'],output['top'])
		p2 = (output['right'],output['bottom'])
		print(p1,p2)
		cv2.rectangle(frame, p1, p2, (0, 0, 255), 10)
		outVideo.write(frame)
# free model
pyyolo.cleanup()
Example #11
    def __del__(self):
        if pyyolo is not None:
            pyyolo.cleanup()
Example #12
    def __del__(self):
        """ Class destructor used for Clean up pyyolo object detector
        """

        print("Cleaning up")
        pyyolo.cleanup()
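
Examples #11 and #12 put pyyolo.cleanup() in a destructor; a minimal wrapper class in that style might look like the following sketch (class name, default thresholds, and the preprocessing are assumptions). Relying on __del__ alone is fragile, since destruction order at interpreter shutdown is not guaranteed, so an explicit close() or context manager is often safer.

import numpy as np
import pyyolo


class PyyoloDetector(object):
    def __init__(self, darknet_path, datacfg, cfgfile, weightfile,
                 thresh=0.24, hier_thresh=0.5):
        self.thresh = thresh
        self.hier_thresh = hier_thresh
        pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)

    def detect(self, img_bgr):
        chw = img_bgr.transpose(2, 0, 1)               # HWC -> CHW
        c, h, w = chw.shape
        data = np.ascontiguousarray(chw.ravel() / 255.0, dtype=np.float32)
        return pyyolo.detect(w, h, c, data, self.thresh, self.hier_thresh)

    def __del__(self):
        # Guard as in Example #11: the module reference may already be gone at interpreter shutdown.
        if pyyolo is not None:
            pyyolo.cleanup()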