Example #1
def detect_image(integer,lokl,eventol,evento2l,the_queuel,the_final_queuel,threshl,hier_threshl,darknet_pathl, data_filel, cfg_filel, weight_filel):
    # Inputs:
    # =======
    # the_queuel (multiprocessing.JoinableQueue) the queue where images wait to be processed.
    # hier_threshl (float) hierarchical detection threshold.
    # threshl (float) the minimum probability for a detection to be accepted.
    # darknet_pathl (str) path to darknet folder.
    # data_filel (str) path to data file.
    # cfg_filel (str) path to configuration file.
    # weight_filel (str) path to weight file.
    # Returns:
    # ========
    # outputs (list) every entry is a dictionary with keys ['right','left','top','bottom','class','prob']; each result is put on the_final_queuel.
    # Load YOLO weights
    pyyolo.init(darknet_pathl, data_filel, cfg_filel, weight_filel)  # load darknet into memory
    #while True:
    while the_queuel.qsize() > 0 or not eventol.is_set():
        from_queue = the_queuel.get()  # every item in the_queuel is a list of the form [w, h, c, data, frame_id, img_rgb]
        outpyyolo=pyyolo.detect(from_queue[0], from_queue[1], from_queue[2], from_queue[3], threshl, hier_threshl)
        the_queuel.task_done()
        the_final_queuel.put([outpyyolo,from_queue[4],from_queue[5]])
    pyyolo.cleanup()
    print("tamagno de la cola cuando termino el proceso detector ",integer ," es ", the_queuel.qsize())
    print("El proceso detector",integer,"ve que evento1 termino? ",eventol.is_set())
    if evento2l.is_set()==False:
        evento2l.set()
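A minimal sketch of how this worker might be wired up with multiprocessing (the JoinableQueue type is inferred from the task_done() call above; the lock argument, paths and thresholds are placeholders borrowed from the other examples here):

import multiprocessing as mp

if __name__ == '__main__':
    the_queue = mp.JoinableQueue()   # frames waiting for detection
    final_queue = mp.Queue()         # detections per frame
    lock = mp.Lock()                 # lokl is unused above but kept for the signature
    evento1 = mp.Event()             # producer sets this when no more frames are coming
    evento2 = mp.Event()             # detector sets this when it exits

    p = mp.Process(target=detect_image,
                   args=(0, lock, evento1, evento2, the_queue, final_queue,
                         0.24, 0.5, './darknet', 'cfg/coco.data',
                         'cfg/yolo.cfg', 'yolo.weights'))
    p.start()
    # ... producer puts [w, h, c, data, frame_id, img_rgb] items on the_queue ...
    evento1.set()   # signal that no more frames are coming
    p.join()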
Example #2
 def __init__(self):
     if pyyolo is not None:
         darknet_path = './darknet'
         datacfg = 'cfg/coco.data'
         cfgfile = 'cfg/yolo.cfg'
         weightfile = 'yolo.weights'
         pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
         self.hier_thresh = 0.1
Example #3
def detection_yolo(datacfg, cfgfile, weightfile, imgfile):
    pyyolo.init(datacfg, cfgfile, weightfile)
    # image
    catograys = {}

    img = cv2.imread(imgfile)

    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print(c, h, w)
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    # print(outputs)
    res = convertoutputyolo(outputs, w, h)
    pyyolo.cleanup()
    return res
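The same preprocessing recipe (HWC-to-CHW transpose, flatten, scale to [0, 1], contiguous float32) recurs in almost every example here; a small helper capturing it, assuming an H x W x C uint8 image as returned by cv2.imread:

import numpy as np

def to_pyyolo_input(img):
    # convert an HxWxC uint8 image into the (w, h, c, data) arguments
    # that pyyolo.detect() expects
    chw = img.transpose(2, 0, 1)
    c, h, w = chw.shape
    data = np.ascontiguousarray(chw.ravel() / 255.0, dtype=np.float32)
    return w, h, c, data

# usage: outputs = pyyolo.detect(*to_pyyolo_input(img), thresh, hier_thresh)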
Example #4
def main(args):
    darknet_path = './darknet'
    datacfg = 'cfg/coco.data'
    cfgfile = 'cfg/yolov2.cfg'
    weightfile = '../yolov2.weights'
    filename = darknet_path + '/data/person.jpg'

    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    '''Initialize the ROS node and clean it up on exit'''
    ir = image_retriever()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down ROS Image feature detector module"

    # free model
    pyyolo.cleanup()
Example #5
def main(args):
    rp = rospkg.RosPack()

    darknet_path = '/home/nvidia/pyyolo/darknet'
    datacfg = os.path.join(rp.get_path(NAME), 'data/cfg', 'coco.data')
    cfgfile = os.path.join(rp.get_path(NAME), 'data/cfg', 'yolov2.cfg')
    weightfile = os.path.join(rp.get_path(NAME), 'data', 'yolov2.weights')

    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    '''Initialize the ROS node and clean it up on exit'''
    ir = Detector()

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down ROS Image feature detector module"

    # free model
    pyyolo.cleanup()
Example #6
    def __init__(self):
        """ Initialize pyyolo and darknet for object detection .

        :param object: base class inheritance
        :type object: class:`Object`

        """
        C = config.Config()
        x = str(os.path.realpath(Path(__file__).parent)).split("/")

        weightfile_templates = os.path.join("/", *x[:-1],
                                            C.WEIGHTFILE_TEMPLATES)
        self.thresh = C.THRESH
        self.hier_thresh = C.HIER_THRESH

        # initialize pyyolo
        darknet_path = os.path.join("/", *x[:-1], C.DARKNET_PATH)
        datacfg = os.path.join("/", *x[:-1], C.DATACFG)
        cfgfile = os.path.join("/", *x[:-1], C.CFGFILE)
        pyyolo.init(darknet_path, datacfg, cfgfile, weightfile_templates)
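The path arithmetic above just rebuilds an absolute path one directory above the current file; a quick illustration, assuming the module lives at /opt/app/pkg/detector.py and C.DARKNET_PATH is 'darknet':

import os

x = "/opt/app/pkg".split("/")                  # ['', 'opt', 'app', 'pkg']
print(os.path.join("/", *x[:-1], "darknet"))   # /opt/app/darknet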
Example #7
def GigeStreamer(cam_id):
    camera_id = cam_id
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    Aravis.enable_interface(camera_id)  # using arv-fake-gv-camera-0.6
    camera = Aravis.Camera.new(None)
    stream = camera.create_stream(None, None)
    payload = camera.get_payload()

    for i in range(0, 50):
        stream.push_buffer(Aravis.Buffer.new_allocate(payload))

    print("Starting acquisition")
    camera.start_acquisition()
    while True:
        buffer = stream.try_pop_buffer()
        print(buffer)
        if buffer:
            frame = convert(buffer)
            stream.push_buffer(buffer)  #push buffer back into stream
            cv2.imshow("frame", frame)

            # img = cv2.imread(filename)
            img = frame.transpose(2, 0, 1)  # img = img.transpose(2,0,1)
            c, h, w = img.shape[0], img.shape[1], img.shape[2]
            data = img.ravel() / 255.0
            data = np.ascontiguousarray(data, dtype=np.float32)
            # perform_recognition()
            outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
            for output in outputs:
                print(output)

            ch = cv2.waitKey(1) & 0xFF
            if ch == 27 or ch == ord('q'):
                break
            elif ch == ord('s'):
                cv2.imwrite("imagename.png", frame)
    camera.stop_acquisition()
    pyyolo.cleanup()
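convert() is not defined in this snippet; Example #11 below does the same buffer-to-array job with ctypes, so a sketch along those lines (the frame dimensions and 3-channel layout are assumptions):

import ctypes
import numpy as np

def convert(buf, height=1536, width=2048):
    # reinterpret the raw Aravis buffer as an H x W x 3 uint8 frame
    p = ctypes.cast(buf.data, ctypes.POINTER(ctypes.c_uint8))
    return np.ctypeslib.as_array(p, (height, width, 3)).copy()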
Example #8
    # Create output folder
    if os.path.isdir(output_dir):
        for file in glob.iglob(os.path.join(output_dir, image_extension)):
            os.remove(file)
    else:
        os.makedirs(output_dir, mode=0o777, exist_ok=True)

    # Open video stream
    cap = cv2.VideoCapture(video_file)  # open the video stream
    ret_val, img = cap.read()
    h, w, c = img.shape
    # ratio=np.min([540/float(h), 960/float(w)])

    # Load YOLO weights (two networks)
    nn1 = pyyolo.init(darknet_path, data_file1, cfg_file1, weight_file1)  # load darknet into memory
    nn2 = pyyolo.init(darknet_path, data_file2, cfg_file2, weight_file2)
    frame_id = 0
    time_start = datetime.now()
    while (cap.isOpened()):
        if frame_id % 100==0:
            print("Processing frame: ", frame_id)

        ret_val, img = cap.read()
        frame_id+=1
        if not ret_val:
            break

        #if frame_id % 5 != 0:
        #    continue

        img_rgb=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
Example #9
import time
import hashlib

import cv2
import pyyolo

from trendi import constants

#get yolo net and keep it in mem
#datacfg = 'cfg/coco.data'
#datacfg = '/data/jeremy/pyyolo/darknet/cfg/coco.data'
datacfg = '/data/jeremy/darknet_orig/cfg/hls.data'
cfgfile = '/data/jeremy/darknet_orig/cfg/yolo-voc_544.cfg'
#cfgfile = '/data/jeremy/pyyolo/darknet/cfg/tiny-yolo.cfg'
#weightfile = '/data/jeremy/pyyolo/tiny-yolo.weights'
weightfile = '/data/jeremy/darknet_orig/bb_hls1/yolo-voc_544_95000.weights'
thresh = 0.24
hier_thresh = 0.5
pyyolo.init(datacfg, cfgfile, weightfile)


def detect_yolo_pyyolo(img_arr, url='', classes=constants.hls_yolo_categories):
    # from file
    print('----- test original C using a file')
    hash = hashlib.sha1()
    hash.update(str(time.time()))
    img_filename = hash.hexdigest()[:10] + 'pyyolo.jpg'
    #  img_filename = 'incoming.jpg'
    cv2.imwrite(img_filename, img_arr)

    outputs = pyyolo.test(img_filename, thresh, hier_thresh)
    relevant_bboxes = []
    for output in outputs:
        print(output)
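The example is truncated here; given the relevant_bboxes list initialized above and the output keys used throughout these examples, the rest of the loop plausibly filters detections by class. A hedged sketch, not the original code:

    for output in outputs:
        if output['class'] in classes:
            relevant_bboxes.append([output['left'], output['top'],
                                    output['right'], output['bottom'],
                                    output['prob'], output['class']])
    return relevant_bboxes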
Example #10
from flask import Flask, request, jsonify, abort
# import json, urllib, sys, cv2, shutil, time
import json, urllib, sys, shutil, time
from io import BytesIO
from PIL import Image
import numpy as np
import pyyolo
import IOfuncs as iof

darknet_path = '../pyyolo/darknet'
datacfg = 'cfg/coco.data'
cfgfile = 'cfg/yolo.cfg'
weightfile = '../../weights/yolo.weights'
thresh = 0.24
hier_thresh = 0.5
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)

application = Flask(__name__)

# # WHITELIST
# # Limit use of API to IPs in whitelist.txt
# @application.before_request
# def limit_remote_addr():
# whitelist = list()
# for line in open('whitelist.txt'):
# line = line.strip()
# whitelist.append(line)
# if request.remote_addr not in whitelist:
# print "[ABORT] IP not on whitelist:", request.remote_addr
# abort(403)
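The snippet ends before any route is defined; a minimal sketch of a detection endpoint built only on the imports above (the route name, upload field and response shape are assumptions):

@application.route('/detect', methods=['POST'])
def detect():
    # decode the POSTed image into an H x W x 3 uint8 RGB array
    img = np.array(Image.open(BytesIO(request.files['image'].read())).convert('RGB'))
    chw = img.transpose(2, 0, 1)
    c, h, w = chw.shape
    data = np.ascontiguousarray(chw.ravel() / 255.0, dtype=np.float32)
    return jsonify(detections=pyyolo.detect(w, h, c, data, thresh, hier_thresh))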
Example #11
def worker(camId):
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    Aravis.enable_interface("Fake")
    try:
        cam = Aravis.Camera.new(camId)
        print("Camera found")
    except:
        print("No camera found")
        exit()

    thresh = 0.45
    hier_thresh = 0.5

    #cam.set_region (0,0,2048,1536)
    #cam.set_region (0,0,512,512)
    #cam.set_frame_rate (10.0)
    #cam.set_exposure_time(500000)
    #cam.set_pixel_format (Aravis.PIXEL_FORMAT_MONO_8)

    #print(dir(cam))

    stream = cam.create_stream(None, None)

    payload = cam.get_payload()

    [x, y, width, height] = cam.get_region()

    print("Camera vendor : %s" % (cam.get_vendor_name()))
    print("Camera model  : %s" % (cam.get_model_name()))
    print("Camera id     : %s" % (cam.get_device_id()))
    print("ROI           : %dx%d at %d,%d" % (width, height, x, y))
    print("Payload       : %d" % (payload))
    print("Pixel format  : %s" % (cam.get_pixel_format_as_string()))

    #cv2.namedWindow('Live Stream', flags=0)
    cv2.namedWindow(camId, flags=0)

    # print(dir(stream))

    #for i in range(0,50):
    #stream.push_buffer (Aravis.Buffer.new_allocate (payload))

    cam.start_acquisition()

    lastTime = time.time()
    transposeTime = 0
    i = 0

    while (True):
        stream.push_buffer(Aravis.Buffer.new_allocate(payload))
        buffer = stream.pop_buffer()
        transposeTime = time.time()
        b = ctypes.cast(buffer.data, ctypes.POINTER(ctypes.c_uint8))
        im = np.ctypeslib.as_array(b, (height, width))
        #im = im.copy()
        #print("shape: ", im.shape)
        #cv2.imshow("Live Stream", im.copy())
        gray = im.copy()
        cv2.imshow(camId, gray)
        #im = np.zeros((3,gray.shape[0],gray.shape[1]))
        #im[1,:,:] = gray
        #im = cv2.cvtColor(gray,cv2.COLOR_GRAY2RGB)
        im = np.stack((im, ) * 3, -1)
        im = im.transpose(2, 0, 1)
        c, h, w = im.shape[0], im.shape[1], im.shape[2]
        #print(im.shape)
        #cv2.imshow(camId, im.copy())
        im = im.ravel() / 255.0
        #print(im.shape)
        data = np.ascontiguousarray(im, dtype=np.float32)
        #print("TRANS-FPS: ", 1.0/(time.time()-transposeTime))
        predictions = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
        # { 'left':0, 'right': 767, 'top': 0, 'bottom': x, class': x, prob: x, }
        for output in predictions:
            left, right, top, bottom, what, prob = (output['left'], output['right'],
                                                    output['top'], output['bottom'],
                                                    output['class'], output['prob'])
            print(output)
            if (what == 'car'):
                d_from_top = top
                d_from_bottom = bottom
                d_from_left = left
                d_from_right = right
                d_height = top
                d_width = left
                if (d_height > 0.5):
                    print(output)
                    #trigger other cameras
        cv2.waitKey(1)
        print("CAM: ", camId, " Frame: ", i, " FPS: ",
              1.0 / (time.time() - lastTime), "RES: ", h, " x ", w)
        lastTime = time.time()
        i += 1
    cam.stop_acquisition()
Example #12
def worker(camId):
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']
    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    try:
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        print("Camera found")

    except:
        print("Camera Not Found")
        exit()

    cam.start_image_acquisition()

    lastTime = time.time()
    transposeTime = 0
    i = 0
    numberCars = 0
    lastSnapshot = None
    cv2.namedWindow(WINDOW_NAME, flags=0)

    while (True):
        buffer = cam.fetch_buffer()
        image = buffer.payload.components[0].data
        small = cv2.resize(image,
                           dsize=(320, 200),
                           interpolation=cv2.INTER_CUBIC)
        clone = small.copy()

        rgb = cv2.cvtColor(clone, cv2.COLOR_BayerRG2RGB)
        #img = rgb.transpose(2,0,1)
        img = np.rot90(rgb)
        print(rgb.shape)
        c, h, w = img.shape[0], img.shape[1], img.shape[2]
        #c, h, w = img.shape[2], img.shape[1], img.shape[0]
        data = img.ravel() / 255.0
        data = np.ascontiguousarray(data, dtype=np.float32)  # pyyolo.detect needs contiguous float32
        predictions = pyyolo.detect(w, h, c, data, thresh, hier_thresh)

        #im = np.zeros((3,small.shape[1],small.shape[0]))

        #im[0,:,:] = np.rot90(small)
        #im[1,:,:] = np.rot90(small)
        #im[2,:,:] = np.rot90(small)

        #im = rgb
        print(rgb.shape)
        #c, h, w = im.shape[0], im.shape[1], im.shape[2]

        # im = im.transpose(2,0,1)

        #c, h, w = im.shape[0], im.shape[1], im.shape[2]

        #c, h, w = 1, image.shape[0], image.shape[1]
        #im = image.copy()
        #data = im.ravel()/255.0
        #print(data.shape)
        #data = np.ascontiguousarray(data, dtype=np.float32)
        #print(data.shape)

        for output in predictions:
            left, right, top, bottom, what, prob = (output['left'], output['right'],
                                                    output['top'], output['bottom'],
                                                    output['class'], output['prob'])
            #print(output)
            #lastSnapshot = snapshot.copy()
            #cv2.imshow("Snapshots", lastSnapshot)
            if (what == 'car'):
                print(output)
                numberCars += 1
                cv2.rectangle(rgb, (50, 50), (100, 150), (0, 255, 0), 5)
                if (camId == "CAM_2"):
                    urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()

        if IS_ROTATE:
            cv2.imshow(WINDOW_NAME, np.rot90(rgb))
        else:
            cv2.imshow(WINDOW_NAME, rgb)

        cv2.waitKey(1)
        buffer.queue()

        print("Count: ", numberCars, " Frame: ", i, " FPS: ",
              1.0 / (time.time() - lastTime))
        lastTime = time.time()
        i += 1

    cam.stop_image_acquisition()
    cam.destroy()
Example #13
 def _init_model(self):
     LOG.info('init general python yolo model')
     pyyolo.init(self.params['darknet_path'],
                 os.path.join('../', self.params['datacfg']),
                 os.path.join('../', self.params['cfgfile']),
                 self.params['weightfile'])
Example #14
                        images = np.vstack([x])
                        y_proba = weight.predict(images, batch_size=1)
                        y_classes = y_proba.argmax(axis=-1)
                        y_percent = y_proba[0][y_classes[0]]
                        if y_percent < 0.65:
                            continue

                        folder_dest = os.path.join(RESULTS_DIR,
                                                   LABELS[y_classes[0]],
                                                   maker + '|' + model)
                        if not os.path.exists(folder_dest):
                            os.makedirs(folder_dest)

                        new_file = str(index[y_classes[0]]).zfill(6) + ".jpg"
                        new_img_dest = os.path.join(folder_dest, new_file)

                        img.save(new_img_dest)
                        index[y_classes[0]] = index[y_classes[0]] + 1
                    except:
                        print("Filter Error: %s" %
                              os.path.join(images_dir, imagefile))


if __name__ == '__main__':
    pyyolo.init(DARKNET_PATH, DATA_CFG, CFG_FILE, WEIGHT_FILE)
    #filter_image()
    load_image('DBA-NZE144G_0234941.jpg')
Example #15
def detector():
    # Initialize publisher ROS node
    pub = rospy.Publisher('predictions', Predictions, queue_size=10)
    pub1 = rospy.Publisher('peoplecount', Peoplecount, queue_size=10)
    pub2 = rospy.Publisher('videostream', Image, queue_size=1)
    rospy.init_node('detector', anonymous=True)
    # Create SORT tracker object
    mot_tracker = Sort()
    # Define paths for yolo files
    darknet_path = '/home/ubuntu/catkin_ws/src/jetsontx1_cvmodule/src/pyyolo/darknet'  # Only './darknet' is dependent on location of rosrun command
    datacfg = 'cfg/coco.data'
    cfgfile = 'cfg/yolov3-tiny.cfg'
    weightfile = '../yolov3-tiny.weights'  #'/media/ubuntu/SDcard/yoloWeights/yolov2-tiny.weights' this also works but it loads way slower
    filename = darknet_path + '/data/person.jpg'
    # Image resolution parameters
    (width, height) = (1280, 720)  # Use (672, 376) for VGA and (1280, 720) for HD720 resolution
    # Initialize visualization
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    video = cv2.VideoWriter('predictionstest.avi', fourcc, 10, (width, height))
    # Initialize pyyolo
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    # Initialize face detector
    face_cascade = cv2.CascadeClassifier(
        '/usr/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
    smile_cascade = cv2.CascadeClassifier(
        '/usr/share/OpenCV/haarcascades/haarcascade_smile.xml')
    # Create a Camera object
    zed = sl.Camera()
    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD1080, HD720 or VGA video mode
    init_params.camera_fps = 15  # Set fps to 15
    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
    zed.set_camera_settings(sl.VIDEO_SETTINGS.EXPOSURE, 50)
    image = sl.Mat()
    point_cloud = sl.Mat()
    colours = 255 * np.random.rand(32, 3)  # For drawing different colours on BBs

    runtime_parameters = sl.RuntimeParameters()
    while not rospy.is_shutdown():
        start = time.time()
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # A new image is available if grab() returns SUCCESS
            zed.retrieve_image(image, sl.VIEW.LEFT)
            data = image.get_data()
            gray_picture = cv2.cvtColor(
                data, cv2.COLOR_BGR2GRAY
            )  # Make picture gray for face/smile detection
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
            Data = data.transpose(2, 0, 1)
            start5 = time.time()
            Data = Data.ravel() / 255.0
            end5 = time.time()
            Data = np.ascontiguousarray(Data, dtype=np.float32)
            start1 = time.time()
            outputs = pyyolo.detect(
                width, height, 4, Data, 0.5, 0.8
            )  #pyyolo.detect returns: [{'right':, 'bottom':, 'top':, 'class':, 'prob':, 'left':}]
            end1 = time.time()
            print("Section cycle time: ", end1 - start1)
            dets = np.empty((0, 5), int)
            count = 0
            for output in outputs:
                if output['class'] == 'person':  #track only people
                    count = count + 1  # Count detected people
                    dets = np.append(dets, [[
                        output['left'], output['top'], output['right'],
                        output['bottom'], output['prob']
                    ]],
                                     axis=0)
            people = Peoplecount()
            people.tot_detected_people = count
            pub1.publish(people)
            preds = Predictions()
            trackers = mot_tracker.update(
                dets
            )  #update tracking ID. mot_tracker.update() returns: [[x1,y1,x2,y2,id]]
            for d in trackers:
                d = d.astype(np.int32)
                # Create a dictionary to store info for publishing
                detectinfo = {
                    'left': d[0],
                    'top': d[1],
                    'right': d[2],
                    'bottom': d[3],
                    'class': 'person',
                    'ID': int(d[4])
                }

                euclidean_distance(detectinfo, point_cloud)
                relative_coordinates(detectinfo, width)
                start3 = time.time()
                facesmile_detect(detectinfo, gray_picture, data, face_cascade,
                                 smile_cascade)
                end3 = time.time()
                #print("facesmile detect cycle time: ", end3 - start3)
                #print detectinfo['smile']

                pred = Prediction()
                #pred.probabilities.append()
                pred.classes.append(detectinfo['class'])
                pred.xmin = detectinfo['left']
                pred.ymin = detectinfo['top']
                pred.xmax = detectinfo['right']
                pred.ymax = detectinfo['bottom']
                pred.id = detectinfo['ID']
                pred.face = detectinfo['face']
                pred.smile = detectinfo['smile']
                pred.distance = detectinfo['distance']
                pred.angle = detectinfo['angle']
                pred.xcoord = detectinfo['x']
                pred.ycoord = detectinfo['y']
                preds.predictions.append(pred)

                label = detectinfo['class'] + " " + str(detectinfo['ID'])

                cv2.rectangle(data, (d[0], d[1]), (d[2], d[3]),
                              (colours[d[4] % 32, 0], colours[d[4] % 32, 1],
                               colours[d[4] % 32, 2]), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(data, label, (d[2], d[1] + 25), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            pub.publish(preds)
            msg_frame = CvBridge().cv2_to_imgmsg(data, "8UC4")
            pub2.publish(msg_frame)
            #video.write(data[:,:,:3])#because data.shape is (376,672,4) and it only supports 3 channels.
            cv2.imshow("image", data)
            cv2.waitKey(35)
        end = time.time()
        print("Total cycle time: ", end - start)
    # Close the camera
    zed.close()
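For reference, the tracker data flow above in isolation; a tiny standalone sketch of the array shapes SORT consumes and returns (the module name and the values are assumptions):

import numpy as np
from sort import Sort            # assumed module name for the SORT tracker

mot_tracker = Sort()
dets = np.array([[100., 80., 220., 310., 0.91],    # one row per detection:
                 [400., 60., 480., 290., 0.77]])   # [x1, y1, x2, y2, score]
trackers = mot_tracker.update(dets)   # rows of [x1, y1, x2, y2, track_id]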
Example #16
import sys
sys.path.append('/usr/local/lib/python2.7/dist-packages/')

import pyyolo
import numpy as np
import cv2

darknet_path = './darknet'
data_cfg = 'deployment/easy.data'
cfg_file = 'deployment/easy-tiny.cfg'
weight_file = 'deployment/easy-tiny.backup'
thresh = 0.24
hier_thresh = 0.5

pyyolo.init(darknet_path, data_cfg, cfg_file, weight_file)


def predict(img):
    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print w, h, c
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    return outputs


def clean_up():
    pyyolo.cleanup()
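A usage sketch for the two helpers above (the image path is a placeholder):

import cv2

img = cv2.imread('test.jpg')    # any H x W x 3 uint8 image
for det in predict(img):
    print(det['class'], det['prob'],
          det['left'], det['top'], det['right'], det['bottom'])
clean_up()                      # free the darknet model when done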

Example #17
def init_yolo(yoloPath):
    darknet_path = yoloPath+"/darknet"
    datacfg = 'cfg/coco.data'
    cfgfile = 'cfg/tiny-yolo.cfg'
    weightfile = '../tiny-yolo.weights'
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
Example #18
File: example.py  Project: Roboy/vision
import imutils

darknet_path = './darknet'
datacfg = 'cfg/coco.data'
cfgfile = 'cfg/tiny-yolo.cfg'
weightfile = '../tiny-yolo.weights'
#filename = darknet_path + '/data/person.jpg'
filename = "/home/roboy/outputRoboy.mp4"
thresh = 0.24
hier_thresh = 0.5
cam = cv2.VideoCapture(filename)
ret_val, img = cam.read()
print(ret_val)
if not ret_val:
	sys.exit()
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
outVideo = cv2.VideoWriter('outputRoboySkyfall.mp4',fourcc, 20.0, (800,533))
 
print('----- test python API using a file')
while True:
	ok, img = cam.read()
	if not ok:
		sys.exit()
	frame = imutils.resize(img, width=800)
	img = img.transpose(2, 0, 1)
	c, h, w = img.shape[0], img.shape[1], img.shape[2]
	print(w, h, c)
	data = img.ravel()/255.0
	data = np.ascontiguousarray(data, dtype=np.float32)
	outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)	
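The loop is cut off after detect(); given the VideoWriter set up above, the tail of the loop presumably draws the boxes and writes the resized frame. A hedged sketch (box colour, and the assumption that the source aspect ratio matches the 800x533 writer, are mine):

	scale = 800.0 / w    # detections are in full-resolution coordinates
	for output in outputs:
		cv2.rectangle(frame,
		              (int(output['left'] * scale), int(output['top'] * scale)),
		              (int(output['right'] * scale), int(output['bottom'] * scale)),
		              (0, 255, 0), 2)
	outVideo.write(frame)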
Example #19
    # Create output folder
    if os.path.isdir(output_dir):
        for file in glob.iglob(os.path.join(output_dir, image_extension)):
            os.remove(file)
    else:
        os.makedirs(output_dir, mode=0o777, exist_ok=True)

    # Open video stream
    cap = cv2.VideoCapture(video_file)  # open the video stream
    ret_val, img = cap.read()
    h, w, c = img.shape
    # ratio=np.min([540/float(h), 960/float(w)])

    # Load YOLO weights
    pyyolo.init(darknet_path, data_file, cfg_file,
                weight_file)  # load darknet into memory

    frame_id = 0
    time_start = datetime.now()
    while (cap.isOpened()):
        if frame_id % 100 == 0:
            print("Processing frame: ", frame_id)

        ret_val, img = cap.read()
        frame_id += 1
        if not ret_val:
            break

        #if frame_id % 5 != 0:
        #    continue

        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)