Example 1
from darkflow.net.build import TFNet


def run_darkflow():
    options = {
        "model": "./cfg/v1.1/tiny-yolov1.cfg",
        "load": "./bin/tiny-yolo-v1.1.weights",
        "threshold": 0.1,
        "demo": "camera",
    }

    tfnet = TFNet(options)
    # pl1, pl2, t1 and t2 are defined elsewhere in the originating project
    # (the stock darkflow camera() takes no arguments)
    return tfnet.camera(pl1, pl2, t1, t2)
Example 2
from darkflow.net.build import TFNet


# Subject is the abstract observer-pattern base class from the originating project
class YOLOv2Subject(Subject):
    def __init__(self, FLAGS):
        self.observers = []
        self.yoloNet = TFNet(FLAGS)

    def attach(self, observer):
        self.observers.append(observer)

    def detach(self, observer):
        self.observers.remove(observer)

    def notify(self, result):
        print("YOLOv2Subject: update people detection")
        for observer in self.observers:
            observer.update(result)

    def detectImage(self, image):
        self.notify(self.yoloNet.return_predict(image))

    def detectVideo(self):
        self.yoloNet.camera()
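
For context, the class above wires darkflow into an observer pattern: anything with an update(result) method can be attached. The sketch below is only illustrative usage; the PeopleCounter observer, the cfg/weights paths and the sample image are hypothetical placeholders.

import cv2


class PeopleCounter:
    # minimal observer: darkflow's return_predict() hands back a list of dicts
    # with "label", "confidence", "topleft" and "bottomright" keys
    def update(self, result):
        people = [d for d in result if d["label"] == "person"]
        print("people detected:", len(people))


options = {"model": "cfg/yolov2.cfg", "load": "bin/yolov2.weights", "threshold": 0.4}
subject = YOLOv2Subject(options)
subject.attach(PeopleCounter())
subject.detectImage(cv2.imread("sample.jpg"))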
Example 3
import os

from darkflow.defaults import argHandler
from darkflow.net.build import TFNet


def cliHandler(args):
    FLAGS = argHandler()
    FLAGS.setDefaults()
    FLAGS.parseArgs(args)

    # make sure all necessary dirs exist
    def _get_dir(dirs):
        for d in dirs:
            this = os.path.abspath(os.path.join(os.path.curdir, d))
            if not os.path.exists(this):
                os.makedirs(this)

    _get_dir([
        FLAGS.imgdir, FLAGS.binary, FLAGS.backup,
        os.path.join(FLAGS.imgdir, 'out'), FLAGS.summary
    ])

    # fix FLAGS.load to appropriate type
    try:
        FLAGS.load = int(FLAGS.load)
    except ValueError:
        pass

    tfnet = TFNet(FLAGS)

    if FLAGS.demo:
        tfnet.camera()
        exit('Demo stopped, exit.')

    if FLAGS.train:
        print('Enter training ...')
        tfnet.train()
        if not FLAGS.savepb:
            exit('Training finished, exit.')

    if FLAGS.savepb:
        print('Rebuild a constant version ...')
        tfnet.savepb()
        exit('Done')

    tfnet.predict()
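
In the darkflow repository this handler is driven by the "flow" entry-point script, which simply forwards the raw command-line arguments; a minimal driver mirroring that script looks like this:

#! /usr/bin/env python
import sys

from darkflow.cli import cliHandler

# forward argv to the handler defined above
cliHandler(sys.argv)

A typical invocation would then be something like: python flow --model cfg/tiny-yolo.cfg --load bin/tiny-yolo.weights --demo camera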
Example 4
from darkflow.net.build import TFNet

options = {
    "model": "cfg/yolo-voc-hyperion.cfg",
    "load": "weights/yolo-voc_50000.weights",
    "threshold": 0.1,
    "labels": "image.names",
    "demo": "camera",
    "gpu": 0.5,
    "address": 'localhost',
    "port": 48051,
    "UDP": True
}
tfnet = TFNet(options)

#imgcv = cv2.imread("./darkflow/sample_img/sample_dog.jpg")
#result = tfnet.return_predict(imgcv)
tfnet.camera()
exit("Demo Stopped, exiting.")
Example 5
import os
import sys

import cv2

from darkflow.net.build import TFNet


class yolo(object):

    # yolo_dir and cwd are module-level globals defined elsewhere in the originating project
    modeldir = yolo_dir
    parentdir = cwd

    def __init__(self):
        print("yolo model created")

    def load(self, model='tiny-yolo', threshold=0.25, gpu=0):
        os.chdir(yolo_dir)
        
        model_list = ['tiny-yolo', 'tiny-yolo-voc', 'yolo']
        model_custom_list = ['yolo-hand', 'tiny-yolo-hand']
        if model in model_list:
            self.options = {"model": "cfg/" + model + ".cfg",
                            "load": "bin/" + model + ".weights",
                            "threshold": threshold,
                            "gpu": gpu}
        elif model in model_custom_list:
            # custom models load from a darkflow training checkpoint number rather than a .weights file
            self.options = {"model": "cfg/" + model + ".cfg",
                            "load": 100,
                            "threshold": threshold,
                            "gpu": gpu}
        else:
            raise ValueError('Unsupported model. Please pass one of: '
                             'tiny-yolo, tiny-yolo-voc, yolo, yolo-hand, tiny-yolo-hand')
            
        self.tfnet = TFNet(self.options)
        os.chdir(self.parentdir)
        #print(self.parentdir)
        return None
    
    def predict(self, img_path):
        imgcv = cv2.imread(img_path, 1)
        result = self.tfnet.return_predict(imgcv)
        return result

    def predict_imgcv(self, imgcv):
        result = self.tfnet.return_predict(imgcv)
        return result
    
    def predict_imgcv_list(self, imgcvs, threshold=0.3):
        buffer_inp = list()
        buffer_pre = list()
        boxes_list = list()
        
        for fr in imgcvs:
            preprocessed = self.tfnet.framework.preprocess(fr)
            buffer_pre.append(preprocessed)
            buffer_inp.append(fr)
        
        feed_dict = {self.tfnet.inp: buffer_pre}
        net_out = self.tfnet.sess.run(self.tfnet.out, feed_dict)
        for im, single_out in zip(buffer_inp, net_out):
            h, w, _ = im.shape
            boxes = self.tfnet.framework.findboxes(single_out)
            boxesInfo = list()
            for box in boxes:
                tmpBox = self.tfnet.framework.process_box(box, h, w, threshold)
                if tmpBox is None:
                    continue
                boxesInfo.append({
                    "label": tmpBox[4],
                    "confidence": tmpBox[6],
                    "topleft": {
                        "x": tmpBox[0],
                        "y": tmpBox[2]},
                    "bottomright": {
                        "x": tmpBox[1],
                        "y": tmpBox[3]}
                })
            
            # add the processed boxes for this image to the list
            boxes_list.append(boxesInfo)

        return boxes_list
    
    def draw_save_pic(self, img_path, out_path):
        imgcv = cv2.imread(img_path, 1)
        result = self.tfnet.return_predict(imgcv)
        # draw_box is a drawing helper defined elsewhere in the originating project
        draw_box(result, imgcv)
        cv2.imwrite(out_path, imgcv)
        return imgcv

    def draw_return_pic(self,frame):
        buffer_inp = list()
        buffer_pre = list()
        results = list()
        for fr in frame:
            preprocessed = self.tfnet.framework.preprocess(fr)
            buffer_pre.append(preprocessed)
            buffer_inp.append(fr)
        
        feed_dict = {self.tfnet.inp: buffer_pre}
        net_out = self.tfnet.sess.run(self.tfnet.out, feed_dict)
        for img, single_out in zip(buffer_inp, net_out):
            postprocessed = self.tfnet.framework.postprocess(single_out, img, False)
            results.append(postprocessed)

        return results

    def demo(self,option='camera'):
        self.tfnet.FLAGS.demo = option
        self.tfnet.camera()
        
        return None
    
    def video_return_json(self, option):
        # note: the 'option' argument is unused; the default camera (index 0) is always opened
        camera = cv2.VideoCapture(0)
        while camera.isOpened():
            _, frame = camera.read()
            if frame is None:
                print('\n End of Video')
                break

            result = self.tfnet.return_predict(frame)
            if result:
                sys.stdout.write('%s\r' % result[0])
                sys.stdout.flush()
            #cv2.imshow('', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        camera.release()
        cv2.destroyAllWindows()
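
An illustrative driver for the wrapper class above, assuming yolo_dir and cwd are set and the standard darkflow cfg/weights files are in place (the image paths are placeholders):

detector = yolo()
detector.load(model='tiny-yolo', threshold=0.4, gpu=0)

# single-image prediction returns darkflow's list of detection dicts
for d in detector.predict('sample_img/sample_dog.jpg'):
    print(d['label'], d['confidence'], d['topleft'], d['bottomright'])

# draw the boxes on the image and save the annotated copy
detector.draw_save_pic('sample_img/sample_dog.jpg', 'out/sample_dog_boxes.jpg')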
Example 6
def start_camera():
    from darkflow.net.build import TFNet
    # FLAGS is assumed to be defined in the surrounding module (e.g. a parsed argHandler instance)
    tfnet = TFNet(FLAGS)

    tfnet.camera()
    exit('Demo stopped, exit.')