Code example #1
def get_model():
    config_path = "./darkflow_config.ini"
    config_path = get_right_path(config_path)
    (model_path, backup_path, label_file, load, detect_threshold,
     grasp_backup_path, grasp_net_name) = load_config(config_path)
    model_path = get_right_path(model_path)
    backup_path = get_right_path(backup_path)
    label_file = get_right_path(label_file)
    grasp_backup_path = get_right_path(grasp_backup_path)
    load = checkload(load)
    detect_threshold = float(detect_threshold)
    print("Model Files in:" + model_path)
    options = {
        "model": model_path,
        "backup": backup_path,
        "load": load,
        "gpu": 0.5,
        "label": label_file,
        "threshold": detect_threshold,
        "pred_grasp": True,
        "pred_grasp_crop_percent": 1.1,  # Original trained data value: 1.1
        "grasp_backup": grasp_backup_path,
        "grasp_net_name": grasp_net_name
    }
    model = TFNet(options)
    return model
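
The load_config, get_right_path, and checkload helpers above are project-specific and not shown. A minimal sketch of what load_config might look like using Python's standard configparser, assuming a single [darkflow] section whose keys match the unpacked names (the section and key names are hypothetical):

import configparser

def load_config(config_path):
    # Hypothetical reconstruction of the project's config reader.
    parser = configparser.ConfigParser()
    parser.read(config_path)
    section = parser["darkflow"]  # assumed section name
    return (section["model"], section["backup"], section["label"],
            section["load"], section["threshold"],
            section["grasp_backup"], section["grasp_net_name"])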
Code example #2
def getModel():
    # print('***in get model***')
    # options = {"model": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/cfg/tiny-yolo-voc-sts.cfg",
    #             "pbLoad": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.pb",
    #             "metaLoad":"/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.meta",
    #             "load":"/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph",
    #             "threshold":"0.1"
    #             }

    options = {
        "model": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/cfg/tiny-yolo-voc-sts.cfg",
        "pbLoad": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.pb",
        "metaLoad": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.meta",
        "backup": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/ckpt/",
        "load": -1,
        "threshold": 0.8
    }

    model = TFNet(options)
    print('***done loading***')
    return model
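
The pbLoad/metaLoad pair above points at a graph previously frozen with darkflow's --savepb option (the flow scripts in later examples end at exactly that branch), while "backup" plus "load": -1 falls back to the most recent training checkpoint. Freezing such a graph might be invoked like this, with illustrative paths:

# Freeze a trained model to built_graph/*.pb and *.meta using the darkflow CLI:
#   flow --model cfg/tiny-yolo-voc-sts.cfg --load -1 --savepb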
Code example #3
File: darkflow.py Project: leopardfrog/yolo_ros
def getModel():
    options = {
        "model": "./cfg/tiny-yolo-udacity.cfg",
        "backup": "./ckpt/",
        "load": 8987,
        "gpu": 1.0
    }
    model = TFNet(options)
    return model
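
In darkflow, an integer "load" (like 8987 above) selects that checkpoint step from the "backup" directory, while a string is treated as a .weights file path; this is why the flow scripts in later examples try int(FLAGS.load) and fall back to the string. A short sketch contrasting the two, with illustrative paths:

from darkflow.net.build import TFNet

# Resume from training checkpoint 8987 stored under ./ckpt/
tfnet_from_ckpt = TFNet({"model": "./cfg/tiny-yolo-udacity.cfg",
                         "backup": "./ckpt/", "load": 8987})

# Load pretrained Darknet weights from a .weights file instead
tfnet_from_weights = TFNet({"model": "cfg/yolo.cfg", "load": "bin/yolo.weights"})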
Code example #4
from net.build import TFNet
import numpy as np
import cv2
from os import sep

options = {
    "model": "cfg{}yolo-tiny.cfg".format(sep),
    "load": "",
    "threshold": 0.1,
    "gpu": 1.0,
    "verbalise": True
}

tfnet = TFNet(options)
im = cv2.imread('test/dog.jpg')
print(type(im), im.shape)
print(tfnet.return_predict(im))
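
Each detection returned by return_predict is a plain dict (the same format is documented in a comment in the DeepDetector example below). A minimal sketch that draws the detections back onto the image, reusing the tfnet and im above; the output path is illustrative:

# Each element of the result list looks like:
# {"label": "person", "confidence": 0.56,
#  "topleft": {"x": 184, "y": 101}, "bottomright": {"x": 274, "y": 382}}
for det in tfnet.return_predict(im):
    top_left = (det["topleft"]["x"], det["topleft"]["y"])
    bottom_right = (det["bottomright"]["x"], det["bottomright"]["y"])
    cv2.rectangle(im, top_left, bottom_right, (0, 255, 0), 2)
    cv2.putText(im, det["label"], (top_left[0], top_left[1] - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imwrite("test/dog_out.jpg", im)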
Code example #5
from net.build import TFNet
import cv2

options = {
    "model": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/cfg/tiny-yolo-voc-sts.cfg",
    "pbLoad": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.pb",
    "metaLoad": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/built_graph/tiny-yolo-voc-sts.meta",
    "backup": "/media/datta/Sri Datta/dd2419_ws/src/yolo-sts/scripts/ckpt/",
    "load": -1,
    "threshold": 0.8
}

tfnet = TFNet(options)

imgcv = cv2.imread("sample.jpg")
result = tfnet.return_predict(imgcv)  # return_predict expects a single HxWx3 image, not a batch
print(result)
Code example #6
                    fontScale, (0, 0, 0), thickness)

    cv2.imwrite(imgfile, imgcv)
    return imgfile


def log(string):
    sendTextArrayToTerminal(["[INFO] " + string])


@app.route("/event", methods=['POST'])
def event():
    log('Received request')
    images = getImageArray()
    log('Starting Detection using YOLO ... ')
    for image in images:
        detect(image)
    log('Detection Completed')
    log('Sending Image with detections .... ')
    sendImageArray(images, 'file_path')
    return jsonify(status='ok')


if __name__ == '__main__':
    token = os.getenv('CVFY_TOKEN')
    if token is not None:
        tfnet = TFNet(options)
        app = register(token)
        app.run()
    else:
        print "\'CVFY_TOKEN\' environment variable not found !!! \n EXITING ...."
Code example #7
File: darkflow.py Project: UndeadBlow/darkflow
# make sure all necessary dirs exist
def get_dir(dirs):
    for d in dirs:
        this = os.path.abspath(os.path.join(os.path.curdir, d))
        if not os.path.exists(this): os.makedirs(this)


get_dir([FLAGS.binary, FLAGS.backup])

# fix FLAGS.load to appropriate type
try:
    FLAGS.load = int(FLAGS.load)
except ValueError:
    # FLAGS.load may also be a checkpoint path or a .weights file name
    pass

tfnet = TFNet(FLAGS)

if FLAGS.profile:
    tfnet.framework.profile(tfnet)
    exit()

if FLAGS.demo:
    tfnet.camera(FLAGS.demo)
    exit()

if FLAGS.train:
    print('Enter training ...')
    tfnet.train()
    if not FLAGS.savepb: exit('Training finished')

if FLAGS.savepb:
    print('Rebuild a constant version ...')
    tfnet.savepb()
    exit('Done')
Code example #8
from net.build import TFNet
import cv2

options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights"}

tfnet = TFNet(options)

imgcv = cv2.imread("/Users/ruslan/0_persons.jpg")
result = tfnet.return_predict(imgcv)
print("Original: {}".format(0))
print("Persons: {}".format(
    len([x["label"] for x in result if x["label"] == "person"])))

imgcv = cv2.imread("/Users/ruslan/7_persons.jpg")
result = tfnet.return_predict(imgcv)
print("Original: {}".format(7))
print("Persons: {}".format(
    len([x["label"] for x in result if x["label"] == "person"])))

for finding in result:
    if finding["label"] == "person":
        cv2.rectangle(
            imgcv, (finding["topleft"]["x"], finding["topleft"]["y"]),
            (finding["bottomright"]["x"], finding["bottomright"]["y"]),
            (255, 0, 0), 2)

cv2.imwrite("/Users/ruslan/7_persons_findings.jpg", imgcv)

imgcv = cv2.imread("/Users/ruslan/11_persons.jpg")
result = tfnet.return_predict(imgcv)
print("Original: {}".format(11))
print("Persons: {}".format(
    sum(1 for x in result if x["label"] == "person")))
Code example #9
class DeepDetector(BaseComponent):
    '''
    A DeepDetector uses a YOLOv2 convolutional neural network model for
    object detection.
    '''
    def __init__(self, cfg):
        BaseComponent.__init__(self, cfg)

        params = self.cfg['params']

        tfnet_cfg = {
            "model": params['model'],
            "load": params['weights'],
            "config": '/root/darkflow/cfg',
            "verbalise": True,
            "threshold": 0.1
        }

        self.nn = TFNet(tfnet_cfg)

    def execute(self, input_data, input_directory, output_directory):

        # Check what the configured inputs are - either the complete image
        # ('files') or ROIs output by other components.
        all_detections = []
        for source in self.cfg['inputs']:
            if source == 'files':
                detections = self.detect_in_image(input_data)
                all_detections.extend(detections)

            else:
                triggerlabels = self.cfg['params'].get('triggerlabels')
                if not triggerlabels:
                    print(
                        "Warning: pipeline file specifies {} in inputs but there are no triggerlabels in params"
                        .format(source))
                    continue

                comp_outputs = input_data.get(source)
                if comp_outputs:
                    comp_reports = comp_outputs['reports']
                    detections = self.detect_in_rois(input_data, comp_reports)
                    all_detections.extend(detections)

        # Each detection is of the form
        # {"label":"person", "confidence": 0.56, "topleft": {"x": 184, "y": 101}, "bottomright": {"x": 274, "y": 382}}
        # These should be transformed to our preferred JSON output documented in basecomponent.py

        reports = []
        for d in all_detections:
            r = {
                'labels': [{
                    'label': d['label'],
                    # The float() cast is needed because the confidence value is
                    # actually an np.float32, which raises a TypeError when the
                    # report is serialized to JSON.
                    'confidence': float(d['confidence'])
                }],
                'rect': [
                    d['topleft']['x'],
                    d['topleft']['y'],
                    d['bottomright']['x'],
                    d['bottomright']['y'],
                ]
            }

            reports.append(r)

        results = {'reports': reports}

        print(results)
        return results

    def detect_in_image(self, input_data):
        print("Deep detector starting " + input_data['file'])
        detections = self.nn.return_predict(input_data['img'])
        print("Deep detector completed" + input_data['file'])
        return detections

    def detect_in_rois(self, input_data, comp_reports):
        img = input_data['img']
        roi_detections = []

        for r in comp_reports:

            if ('all' in self.cfg['params']['triggerlabels']) or \
                    any(l['label'] in self.cfg['params']['triggerlabels']
                        for l in r['labels']):

                rect = r['rect']
                x_offset = rect[0]
                y_offset = rect[1]
                roi = img[rect[1]:rect[3], rect[0]:rect[2], :]

                detections = self.nn.return_predict(roi)
                # These detections in ROI are relative to ROI. So we must add ROI origin to
                # those coordinates to make them full image coordinates.
                for d in detections:
                    d['topleft']['x'] += x_offset
                    d['bottomright']['x'] += x_offset

                    d['topleft']['y'] += y_offset
                    d['bottomright']['y'] += y_offset

                roi_detections.extend(detections)

        return roi_detections
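
A minimal sketch of how this component might be driven, assuming BaseComponent accepts the cfg dict whose 'params' and 'inputs' keys are read above (the paths and label here are illustrative):

import cv2

cfg = {
    'params': {
        'model': 'cfg/yolo.cfg',
        'weights': 'bin/yolo.weights',
        'triggerlabels': ['person'],
    },
    'inputs': ['files'],
}
detector = DeepDetector(cfg)
input_data = {'file': 'test/dog.jpg', 'img': cv2.imread('test/dog.jpg')}
results = detector.execute(input_data, 'input_dir', 'output_dir')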
Code example #10
        this = os.path.abspath(os.path.join(os.path.curdir, d))
        if not os.path.exists(this): os.makedirs(this)


get_dir([
    FLAGS.test, FLAGS.binary, FLAGS.backup,
    os.path.join(FLAGS.test, 'out'), FLAGS.summary
])

# fix FLAGS.load to appropriate type
try:
    FLAGS.load = int(FLAGS.load)
except ValueError:
    # FLAGS.load may also be a checkpoint path or a .weights file name
    pass

tfnet = TFNet(FLAGS)

if FLAGS.profile:
    tfnet.framework.profile(tfnet)
    exit()

if FLAGS.demo:
    tfnet.camera(FLAGS.demo, FLAGS.saveVideo)
    exit()

if FLAGS.train:
    print('Enter training ...')
    tfnet.train()
    if not FLAGS.savepb: exit('Training finished')

if FLAGS.savepb:
    print('Rebuild a constant version ...')
    tfnet.savepb()
    exit('Done')
Code example #11
        this = os.path.abspath(os.path.join(os.path.curdir, d))
        if not os.path.exists(this): os.makedirs(this)


get_dir(
    [FLAGS.test, FLAGS.binary, FLAGS.backup,
     os.path.join(FLAGS.test, 'out')])

# fix FLAGS.load to appropriate type
weight_fname = FLAGS.load
try:
    FLAGS.load = int(FLAGS.load)
except ValueError:
    # FLAGS.load may also be a checkpoint path or a .weights file name
    pass

tfnet = TFNet(FLAGS)

if FLAGS.profile:
    tfnet.framework.profile(tfnet)
    exit()

if FLAGS.demo:
    tfnet.camera(FLAGS.demo)
    exit()

if FLAGS.train:
    print('Enter training ...')
    tfnet.train()
    if not FLAGS.savepb: exit('Training finished')

if FLAGS.savepb:
    print('Rebuild a constant version ...')
    tfnet.savepb()
    exit('Done')
Code example #12
from darkflow.net.build import TFNet  # assumed import; TFNet was not imported in the original snippet

import cv2
import glob
import time
import numpy as np
from matplotlib import pyplot as plt
import sys

options = {
    "test": "test/",
    "model": "cfg/tiny-yolo-udacity.cfg",
    "backup": "ckpt/",
    "load": 8987,
    "gpu": 1.0
}

tfnet = TFNet(options)

####################################
# read a video input file

if sys.argv[1] == "testing":  # still-image test mode rather than video input
    images = glob.glob('test/*.jpg')
    i = 0

    average = []
    for image in images:
        t = time.time()

        imgcv = cv2.imread(image)
        name = image
        # Note: this project's fork of return_predict also takes the image name
        # and returns box info; stock darkflow's return_predict takes only the image.
        result, boxInfo = tfnet.return_predict(imgcv, name)