Example 1
def savepb_darkflow_model(cfg_obj, args):
    options = darkflow_parse_config(cfg_obj)
    if args.load is None:
        raise AssertionError('Please provide a ckpt number to load from.')
    options['load'] = int(args.load)

    tfnet = TFNet(options)
    tfnet.savepb()
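savepb() writes a frozen .pb/.meta pair into darkflow's built_graph/ directory. A minimal sketch of loading that pair back for inference, assuming the default yolo.pb/yolo.meta output names (the same pattern Example 6 uses):

from darkflow.net.build import TFNet
from darkflow.defaults import argHandler

FLAGS = argHandler()
FLAGS.setDefaults()
FLAGS.pbLoad = "built_graph/yolo.pb"    # assumed output name
FLAGS.metaLoad = "built_graph/yolo.meta"
tfnet = TFNet(FLAGS)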
Example 2
import os
import shutil

from darkflow.net.build import TFNet


def trainFlowCFG(cfg):
    ckptPath = 'ckpt'
    checkpointPath = 'ckpt/checkpoint'

    if not os.path.exists(ckptPath):
        os.makedirs(ckptPath)

    # darkflow reads/writes ckpt/checkpoint as a file, not a directory,
    # so create it empty if it is missing
    if not os.path.exists(checkpointPath):
        open(checkpointPath, 'a').close()
    # 'dataset' is the training image folder
    options = {"model": cfg['darkflow']["model"],
               "load": cfg['darkflow']["starting_weights"],
               "batch": cfg['darkflow']["batch_size"],
               "epoch": cfg['darkflow']["epoch"],
               "gpu": cfg['darkflow']["gpu_usage"],
               "train": True,
               "lr": float(cfg['darkflow']["learning_rate"]),
               "annotation": cfg['darkflow']["training_annotations"],
               "labels": cfg['darkflow']["labels_file"],
               "dataset": cfg['darkflow']["training_images"]}

    tfnet = TFNet(options)
    tfnet.train()
    tfnet.savepb()

    if cfg['meta']['saveRun']:
        modelPath = cfg['darkflow']['model']
        _, filename = os.path.split(modelPath)
        name, _ = os.path.splitext(filename)

        # name is only defined when saveRun is set, so the copy has to
        # happen inside this branch
        pathPB = os.path.join(cfg['temp']['rootDir'], cfg['paths']['darkflow'],
                              'built_graph', name + '.pb')
        pathMeta = os.path.join(cfg['temp']['rootDir'], cfg['paths']['darkflow'],
                                'built_graph', name + '.meta')
        savePathPB = os.path.join(cfg['temp']['rootDir'], 'runData',
                                  cfg['meta']['runName'],
                                  cfg['meta']['runName'] + '.pb')
        savePathMeta = os.path.join(cfg['temp']['rootDir'], 'runData',
                                    cfg['meta']['runName'],
                                    cfg['meta']['runName'] + '.meta')
        shutil.copyfile(pathPB, savePathPB)
        shutil.copyfile(pathMeta, savePathMeta)
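For reference, a hypothetical sketch of the nested cfg mapping this function reads (every value below is a placeholder):

cfg = {
    "darkflow": {
        "model": "cfg/yolo.cfg",
        "starting_weights": "bin/yolo.weights",
        "batch_size": 8,
        "epoch": 100,
        "gpu_usage": 0.8,
        "learning_rate": "1e-5",   # read as a string, hence the float() above
        "training_annotations": "data/annotations",
        "labels_file": "labels.txt",
        "training_images": "data/images",
    },
    "meta": {"saveRun": True, "runName": "run1"},
    "temp": {"rootDir": "/tmp/yolo"},
    "paths": {"darkflow": "darkflow"},
}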
Example 3
import os

from darkflow.defaults import argHandler
from darkflow.net.build import TFNet


def cliHandler(args):
    FLAGS = argHandler()
    FLAGS.setDefaults()
    FLAGS.parseArgs(args)

    # make sure all necessary dirs exist
    def _get_dir(dirs):
        for d in dirs:
            this = os.path.abspath(os.path.join(os.path.curdir, d))
            if not os.path.exists(this):
                os.makedirs(this)

    _get_dir([
        FLAGS.imgdir, FLAGS.binary, FLAGS.backup,
        os.path.join(FLAGS.imgdir, 'out'), FLAGS.summary
    ])

    # coerce FLAGS.load to the appropriate type: an int checkpoint number,
    # or leave it as a weights-file path if it does not parse as one
    try:
        FLAGS.load = int(FLAGS.load)
    except ValueError:
        pass

    tfnet = TFNet(FLAGS)

    if FLAGS.demo:
        tfnet.camera()
        exit('Demo stopped, exit.')

    if FLAGS.train:
        print('Enter training ...')
        tfnet.train()
        if not FLAGS.savepb:
            exit('Training finished, exit.')

    if FLAGS.savepb:
        print('Rebuild a constant version ...')
        tfnet.savepb()
        exit('Done')

    tfnet.predict()
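This mirrors darkflow's own cli.py, which the flow entry script drives with sys.argv. A direct call might look like the following sketch; the paths are hypothetical, and argv[0] is assumed to be consumed by parseArgs:

cliHandler([
    'flow',                       # argv[0]; parseArgs is assumed to skip it
    '--model', 'cfg/yolo.cfg',    # hypothetical paths
    '--load', 'bin/yolo.weights',
    '--imgdir', 'sample_img/',
    '--gpu', '0.8',
])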
Example 4
from darkflow.net.build import TFNet
import cv2
import json

with open('config.json') as json_config:
    config = json.load(json_config)

options = {
    "model": config["model"],
    "load": -1,  # -1 loads the most recent checkpoint in ckpt/
    "batch": config["batch_size"],
    "epoch": config["epoch"],
    "gpu": config["gpu_usage"],
    "train": True,
    "lr": config["learning_rate"],
    "annotation": config["training_annotations"],
    "labels": config["labels_file"],
    "dataset": config["training_images"]
}

tfnet = TFNet(options)
tfnet.train()
tfnet.savepb()
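Once training finishes, the same TFNet instance (or one rebuilt from the latest checkpoint without the training flags) can be queried directly; a minimal inference sketch, assuming a hypothetical test image (return_predict is the method Example 6 relies on):

img = cv2.imread("test.jpg")  # hypothetical image path
predictions = tfnet.return_predict(img)
# each entry is a dict with 'label', 'confidence', 'topleft' and 'bottomright'
print(predictions)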
Example 5
class DetectorOptions:

    DEFAULT_MODEL = os.path.join(dir_path, "cfg", "yolo.cfg")
    DEFAULT_LABELS = os.path.join(dir_path, "cfg", "coco.names")
    DEFAULT_WEIGHTS = os.path.join(dir_path, "bin", "yolo.weights")
    DEFAULT_THRESHOLD = 0.1
    DEFAULT_GPU = 0.7
    DEFAULT_DIRECTION = "horizontal"
    DEFAULT_NORMAL_BLOB_SIZE = 300

    def __init__(self):
        self.model = DetectorOptions.DEFAULT_MODEL
        self.weights = DetectorOptions.DEFAULT_WEIGHTS
        self.labels = DetectorOptions.DEFAULT_LABELS
        self.confidence_threshold = DetectorOptions.DEFAULT_THRESHOLD
        self.gpu = DetectorOptions.DEFAULT_GPU
        self.direction = DetectorOptions.DEFAULT_DIRECTION
        self.normal_blob_size = DetectorOptions.DEFAULT_NORMAL_BLOB_SIZE
        self.tfnet = None
        self.roi = None
        self.filter = None

    def set_model(self, model: str):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        self.model = model
        return self

    def set_weights(self, weights: str):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        self.weights = weights
        return self

    def set_confidence_threshold(self, confidence_threshold: float):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        if confidence_threshold < 0 or confidence_threshold > 1:
            raise ValueError(
                "'confidence_threshold' must be a floating point number between 0 and 1 inclusive"
            )
        self.confidence_threshold = confidence_threshold
        return self

    def set_gpu(self, gpu: float):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        if gpu < 0 or gpu > 1:
            raise ValueError(
                "'gpu' must be a floating point number between 0 and 1 inclusive"
            )
        self.gpu = gpu
        return self

    def set_roi(self, roi: typing.Dict[str, typing.Dict[str, int]]):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        self.roi = roi
        return self

    def set_direction(self, direction: str):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        if direction not in ['vertical', 'horizontal']:
            raise ValueError(
                "'direction' must be either 'vertical' or 'horizontal'")
        self.direction = direction
        return self

    def set_filter(self, labels: typing.List[str]):
        if self.tfnet:
            raise DetectorOptionsException("Neural network already built")
        self.filter = labels
        return self

    def build_model(self, verbose_level=0):
        prev_dir = os.getcwd()
        os.chdir(dir_path)
        if verbose_level == 1:
            print("TF_START")
        elif verbose_level >= 2:
            print("Importing tensorflow...")

        from darkflow.net.build import TFNet
        from darkflow.defaults import argHandler

        if verbose_level == 1:
            print("TF_END")
        elif verbose_level >= 2:
            print("Tensorflow imported")

        options = {
            "model": self.model,
            "load": self.weights,
            "labels": self.labels,
            "threshold": self.confidence_threshold,
            "gpu": self.gpu,
        }

        old_options_path = "built_graph/options.json"
        old_pb_path = "built_graph/yolo.pb"
        old_meta_path = "built_graph/yolo.meta"

        if verbose_level == 1:
            print("MODEL_START")
        elif verbose_level >= 2:
            print("Building model...")

        old = False
        if os.path.isfile(old_pb_path) and os.path.isfile(
                old_meta_path) and os.path.isfile(old_options_path):
            with open(old_options_path) as f:
                old_options = json.load(f)
            if old_options == options:
                if verbose_level >= 2:
                    print("Loading model from identical model in storage")
                options = argHandler()
                options.gpu = old_options['gpu']
                options.threshold = old_options['threshold']
                options.model = old_options['model']
                options.load = old_options['load']
                options.pbLoad = old_pb_path
                options.metaLoad = old_meta_path
                old = True

        self.tfnet = TFNet(options)
        if not old:
            if verbose_level >= 2:
                print("Saving model in cache")
            with open(old_options_path, "w") as f:
                json.dump(options, f)
            self.tfnet.savepb()

        if verbose_level == 1:
            print("MODEL_END")
        elif verbose_level >= 2:
            print("Model finished building")

        os.chdir(prev_dir)
        return self
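Since every setter returns self, the options chain fluently; a usage sketch with hypothetical paths and values:

detector_options = (DetectorOptions()
                    .set_model("cfg/yolo.cfg")       # hypothetical paths
                    .set_weights("bin/yolo.weights")
                    .set_confidence_threshold(0.4)
                    .set_gpu(0.5)
                    .set_filter(["car", "person"])
                    .build_model(verbose_level=2))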
Example 6
class ObjectDetect:
    # number of frames before stale car coordinates are discarded (used for counting)
    time_threshold = 2
    # labels to be detected (if left as None, detect all objects)
    labels = None
    # video frame count
    frame_count = 0

    def __init__(self, detection_threshold, roi, count_switch,
                 counting_line_vertical):
        self.detection_threshold = detection_threshold
        self.roi = roi
        self.count_switch = count_switch
        self.counting_line_vertical = counting_line_vertical
        self.make_temp_path()

    # def __del__(self):
    #     shutil.rmtree(self.path)

    def init_options(self, model_dir, weights_dir, threshold, gpu):
        self.options = {
            "model": model_dir,
            "load": weights_dir,
            "threshold": threshold,
            "gpu": gpu
        }

        if os.path.exists("./built_graph") and os.path.isdir("./built_graph"):
            shutil.rmtree("./built_graph")
        self.init_model()

    def init_model(self):
        if (os.path.isfile("./built_graph/yolo.pb")
                and os.path.isfile("./built_graph/yolo.meta")):
            # if already saved, load from existing
            FLAGS = argHandler()
            FLAGS.setDefaults()
            FLAGS.pbLoad = "built_graph/yolo.pb"
            FLAGS.metaLoad = "built_graph/yolo.meta"
            self.tfnet = TFNet(FLAGS)
        else:
            self.tfnet = TFNet(self.options)
            self.tfnet.savepb()

    def init_roi(self, botleft, topleft, topright, botright):
        self.roi_pts = [botleft, topleft, topright, botright]

    def set_label(self, labels):
        self.labels = labels

    # get image with region of interest applied for further processing
    def get_roi(self, imgcv):
        roi_pts = self.roi_pts
        pts = np.array([roi_pts[0], roi_pts[1], roi_pts[2], roi_pts[3]],
                       np.int32)
        roi = pts.reshape((-1, 1, 2))
        cv2.polylines(imgcv, [roi], True, (0, 255, 255))

        mask = np.zeros_like(imgcv)
        cv2.drawContours(mask, [roi], -1, (255, 255, 255), -1, cv2.LINE_AA)
        masked = cv2.bitwise_and(imgcv, mask)
        return masked

    # draw region of interest for final result
    def draw_roi(self, imgcv, line):
        roi_pts = self.roi_pts
        pts = np.array([roi_pts[0], roi_pts[1], roi_pts[2], roi_pts[3]],
                       np.int32)
        cv2.polylines(imgcv, [pts], True, (0, 255, 255))
        if line is not None:
            cv2.line(imgcv, (int(line[0][0]), int(line[0][1])),
                     (int(line[1][0]), int(line[1][1])), (0, 255, 0))
        return imgcv

    # yolo at work - return coordinates of objects per frame
    def process_frame(self, frame):
        print("FRAME_INDEX:" + str(self.frame_count))
        sys.stdout.flush()
        result = self.tfnet.return_predict(frame)
        # print(result)
        self.frame_count += 1
        return result

    # remove overlapping bounding boxes according to detection_threshold
    def remove_overlap(self, img, coord):
        if len(coord) > 1:
            pointB = coord[0]
            for item in coord[1:]:
                pointA = pointB
                pointB = item
                # detection_threshold is the maximum pixel distance at which
                # two boxes are considered detections of the same object
                if (point_calculate.boxDistance(pointA[0], pointA[1],
                                                pointB[0], pointB[1]) <
                        self.detection_threshold):
                    # keep the higher-confidence box when the two boxes are
                    # closer than the threshold
                    if pointA[5] > pointB[5]:
                        coord.remove(pointB)
                        pointB = pointA
                    else:
                        coord.remove(pointA)
        return coord

    # draw bounding box and label
    def draw_bb(self, imgcv, coord):
        h, w, _ = imgcv.shape
        for coordinate in coord:
            cv2.rectangle(imgcv, (coordinate[0], coordinate[1]),
                          (coordinate[2], coordinate[3]), 255, 3)
            cv2.putText(imgcv, coordinate[4],
                        (coordinate[0], coordinate[1] - 12), 0, 2e-3 * h, 255,
                        1)

        return imgcv

    def car_count(self, img, coord, old_cars, count, line, is_vertical,
                  frame_index):
        # iterate over a copy: removing elements from old_cars while
        # iterating it directly would skip entries
        for oc in list(old_cars):
            if frame_index - oc[6] > self.time_threshold:
                old_cars.remove(oc)

        new_cars = []

        for nc in coord:
            # collision with counting line
            if point_calculate.collision(nc[0], nc[1], nc[2], nc[3], line,
                                         self.counting_line_vertical):
                # if point_calculate.collision_debug(nc[0], nc[1], nc[2], nc[3], line, self.counting_line_vertical, img, frame_index):
                new_cars.append(nc)

                unique_car = True
                for oc in old_cars:
                    # tl.x, tl.y, br.x, br.y
                    oc_point = ((oc[0] + oc[2]) / 2, (oc[1] + oc[3]) / 2)
                    nc_point = ((nc[0] + nc[2]) / 2, (nc[1] + nc[3]) / 2)

                    if self.counting_line_vertical:
                        car_size = oc[3] - oc[1]
                    else:
                        car_size = oc[2] - oc[0]

                    if point_calculate.boxDistance(
                            oc_point[0], oc_point[1], nc_point[0],
                            nc_point[1]) < (car_size // 3 *
                                            (frame_index - oc[6])):
                        # consider same car
                        unique_car = False
                        old_cars.remove(oc)
                        break

                if unique_car:
                    count += 1

        return new_cars + old_cars, count

    def process_coords(self, img, result, frame_index):
        coord = []

        for bbox in result:
            left, top = bbox['topleft']['x'], bbox['topleft']['y']
            right, bot = bbox['bottomright']['x'], bbox['bottomright']['y']
            label = bbox['label']
            conf = bbox['confidence']

            if point_calculate.isNormalBlobSize(left, top, right, bot):
                if (self.labels is None) or (label in self.labels):
                    coord.append(
                        (left, top, right, bot, label, conf, frame_index))

        coord = self.remove_overlap(img, coord)

        return coord

    def get_frame(self, video_dir):
        cap = cv2.VideoCapture(video_dir)

        if cap.isOpened():
            ret, frame = cap.read()
        else:
            ret = False

        while ret:
            if self.roi:
                yield self.get_roi(frame)
            else:
                yield frame
            ret, frame = cap.read()

        cap.release()

    async def video_detect(self, video_dir):
        if self.roi:
            roi_pts = self.roi_pts
        self.frame_count = 0

        cap = cv2.VideoCapture(video_dir)
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        self.frames = length
        print("FRAMES:" + str(length))

        # e = ProcessPoolExecutor(max_workers = 2)
        # coords = await asyncio.gather(*(self.loop.run_in_executor(e, self.process_frame, frame) for frame in self.get_frame(video_dir)))
        coords = [
            self.process_frame(frame) for frame in self.get_frame(video_dir)
        ]

        cap = cv2.VideoCapture(video_dir)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fourcc = cv2.VideoWriter_fourcc(*'H264')
        result_dir = os.path.join(self.path, "out_video.mp4")
        out = cv2.VideoWriter(result_dir, fourcc, 30.0,
                              (int(cap.get(3)), int(cap.get(4))))

        print("Coords: " + str(len(coords)))

        cars = []
        count = 0
        self.resultsForJSON = []
        self.objects_json = []
        self.object_count = 0
        # summary of number of object labels
        self.object_labels = set()

        print("WRITE_START")

        for i in range(length):
            ret, frame = cap.read()
            coord = self.process_coords(frame, coords[i], i)

            # car count
            if self.count_switch:
                botleft, topleft, topright, botright = roi_pts
                if self.counting_line_vertical:
                    point_left = ((botleft[0] + topleft[0]) / 2,
                                  (botleft[1] + topleft[1]) / 2)
                    point_right = ((botright[0] + topright[0]) / 2,
                                   (botright[1] + topright[1]) / 2)
                    count_line = (point_left, point_right)
                else:
                    point_up = ((topleft[0] + topright[0]) / 2,
                                (topleft[1] + topright[1]) / 2)
                    point_down = ((topright[0] + botright[0]) / 2,
                                  (botleft[1] + botright[1]) / 2)
                    count_line = (point_up, point_down)
                cars, count = self.car_count(frame, coord, cars, count,
                                             count_line,
                                             self.counting_line_vertical, i)
            else:
                count = None

            # append processed coordinate to json
            self.append_to_frame_json(coord)

            # drawing roi and bounding box
            if self.roi:
                if not self.count_switch:
                    count_line = None
                frame = self.draw_roi(frame, count_line)
            frame = self.draw_bb(frame, coord)

            # write to video
            out.write(frame)

        # write json to results dir
        self.append_to_json(count)
        self.write_to_json(self.resultsForJSON)

        cap.release()

        print("WRITE_END")

    async def image_detect(self, img_dir):
        img = cv2.imread(img_dir, cv2.IMREAD_COLOR)

        if self.roi:
            proc_img = self.get_roi(img)
        else:
            proc_img = img

        coords = self.process_frame(proc_img)
        bbox = self.process_coords(img, coords, 0)
        img = self.draw_bb(img, bbox)

        result_dir = os.path.join(self.path, "out_image.jpg")
        cv2.imwrite(result_dir, img)


    def make_temp_path(self):
        self.path = "./.ylab/"
        directory = os.path.dirname(self.path)
        if not os.path.exists(directory):
            os.makedirs(directory)

    def set_roi(self, roi_switch):
        self.roi = roi_switch

    def append_to_frame_json(self, coord):
        self.object_count += len(coord)
        for box in coord:
            left, top, right, bot, label, conf = box[:6]
            self.object_labels.add(label)

            self.objects_json.append({
                "label": label,
                "confidence": float('%.2f' % conf),
                "topleft": {
                    "x": left,
                    "y": top
                },
                "bottomright": {
                    "x": right,
                    "y": bot
                }
            })

    def append_to_json(self, car_count):
        if self.count_switch:
            type_str = "traffic"
        else:
            type_str = "default"

        self.resultsForJSON = {
            "objects": list(self.object_labels),
            "frames": self.objects_json,
            "count_per_frame": float('%.2f' % (self.object_count / self.frames)),
            "type": type_str,
            "car_count": car_count
        }

    def write_to_json(self, data):
        result_dir = os.path.join(self.path, "data.json")
        with open(result_dir, 'w') as outfile:
            json.dump(data, outfile)
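A usage sketch under assumptions: the constructor arguments, ROI corner points, and file paths below are all hypothetical, and video_detect is a coroutine, so it is driven through asyncio:

import asyncio

detector = ObjectDetect(detection_threshold=50, roi=True,
                        count_switch=True, counting_line_vertical=True)
detector.init_options("cfg/yolo.cfg", "bin/yolo.weights", 0.4, 0.7)  # hypothetical paths
detector.init_roi((100, 400), (100, 100), (500, 100), (500, 400))    # botleft, topleft, topright, botright
detector.set_label(["car"])
asyncio.run(detector.video_detect("traffic.mp4"))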
Example 7
from darkflow.net.build import TFNet

if __name__ == '__main__':

    options = {
        "model": "/media/mensa/Data/Task/EgyALPR/darkflow/cfg/yolo.cfg",
        "load": "/media/mensa/Data/Task/EgyALPR/darkflow/bin/yolo.weights",
        "batch": 8,
        "epoch": 100,
        "gpu": 0.9,
        "train": True,
        "annotation": "/media/mensa/Data/Task/EgyALPR/training_dataset/xml",
        "dataset": "/media/mensa/Data/Task/EgyALPR/training_dataset/images"
    }
    tf_net = TFNet(options)
    tf_net.train()
    tf_net.savepb()
Example 8
from darkflow.net.build import TFNet

# yolov2-voc2012.cfg was created by modifying the stock yolov2 cfg as per the
# instructions in the darkflow repo

options = {
    "model": "cfg/yolov2-voc2012.cfg",
    "load": "bin/yolov2.weights",
    "epoch": 2,
    "train": True,
    "annotation": "./VOCtrainval_11-May-2012/VOCdevkit/VOC2012/Annotations/",
    "dataset": "./VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages/",
    "labels": "labels-voc2012.txt"
}

tfnet = TFNet(options)

tfnet.train()   # automatically creates a ckpt folder containing the checkpoints

tfnet.savepb()  # writes the frozen graph into the built_graph folder