# ===== Exemplo n.º 1 (score: 0) =====
class MyDarkflow:
    """Thin wrapper around darkflow's TFNet that draws labeled boxes."""

    def __init__(self):
        # Minimum detection confidence, forwarded to darkflow as 'threshold'.
        self.confidence = 0.4

        # Darkflow configuration: model definition, pretrained weights,
        # detection threshold and fraction of GPU memory to use.
        self.option = {
            'model': 'cfg/yolo.cfg',
            'load': 'bin/yolo.weights',
            'threshold': self.confidence,
            'gpu': 0.7
        }

        self.tfnet = TFNet(self.option)

        # Preassigned colors for the most common labels; any other label
        # gets a random color the first time it is seen (see below).
        self.colors = {
            'car': (238, 23, 23),
            'truck': (0, 255, 21),
            'bus': (3, 0, 255),
            'person': (0, 255, 243)
        }

    def highlight_vehicles(self, img):
        """Draw a labeled rectangle around every detection in *img*.

        Returns a tuple ``(annotated_image, number_of_detections)``.
        """
        detections = self.tfnet.return_predict(img)
        for detection in detections:
            # Corner coordinates and class label of this detection.
            top_left = (detection['topleft']['x'], detection['topleft']['y'])
            bottom_right = (detection['bottomright']['x'],
                            detection['bottomright']['y'])
            label = detection['label']

            # First time this label shows up: give it a random color.
            if label not in self.colors:
                self.colors[label] = 200 * np.random.rand(3)

            # Box around the object, label text at its top-left corner.
            img = cv2.rectangle(img, top_left, bottom_right,
                                self.colors[label], 3)
            img = cv2.putText(img, label, top_left,
                              cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        return (img, len(detections))
# ===== Exemplo n.º 2 (score: 0) =====
# Alternative configuration using the final weights (kept for reference):
#options = {"model": "cfg/yolo-face.cfg", "load": "weight/yolo-face_final.weights", "threshold": 0.1, "gpu": 1.0}
# Darkflow face-detection model: cfg, weights checkpoint, threshold, GPU share.
options = {
    "model": "cfg/yolo-face.cfg",
    "load": "weight/yolo-face_4000.weights",
    "threshold": 0.1,
    "gpu": 1.0
}
tfnet = TFNet(options)
count = 0  # becomes 1 once the tracker has been seeded with a detection
tracker = cv2.TrackerMIL_create()
cam = cv2.VideoCapture(0)  # default webcam

while True:
    _, camcv = cam.read()
    # Resize to the network's expected 448x448 input.
    camcv = cv2.resize(camcv, (448, 448))
    result = tfnet.return_predict(camcv)
    num_people = len(result)
    for i in range(num_people):
        #print(result[i]['topleft'])
        # Box corners of the i-th detection.
        top_left_x = result[i]['topleft']['x']
        top_left_y = result[i]['topleft']['y']
        bottom_right_x = result[i]['bottomright']['x']
        bottom_right_y = result[i]['bottomright']['y']

        # First usable detection: initialize the MIL tracker with its box.
        # NOTE(review): cv2 tracker init expects (x, y, w, h) but this passes
        # corner coordinates (x1, y1, x2, y2) — confirm against the API.
        if count == 0 and top_left_x != 0:
            count += 1
            bbox = (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
            ok = tracker.init(camcv, bbox)
        elif count == 0:
            continue
        else:
# ===== Exemplo n.º 3 (score: 0) =====
def predict(videoPath):
    """Run YOLO over every frame in ./frames, writing annotated images to
    ./output-images, predictions to yolo.csv, and stitching the annotated
    frames into "<video-name>-yolo.avi".

    Returns a JSON string with 'csvpath' and 'videopath'.
    """
    # Start from a fresh csv containing only the header row.
    if os.path.exists("yolo.csv"):
        os.remove("yolo.csv")
    with open('yolo.csv', 'a') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerow(['FrameNumber', 'PredictionString'])
        # NOTE: no explicit close() — the with-block already closes the file.

    options = {"model": "cfg/455.cfg", "load": 35250, "threshold": 0.4}
    frameNum = 0
    tfnet = TFNet(options)

    skip = False
    fileDir = os.listdir(os.getcwd() + "/frames")
    for filename in fileDir:

        img = cv2.imread(os.getcwd() + "/frames/" + filename, cv2.IMREAD_COLOR)
        # The frame may still be mid-write by the extractor: retry once after
        # a short wait, then drop the file if it is still unreadable.
        while img is None:
            print("image is none")
            img = cv2.imread(os.getcwd() + "/frames/" + filename,
                             cv2.IMREAD_COLOR)
            time.sleep(2)
            if img is None:
                skip = True
                os.remove(os.getcwd() + "/frames/" + filename)
                break

        if not skip:
            saved_img = img  # annotated below and saved in original BGR
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            print("not skipping")
            # use YOLO to predict the image
            result = tfnet.return_predict(img)

            box_id = 0  # renamed from 'id', which shadowed the builtin
            csvString = ""
            for detection in result:
                tl = (detection['topleft']['x'], detection['topleft']['y'])
                br = (detection['bottomright']['x'],
                      detection['bottomright']['y'])
                label = detection['label']

                box_id = box_id + 1
                # PredictionString entry: "label confidence x1 y1 x2 y2".
                # Bug fix: the original wrote bottomright.x twice and never
                # topleft.x; assumed intended order is tl before br — verify
                # against the consumer of yolo.csv.
                csvString += (label + " " + str(detection['confidence']) +
                              " " + str(tl[0]) + " " + str(tl[1]) +
                              " " + str(br[0]) + " " + str(br[1]) + " ")
                saved_img = cv2.rectangle(saved_img, tl, br, (0, 255, 0), 7)
                saved_img = cv2.putText(saved_img,
                                        str(box_id) + " " + label, tl,
                                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0),
                                        2)

            cv2.imwrite(os.getcwd() + "/output-images/" + filename, saved_img)
            with open('yolo.csv', 'a') as writeFile:
                writer = csv.writer(writeFile)
                writer.writerow([frameNum, csvString])
            frameNum = frameNum + 1

            print("Finished processing image " + str(frameNum) + "/" +
                  str(len(fileDir)))

        skip = False

    print("Making video")
    img_array = []
    # Frames were written as "<index>.jpg"; read them back in index order.
    for index in range(0, len(os.listdir(os.getcwd() + "/output-images/"))):
        frame_path = os.getcwd() + "/output-images/" + str(index) + ".jpg"
        # Bug fix: the original printed the stale image array here.
        print("sorted file name : " + frame_path)
        img = cv2.imread(frame_path, cv2.IMREAD_COLOR)
        height, width, layers = img.shape
        size = (width, height)
        img_array.append(img)

    print(len(img_array))
    video_path = (os.getcwd() + "/" + videoPath.split("/")[-1].split(".")[0] +
                  "-yolo.avi")
    # 15 fps, DIVX codec; 'size' comes from the last frame read above.
    out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'DIVX'), 15,
                          size)

    for frame in img_array:
        out.write(frame)

    out.release()
    return json.dumps({
        'csvpath': os.getcwd() + "/yolo.csv",
        'videopath': video_path
    })
# ===== Exemplo n.º 4 (score: 0) =====
# Detect-then-track main loop: once a person is locked on, SiamFC tracks it;
# until then YOLO scans each frame for the most confident 'person'.
while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame = preprocess(frame)
    start_time = datetime.datetime.now()

    if is_target_exist:
        # Target already locked: let SiamFC produce the new box.
        # ok, bbox = tracker.update(frame)
        bbox = siamFC.track(frame)
        # Draw bounding box — bbox is (x, y, w, h), so the second corner
        # is (x + w, y + h).
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

    else:
        # No target yet: run YOLO (the guard is redundant inside this
        # else-branch) and keep the highest-confidence 'person' detection.
        results = tfnet.return_predict(frame) if not is_target_exist else []
        is_contain_person = False
        maximum_index = 0
        maximum_confidence = -1
        for i, res in enumerate(results):
            if res['label'] == 'person':
                is_contain_person = True
                if res['confidence'] > maximum_confidence:
                    maximum_confidence = res['confidence']
                    maximum_index = i

        if is_contain_person:
            # Convert the detection's corner coordinates to (x, y, w, h).
            res = results[maximum_index]
            bbox = (res['topleft']['x'], res['topleft']['y'],
                    res['bottomright']['x'] - res['topleft']['x'],
                    res['bottomright']['y'] - res['topleft']['y'])