Example #1
def main(args):
    det = Detector(model_path=args.model_path,
                   input_size=args.input_size,
                   num_classes=args.num_classes,
                   threshold=args.threshold)

    if args.inputs.endswith('.mp4'):
        cap = cv2.VideoCapture(args.inputs)
        while True:
            ret, img = cap.read()
            if not ret: break
            results = det.detect(img)
            draw(img, results)
            cv2.imshow('', img)
            cv2.waitKey(1)
        cap.release()  # release the capture once the stream ends
    elif os.path.isdir(args.inputs):
        paths = glob.glob(os.path.join(args.inputs, '*'))
        for path in paths:
            img = cv2.imread(path)
            if img is None:  # the glob may match non-image files
                continue
            results = det.detect(img)
            draw(img, results)
            cv2.imshow('', img)
            cv2.waitKey(0)
    else:
        img = cv2.imread(args.inputs)
        results = det.detect(img)
        draw(img, results)
        cv2.imshow('', img)
        cv2.waitKey(0)
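The snippet above assumes an `args` namespace carrying model_path, input_size, num_classes, threshold, and inputs. A minimal argparse harness along these lines would drive it; the flag names and defaults are assumptions for illustration, not the project's actual CLI.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', required=True)
    parser.add_argument('--input_size', type=int, default=320)   # assumed default
    parser.add_argument('--num_classes', type=int, default=80)   # assumed default
    parser.add_argument('--threshold', type=float, default=0.5)  # assumed default
    parser.add_argument('--inputs', required=True,
                        help='an .mp4 file, a directory of images, or a single image')
    main(parser.parse_args())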
Example #2
def main(args):

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    image_path = args.image_path
    is_local_weights = args.is_local_weights
    weights_base_path = args.weights_base_path
    show_face = args.show_face
    align_torch = args.align_torch
    arch = args.arch

    # detect faces from image
    image = cv2.imread(image_path)

    det = Detector()
    boxes, landmarks = det.detect(image)
    print(f"Found faces: {boxes.shape[0]}")

    if boxes.shape[0] < 1:
        print("Faces not found")
        sys.exit(0)

    if align_torch:
        faces_aligned, _ = align_face_torch_batch(image, landmarks, boxes,
                                                  device)
    else:
        faces_aligned, _ = align_face_np(image, landmarks, boxes)

    if show_face:
        # show detected face
        idx = 0
        x_tl, y_tl, x_br, y_br = boxes[idx, :4]
        face = image[y_tl:y_br, x_tl:x_br, :]
        cv2.imshow("Detected face", face)

        if align_torch:
            face_aln = faces_aligned.cpu().numpy()[idx].squeeze().copy()
            print(face_aln.min())
            print(face_aln.max())
            face_aln = (face_aln * 255).astype(np.uint8)
            face_aln = face_aln.transpose(1, 2, 0)
        else:
            face_aln = faces_aligned[idx].squeeze().copy().astype(np.uint8)

        cv2.imshow("Aligned face", face_aln)
        cv2.waitKey(0)

    # create embedder and get features
    embedder = Embedder(is_local_weights, arch, weights_base_path)

    features = embedder.get_features(faces_aligned)
    # print(features[idx, :])

    print("Features calculation finished. ")
    print(f"Features shape: {features.shape}")
Example #3
def main():
    for fileName in os.listdir(DEMO_FOLDER):
        os.unlink(os.path.join(DEMO_FOLDER, fileName))

    # for folder in ['tmp/hi_koov_archive/_background_noise_', 'tmp/hi_koov_archive/unknown', 'tmp/hi_koov_archive/hi_koov', 'tmp/hi_koov_demo/hi_koov']:
    #     for the_file in os.listdir(folder):
    #         file_path = os.path.join(folder, the_file)
    #         try:
    #             if os.path.isfile(file_path):
    #                 os.unlink(file_path)
    #             # elif os.path.isdir(file_path): shutil.rmtree(file_path)
    #         except Exception as e:
    #             print(e)

    d = Detector()

    # i = 1

    while True:
        # filePath = os.path.join(
        #     'tmp/hi_koov_demo/hi_koov/', 'voice-{}.wav'.format(i))
        # record_test_voice(filePath)
        record_test_voice(AUDIO_SAVE_PATH)

        t1 = time.time()

        result = d.evaluate()

        t2 = time.time()

        print('=======================================================')

        if result == 2:
            print('Hi KOOV!\t- Inference time: {}s'.format(round(t2 - t1, 2)))
        else:
            print('Unknown\t- Inference time: {}s'.format(round(t2 - t1, 2)))

        # if result == 2:
        #     print('Hi KOOV!')
        #     shutil.move(filePath, os.path.join('tmp/hi_koov_archive/hi_koov'))
        # elif result == 1:
        #     print('Unknown')
        #     shutil.move(filePath, os.path.join('tmp/hi_koov_archive/unknown'))
        # else:
        #     print('Background')
        #     shutil.move(filePath, os.path.join(
        #         'tmp/hi_koov_archive/_background_noise_'))

        print('=======================================================')

        # i += 1

        time.sleep(2)
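record_test_voice() is not defined in the snippet. A plausible sketch using PyAudio and the wave module, assuming one-second 16 kHz mono clips; the real project's parameters may differ.

import wave
import pyaudio

def record_test_voice(path, seconds=1, rate=16000, chunk=1024):
    pa = pyaudio.PyAudio()
    stream = pa.open(format=pyaudio.paInt16, channels=1, rate=rate,
                     input=True, frames_per_buffer=chunk)
    frames = [stream.read(chunk) for _ in range(int(rate / chunk * seconds))]
    sample_width = pa.get_sample_size(pyaudio.paInt16)
    stream.stop_stream()
    stream.close()
    pa.terminate()
    with wave.open(path, 'wb') as wf:  # write the raw frames out as a WAV file
        wf.setnchannels(1)
        wf.setsampwidth(sample_width)
        wf.setframerate(rate)
        wf.writeframes(b''.join(frames))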
Example #4
    def __init__(self, cam_id):
        self.id = cam_id
        self.log = "[{}] ".format(self.id)

        self.lock = Lock()

        self.detector = Detector()
        self.tracker = Tracker(skipped_th=5)
        self.warper = Warper()

        self.suspicious_regions = None
        logger.debug(self.log + "New SpotMonitor instance created")
Example #5
def on_mouse_click(event, x, y, flags, frame):
    if event == cv2.EVENT_LBUTTONUP:
        global color, det
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        print(hsv[y - 2:y + 2, x - 2:x + 2, 0])
        # take the median of the 4x4 window around the click
        h = np.median(hsv[y - 2:y + 2, x - 2:x + 2, 0])
        s = np.median(hsv[y - 2:y + 2, x - 2:x + 2, 1])
        v = np.median(hsv[y - 2:y + 2, x - 2:x + 2, 2])
        print(h, s, v)
        color = (h, s, v)
        det = Detector("color", color)
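For context, this handler is registered with cv2.setMouseCallback, whose third argument is forwarded to the callback as its last parameter (frame here). A minimal wiring sketch, assuming frame already holds a BGR image:

import cv2

frame = cv2.imread('sample.jpg')  # hypothetical test image
cv2.namedWindow('picker')
cv2.setMouseCallback('picker', on_mouse_click, frame)
cv2.imshow('picker', frame)
cv2.waitKey(0)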
Example #6
class SpotMonitor:
    def __init__(self, cam_id):
        self.id = cam_id
        self.log = "[{}] ".format(self.id)

        self.lock = Lock()

        self.detector = Detector()
        self.tracker = Tracker(skipped_th=5)
        self.warper = Warper()

        self.suspicious_regions = None
        logger.debug(self.log + "New SpotMonitor instance created")

    def process(self, im):
        with self.lock:
            filtered_centers, cntrs = self.detector.detect(im=im)
            logger.debug(self.log + "Centers: {} Contours: {}".format(
                len(filtered_centers), len(cntrs)))

            nparr = np.frombuffer(im, np.uint8)
            im = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

            tracks, susp_reg = self.tracker.track(centers=filtered_centers,
                                                  cntrs=cntrs)

            logger.debug(self.log +
                         "Suspicious region: {} ".format(len(susp_reg)))

            bbox_warped = self.warper.warp_image(
                img_shape=[im.shape[0], im.shape[1]],
                coordinates=susp_reg,
                camera_id=1)

            logger.debug(
                self.log +
                "Warped suspicious region: {} ".format(len(bbox_warped)))

            susp_reg = [i.tolist() for i in susp_reg]
            bbox_warped = [i.tolist() for i in bbox_warped]

            self.suspicious_regions = {
                'suspicious': susp_reg,
                "mapped": bbox_warped
            }
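Note that process() decodes `im` with np.frombuffer plus cv2.imdecode, so it expects an encoded byte buffer (e.g. a JPEG from a camera service), not a decoded array. A hedged usage sketch:

import cv2

monitor = SpotMonitor(cam_id='cam-1')   # hypothetical camera id
frame = cv2.imread('frame.jpg')         # hypothetical test frame
ok, buf = cv2.imencode('.jpg', frame)   # re-encode into the bytes process() expects
if ok:
    monitor.process(buf.tobytes())
    print(monitor.suspicious_regions)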
Example #7
import PyQt5
from utils.driver import Driver
from utils.neuron import Neuron
from utils.detector import Detector
from utils.player import Player

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import time
import keyboard
from brian2 import NeuronGroup, ms

# initialise classes
det = Detector("hand")
plr = Player()
drv = Driver()
time.sleep(0.1)
drv.lowerTurn()

## Comment to remove plotting
fig, ax = plt.subplots()
ax.set_ylim([-0.08, 0.07])
line1, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'r-')
line2, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'b-')
line3, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'g-')
plt.show(block=False)
fig.canvas.draw()
## /Comment to remove plotting

# Izhikevich Neuron Model from http://brian2.readthedocs.io/en/stable/introduction/brian1_to_2/library.html
Example #8
File: run.py Project: navin20/SP1-1
def main():
    data = []
    # stream = "rtsp://*****:*****@168.120.33.119"
    # dateday = datetime.date.today()
    date = datetime.datetime.now()
    start = date.minute
    # ToD = ""

    keys = ["in", "out", "total", "morning", "afternoon", "date", "ToD", "trackers"]
    record = {key: 0 for key in keys}
    ENTRY_LINES = [((0, 200, 640, 200), (0, 230, 640, 230), [1, 2])]
    entry_boundaries = [Boundary(boundary[0], boundary[1], sequence=boundary[2]) for boundary in ENTRY_LINES]  
    tracker = PeopleTracker(entry_boundaries, entries=record["in"], exits=record["out"], count=record["total"])

    detector = Detector("model/frozen_inference_graph.pb")
    
    # streamer = FileVideoStream(stream).start()
    streamer = WebcamVideoStream("3.mp4").start()
    # streamer =  RTSPVideoFeed(stream)
    # streamer.open()

    file_name = 'output/' + str(date.day)  + '-' + str(date.minute) + '.avi'
    file_path = os.path.join(os.getcwd(), file_name)
    writer = cv2.VideoWriter(file_path, cv2.VideoWriter_fourcc(*'XVID'), 30.0, (640, 360))
    COLORS = np.random.uniform(0, 255, (100, 3))
 

    #Run people counting
    while True:
        # Working hours
        # if date.hour >= 9 and date.hour <= 20:
        #   run
        # else :
        #   stop

        try: 
            ret, frame = streamer.read()
            if ret:
                # timeofday = datetime.datetime.now().strftime("%H:%M:%S")
                # if timeofday < "12:00:00":
                #     ToD = "Morning"
                # elif timeofday >= "12:00:00":
                #     ToD = "Afternoon"

                frame = imutils.resize(frame, width=640)
                points = detector.detect(frame, (20, 40), (100, 200), threshold=0.4)
                tracker.update(points, update_type='distance')
                tracker.check()
                data = tracker.get_data(type='dict')
                
                coords = tracker.get_tracker_dictionary()
                # print(coords)
                if coords:
                    for k, v in coords.items():
                        print(k, v)
                        cv2.circle(frame, (v['center'][0], v['center'][1]), 4, (0, 255, 0), -1)
                        # cv2.rectangle takes two corner points; assuming
                        # v['coord'] holds (x1, y1, x2, y2)
                        x1, y1, x2, y2 = v['coord']
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                # for c in coords:
                #     

                for bound in entry_boundaries:
                    points  = bound.get_lines()
                    for point in points:
                        cv2.line(frame, point[0], point[1], (255, 0, 0), 2)


                info = [
                    ("Out", data['out']),
                    ("In", data['in']),
                ]

                for (i, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (24, 360 - ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
                
                writer.write(frame)
                cv2.imshow('Frame', frame)
                end = datetime.datetime.now().minute

                if (end - start) % 60 == 1:  # handle the 59 -> 0 minute wrap
                    row = [str(data['date']), str(data['morning']), str(data['afternoon']), str(data['in'])]
                    update_csv(row)
                    start = datetime.datetime.now().minute
                    


                # Reset everyday
                # if dateday == datetime.date.today() - datetime.timedelta(days=1):
                #     main()
                #     keys = ["in", "out", "total", "morning", "afternoon", "date", "ToD", "trackers"]
                #     record = {key: 0 for key in keys}
                #     ENTRY_LINES = [((0, 200, 640, 200), (0, 230, 640, 230), [1, 2])]
                #     entry_boundaries = [Boundary(boundary[0], boundary[1], sequence=boundary[2]) for boundary in ENTRY_LINES]  
                #     tracker = PeopleTracker(entry_boundaries, entries=record["in"], exits=record["out"], count=record["total"])
                #     dateday = datetime.date.today()

                if cv2.waitKey(1) == ord('q'):
                    cv2.destroyAllWindows()
                    streamer.release()
                    writer.release()
                    break
            else:
                print("End of video")
                cv2.destroyAllWindows()
                streamer.release()
                writer.release()
                break
        except Exception:
            import logging
            logging.exception('Oops: error occurred')
            streamer.release()
            writer.release()
            sys.exit(1)
            
    streamer.release()
    writer.release()
    return
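update_csv() is not shown in the snippet. A minimal sketch that appends one row per minute to a log file; the path and column order are assumptions.

import csv

def update_csv(row, path='output/counts.csv'):
    # append-only log: [date, morning, afternoon, in]
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerow(row)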
Example #9
from utils.player import Player
from utils.driver import Driver
from utils.detector import Detector
import keyboard
import time

# initialize videoplayer, car driver, and cup detector
plr = Player()
drv = Driver()
det = Detector("hand")

while True:
    frame = plr.read()
    p0, p1, p2 = det.predict(frame)

    print("------------")
    print(p0, "|", p1, "|", p2)
    print("------------")

    # if a hand is likely >50% on screen, engage movement
    if max(p0, p1, p2) > 0.5:
        if p2 > p1 and p2 > p0:
            drv.right()
            time.sleep(0.1)
            drv.stop()
        elif p0 > p1 and p0 > p2:
            drv.left()
            time.sleep(0.1)
            drv.stop()
        else:
            drv.forward()
Example #10
# Detection
from utils.detector import Detector
import cv2
import time
from PIL import Image, ImageDraw

if __name__ == '__main__':
    # box faces from the webcam feed
    detector = Detector()
    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            start_time = time.time()
            frame = cv2.bilateralFilter(frame, 10, 30, 30)
            frames = frame[:, :, ::-1]

            image = Image.fromarray(frames, 'RGB')
            imDraw = ImageDraw.Draw(image)
            boxes = detector.detect(image)
            ratio = 0.2  # renamed from `len`, which shadowed the Python builtin
            for box in boxes:  # multiple boxes; each iteration boxes one face
                x1 = int(box[0])
                y1 = int(box[1])
                x2 = int(box[2])
                y2 = int(box[3])

                px1 = int(box[5])
                py1 = int(box[6])
                px2 = int(box[7])
Example #11
# always sort import statements by length
import re
import cv2
import time
import keyboard
import numpy as np
from utils.player import Player
from utils.driver import Driver
from utils.detector import Detector

# initialize player, driver, and detector
plr = Player()
drv = Driver()
det = Detector("color", "cyan")

path = []
count = 0
while True:
    count += 1
    if count > 30:  # keep tcp from dying
        drv.keepAwake()

    frame = plr.read()
    # p = size of contour, 0,1,2 is where in the screen its center is
    p0, p1, p2 = det.predict(frame)
    print(p0, p1, p2)
    if max(p0, p1, p2) > 100:  # if contour size is greater than 100 pixels squared
        if p2 > p1:
            drv.right()
            time.sleep(0.1)
Example #12
from utils.detector import Detector

det = Detector("hand")
Example #13
from utils.player import Player
from utils.driver import Driver
from utils.detector import Detector
import keyboard  # assumed available (used elsewhere in this project) so the loop can exit

# initialize videoplayer, car driver, and cup detector
plr = Player()
drv = Driver()
det = Detector("cup")

while not keyboard.is_pressed('q'):  # press 'q' to exit so the streams below get closed
    ret, frame = plr.read()
    p0, p1, p2 = det.predict(frame)

    print("------------")
    print(p0, "|", p1, "|", p2)
    print("------------")

    # if a cup is likely >50% on screen, engage movement
    if max(p0, p1, p2) > 0.5:
        if p2 > p1 and p2 > p0:
            drv.right()
        elif p0 > p1 and p0 > p2:
            drv.left()
        else:
            drv.forward()

# remember to close the streams
drv.close()
plr.close()
Example #14
def main(args):
    det = Detector(model_path=args.model_path,
                   input_size=args.input_size,
                   num_classes=args.num_classes,
                   threshold=0.0)

    img_paths = glob.glob(os.path.join(args.image_dir, '*'))
    predict_labels = []
    true_labels = []

    for img_path in tqdm.tqdm(img_paths):
        label_path = os.path.join(
            args.label_dir,
            os.path.splitext(os.path.basename(img_path))[0] + '.txt')
        if not os.path.exists(label_path):
            continue

        with open(label_path) as f:
            lines = f.read().splitlines()
        true_label = [line.split('\t') for line in lines]
        true_labels.append(true_label)

        img = cv2.imread(img_path)
        h_img, w_img = img.shape[:2]
        results = det.detect(img)

        label = []
        for cls, result in results.items():
            result = sorted(result, key=lambda x: x[0], reverse=True)
            for prob, coord in result:
                xmin, ymin, xmax, ymax = [int(i) for i in coord]
                xmin /= w_img
                ymin /= h_img
                xmax /= w_img
                ymax /= h_img
                label.append([prob, cls, xmin, ymin, xmax, ymax])
        predict_labels.append(label)

    print('data size: {}'.format(len(predict_labels)))

    APs = []
    for clsid in range(args.num_classes):
        APs.append([])
        for iou_threshold in np.arange(0.50, 0.96, 0.05):
            precisions = []
            recalls = []
            for prob_threshold in np.arange(0.0, 1.01, 0.1):
                precision, recall = calc_precision(predict_labels, true_labels,
                                                   clsid, prob_threshold,
                                                   iou_threshold)
                if recall is not None:
                    precisions.append(precision)
                    recalls.append(recall)

            if len(recalls) == 0:
                continue

            step_num = 10
            maximum_precision = np.zeros(step_num + 1)
            for jx in range(len(recalls)):
                v = precisions[jx]
                k = int(recalls[jx] * step_num)  # horizontal axis
                maximum_precision[k] = max(maximum_precision[k], v)

            # interpolation
            v = 0
            for jx in range(step_num + 1):
                v = max(v, maximum_precision[-jx - 1])
                maximum_precision[-jx - 1] = v

            AP = np.mean(maximum_precision)
            APs[clsid].append(AP)

    mAP50 = []
    mAP5095 = []
    for ix, AP in enumerate(APs):
        if len(AP) == 0:
            continue
        name = get_classes(ix)
        mAP50.append(AP[0])
        mAP5095.append(np.mean(AP))
        print('{} {}: {}'.format(ix, name, AP[0] * 100))

    print('----------')
    print('mAP@0.5: {}'.format(np.mean(mAP50) * 100))
    print('mAP@[0.5:0.95]: {}'.format(np.mean(mAP5095) * 100))
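To see what the backward max-sweep above does, here is a toy run: each recall bin is filled with the best precision achievable at that recall or higher, and the AP is the mean over the 11 bins. The numbers are made up for illustration.

import numpy as np

precisions = [1.0, 0.6, 0.4]   # toy values
recalls = [0.0, 0.5, 1.0]
step_num = 10
maximum_precision = np.zeros(step_num + 1)
for jx in range(len(recalls)):
    k = int(recalls[jx] * step_num)  # recall bin on the horizontal axis
    maximum_precision[k] = max(maximum_precision[k], precisions[jx])
v = 0
for jx in range(step_num + 1):   # sweep right-to-left, carrying the max
    v = max(v, maximum_precision[-jx - 1])
    maximum_precision[-jx - 1] = v
print(maximum_precision)  # [1.0, 0.6, 0.6, 0.6, 0.6, 0.6, 0.4, 0.4, 0.4, 0.4, 0.4]
print(np.mean(maximum_precision))  # AP ~= 0.55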
Example #15
import PyQt5
from utils.driver import Driver
from utils.neuron import Neuron
from utils.detector import Detector
from utils.player import Player

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import time
import keyboard
from brian2 import NeuronGroup, ms

# initialise classes
# drv = Driver()
det = Detector("color", "orange")
plr = Player()

## Comment to remove plotting
fig, ax = plt.subplots()
ax.set_ylim([-0.08, 0.07])
line1, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'r-')
line2, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'b-')
line3, = ax.plot(np.linspace(0, 10000, 10000), np.zeros(10000), 'g-')
## /Comment to remove plotting

plt.show(block=False)
fig.canvas.draw()

# Izhikevich Neuron Model from http://brian2.readthedocs.io/en/stable/introduction/brian1_to_2/library.html
eqs = '''dv/dt = (0.04*active/ms/mV + 0.04/ms/mV)*v**2+(5/ms)*v+130*mV/ms-w + I : volt (unless refractory)
Example #16
def main(args):
    det = Detector(model_path=args.model_path,
                   input_size=args.input_size,
                   num_classes=args.num_classes,
                   use_sfam=args.sfam,
                   threshold=0.05)

    img_paths = glob.glob(os.path.join(args.image_dir, '*'))
    img_paths.sort()

    predict_labels = []
    true_labels = []

    for img_path in tqdm.tqdm(img_paths):
        label_path = os.path.join(
            args.label_dir,
            os.path.splitext(os.path.basename(img_path))[0] + '.txt')
        if not os.path.exists(label_path):
            continue

        with open(label_path) as f:
            lines = f.read().splitlines()

        label = []
        for line in lines:
            elements = line.split('\t')
            class_index = int(elements[0])
            xmin = float(elements[1])
            ymin = float(elements[2])
            xmax = float(elements[3])
            ymax = float(elements[4])
            label.append([class_index, xmin, ymin, xmax, ymax])

        true_labels.append(label)

        img = cv2.imread(img_path)
        h_img, w_img = img.shape[:2]
        results = det.detect(img)

        label = []
        for res in results:
            confidence = res['confidence']
            class_index = get_class_index(res['name'])
            xmin = res['left'] / w_img
            ymin = res['top'] / h_img
            xmax = res['right'] / w_img
            ymax = res['bottom'] / h_img
            label.append([confidence, class_index, xmin, ymin, xmax, ymax])
        predict_labels.append(label)

    print('data size: {}'.format(len(predict_labels)))

    AP = {}
    iou_threshold = 0.50
    for clsid in range(args.num_classes):
        precisions = []
        recalls = []
        for prob_threshold in np.arange(0.0, 1.01, 0.1):
            precision, recall = calc_precision(predict_labels, true_labels,
                                               clsid, prob_threshold,
                                               iou_threshold)
            if recall is not None:
                precisions.append(precision)
                recalls.append(recall)

        if len(recalls) == 0:
            continue

        step_num = 10
        maximum_precision = np.zeros(step_num + 1)
        for jx in range(len(recalls)):
            v = precisions[jx]
            k = int(recalls[jx] * step_num)  # horizontal axis
            maximum_precision[k] = max(maximum_precision[k], v)

        # interpolation
        v = 0
        for jx in range(step_num + 1):
            v = max(v, maximum_precision[-jx - 1])
            maximum_precision[-jx - 1] = v

        AP[clsid] = np.mean(maximum_precision)

    for class_index, elem in AP.items():
        class_name = get_class_name(class_index)
        print('{} {}: {}'.format(class_index + 1, class_name, elem * 100))

    print('----------')
    print('mAP@0.5: {}'.format(np.mean([v for i, v in AP.items()]) * 100))