Example #1
    def start_camera(self):
        self.start = time.time()
        self.finish = time.time()
        self.connect_server('192.168.1.183', 8989)
        with picamera.PiCamera() as camera:
            try:
                self.camera = camera
                detector = detection.Detection()
                self.reader = SocketReader(self.client_socket)
                self.reader.start()
                print('Starting writer thread')
                # a single SocketWriter for now; increase range() for more
                self.pool = [SocketWriter(self.connection_lock,
                                          self.client_socket, detector)
                             for i in range(1)]
                camera.resolution = (640, 480)
                camera.framerate = 10
                time.sleep(2)
                # capture_sequence() consumes the writer generator and
                # returns None, so it cannot be iterated over; call it
                # directly
                camera.capture_sequence(self.writers(),
                                        'jpeg',
                                        use_video_port=True)

            except Exception as e:
                print(e)
                print('Error connecting to server')
            finally:
                print('Stop streaming')
                camera.close()
                self.terminal_streaming()
Example #2
 def __init__(self, im, width, height, svm, maske):
     cv2.namedWindow("Hallo")
     cv2.setMouseCallback("Hallo", self.mausi)
     self.cam = kamera.kamera("../data/calibration/iscam2.cali", "")
     self.h, self.w, _ = np.shape(im)
     self.width = width
     self.height = height
     self.det = detection.Detection(im, maske)
     self.svm = svm
Example #3
    def __init__(self):
        rospy.on_shutdown(self.shutdown)
        rospy.init_node('robocops')

        self.mover = movement.Movement(self)
        self.detector = detection.Detection(self)
        self.scorer = score.Score()
        self.after = aftergettingshot.AfterGettingShot(self)

        self.rate = rospy.Rate(50)
        self.TO_SHOOT_OR_NOT_TO_SHOOT = 15
        self.cool_down = timer() - 15
        self.prev_disabled = False
Example #4
import numpy as np

import detection


def readDetectionsFromFile(filepath, **kwargs):
    """ Read a list of detections from a file 
    
    Parameters:
    -----------
    filepath: path to text file containing comma-delimited bounding box coordinates, one box per line
    **kwargs: additional arguments to pass to the detection constructor
    
    Returns:
    ------------
    detections: list of Detection objects with the bounding boxes specified in the file
    """
    coords = np.loadtxt(filepath, delimiter=',', ndmin=2)  # ndmin=2: stay 2-D even for a one-line file
    detections = []
    for bbox_flat in coords:
        # convert flat list of coordinates into the 4x2 bbox representation
        xs = bbox_flat[0::2]
        ys = bbox_flat[1::2]
        bbox = np.transpose(np.array([xs, ys]))
        detections.append(detection.Detection(bbox, **kwargs))
    return detections
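
A minimal usage sketch for readDetectionsFromFile, assuming a file laid out as the docstring describes (the file name and numbers are illustrative, not from the original):

# boxes.csv contains one flattened 4x2 box per line, e.g.:
#   0,0,10,0,10,20,0,20
#   5,5,15,5,15,25,5,25
detections = readDetectionsFromFile('boxes.csv')
print(len(detections))  # -> 2, one Detection object per line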
Example #5
import os
from random import randint

import cv2

import detection


def main():
    camID = 0

    det = detection.Detection(0, './cfg/yolov3.cfg', './cfg/yolov3.weights')

    # fetch the selected video
    video_init_path = "videos/cam_00_20190203.mp4"
    # Create a video capture object to read videos
    cap = cv2.VideoCapture(video_init_path)

    # success, frame = cap.read()
    # h,w,c = frame.shape
    # if not success:
    #     print('Failed to read video')
    #     sys.exit(1)

    # one random BGR color reused for every drawn box
    color = (randint(64, 255), randint(64, 255), randint(64, 255))
    counter = 0

    while cap.isOpened():

        success, frame = cap.read()
        if not success:
            break
        bbox_list = det.detect_one_frame(frame)

        for i, newbox in enumerate(bbox_list):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[2]), int(newbox[3]))
            cv2.rectangle(frame, p1, p2, color, 2, 1)

        # save the annotated frame
        cv2.imwrite(
            os.path.join('./detection_results',
                         str(counter).zfill(5) + '.jpg'), frame)

        counter += 1
Example #6
import board
import neopixel
from time import sleep

import detection

LED_CONTROL = 18  # BCM pin number; corresponds to board.D18 below

pixels = neopixel.NeoPixel(board.D18, 1)
pixels[0] = (255, 0, 0)

det = detection.Detection()
while True:
    # red if read_square() returns truthy, blue otherwise
    read = det.read_square(0, 0)
    if read:
        pixels[0] = (255, 0, 0)
    else:
        pixels[0] = (0, 0, 255)
    sleep(0.001)

# NOTE: the loop above never exits, so this color-cycle test is unreachable
while True:
    pixels[0] = (255, 0, 0)
    sleep(2)
    pixels[0] = (0, 255, 0)
    sleep(2)
    pixels[0] = (0, 0, 255)
    sleep(2)
    pixels[0] = (255, 255, 255)
    sleep(2)
Example #7
# -*- coding:utf-8 -*-
__author__ = 'shichao'

import detection
import reid_api

import cv2
import numpy as np
import glob
import os
import sys
from random import randint

REID = reid_api.ReID(0, './weights/reID/PRID/60/ft_ResNet50')
det = detection.Detection(0, './cfg/yolov3.cfg', './cfg/yolov3.weights')
feature_set = []
previous_bbox = []
target_cam_ID = 0
CAM_SWITCH = False


def compute_iou(rec1, rec2):
    # boxes are (y0, x0, y1, x1), as the index pattern below implies
    S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

    # computing the sum_area
    sum_area = S_rec1 + S_rec2

    left_line = max(rec1[1], rec2[1])
    right_line = min(rec1[3], rec2[3])
    top_line = max(rec1[0], rec2[0])
    bottom_line = min(rec1[2], rec2[2])

    # completion of the truncated original: standard IoU, returning 0
    # when the boxes do not overlap
    if left_line >= right_line or top_line >= bottom_line:
        return 0.0
    intersect = (right_line - left_line) * (bottom_line - top_line)
    return intersect / (sum_area - intersect)
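
A quick sanity check for compute_iou under the assumed (y0, x0, y1, x1) box layout (the boxes are made up for illustration):

# each box has area 4 and the overlap is a 1x1 square,
# so IoU = 1 / (4 + 4 - 1) ~= 0.143
box_a = (0, 0, 2, 2)
box_b = (1, 1, 3, 3)
print(compute_iou(box_a, box_b))  # 0.14285714285714285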
Example #8
    '''
    print('Scanned items')
    for goods, num in cart_num.items():
        if num != 0:
            print('{} : {} pcs'.format(goods, num))

    # print the total price of the items in the cart
    print('Total : {}\n'.format(sum(amount)))


# execution starts here when run as a script
if __name__ == '__main__':
    # instantiate the detector and the classifier
    detecter1 = detection.Detection()
    claster = classify.predict_class(model='../models/bottle_weight_2.hdf5')

    # initial values
    cart, amount, cart_num = reset()
    cart_loop = True
    pet_dict = {
        'namacha': 140,
        'soda_float': 150,
        'cclemon': 160,
        'fanta_litchi': 170,
        'cocacola': 180
    }

    # music (pygame mixer) setup
    pygame.mixer.quit()
Example #9
 def setUp(self) -> None:
     detectionService = detection.Detection()
     self.detectionDataWorker = detection.DetectionDataWorker(
         MagicMock(), detectionService.shared_memory_manager_dict)
Example #10
#! /usr/bin/env python

# Ref: http://mirror.umd.edu/roswiki/doc/diamondback/api/tf/html/python/tf_python.html

import time

import rospy
from tf import TransformListener

import detection
import movement

rospy.init_node('test', anonymous=True)
tf = TransformListener()
detector = detection.Detection()
mover = movement.Movement()

while True:
    # raw_input()
    time.sleep(0.2)

    if not detector.detected:
        continue

    pose = detector.detection_data.detections[0].pose
    pose.pose.position.z -= 0.5
    # frame_id = detector.detection_data.detections[0].pose.header.frame_id
    # pos = detector.detection_data.detections[0].pose.pose.position
    # ori = detector.detection_data.detections[0].pose.pose.orientation

    print(str(pose))
Example #11
import sys
sys.path.append("/home/pi/WebCam/")  # extend the search path before the imports that may need it

import cv2

import detection

detector = detection.Detection("../../model/detect.tflite",
                               "../../model/coco_labels_list.txt", 1)
out = detection.OutputInfo()

img = cv2.imread("../test.bmp")

img_1 = cv2.resize(img, (300, 300))
img_1 = cv2.cvtColor(img_1, cv2.COLOR_BGR2RGB)

detector.frameDetect(img_1, out)

for n in range(out.numbers):
    ymin = int(out.locations[4 * n] * img.shape[0])
    xmin = int(out.locations[4 * n + 1] * img.shape[1])
    ymax = int(out.locations[4 * n + 2] * img.shape[0])
    xmax = int(out.locations[4 * n + 3] * img.shape[1])
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), thickness=1)
    cv2.putText(img, out.classes[n], (xmin, ymin - 5),
                cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))

cv2.imwrite("posttest.bmp", img)
Example #12
import detection


def main():
    # keep a reference so the detector can actually be used
    det = detection.Detection(0, './detection/cfg/yolov3.cfg',
                              './detection/cfg/yolov3.weights')
    #det.detect_one_frame()

    print('weight files loaded')
Example #13
    cap.release()
    cv2.destroyAllWindows()

    print('exited.')


# main function
if __name__ == "__main__":
    # initialize font
    font = cv2.FONT_HERSHEY_PLAIN

    # track bars
    trackbarWindowName = 'color range parameter'

    # word detection
    detection = det.Detection()

    #train SVM model
    model = st.trainSVMLight(TRAIN_LETTERS)
    #model.save("model.dat")
    #model = st.SVM()
    #model.load("model.dat")

    # open camera (default is 0)
    #cam=int(raw_input("Enter Camera number: "))
    cam = int(CAMERA_INDEX)
    cap = cv2.VideoCapture(cam)
    cap.set(3, CAMERA_WIDTH)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
    cap.set(4, CAMERA_HEIGHT)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT

    actualCameraWidth = cap.get(3)  # the driver may not honor the requested size
Example #14
if __name__ == "__main__":
    print(os.getcwd())
    model = '20180402-114759'

    print("Create Session")
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False))

    recognition_threshold = 0.85
    conf_threshold = 0.7
    resize_rate = 0.5

    print("Load Network")
    # NOTE: this rebinds the imported `detection` module's name; a
    # distinct variable such as `detector` would be safer
    detection = detection.Detection(sess=sess,
                                    resize_rate=resize_rate,
                                    conf_threshold=conf_threshold)
    recognition = Recognition(sess=sess,
                              recognition_threshold=recognition_threshold,
                              resize_rate=resize_rate,
                              model_name=model,
                              classifier_name="test_2")

    # three separate lists (chained `a = b = c = []` would alias one list)
    bounding_boxes, match_names, p = [], [], []

    video = cv2.VideoCapture("../video/test.mp4")

    print("Start Reading...")
    while True:
        _, img = video.read()
        img = cv2.transpose(img)
Example #15
 def toDetection(self, params):
     return detection.Detection(np.reshape(params, (4, 2)))
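
For reference, a small sketch of the reshape this helper relies on (the values are illustrative):

import numpy as np

params = [10, 20, 30, 20, 30, 40, 10, 40]  # flat corner coordinates
print(np.reshape(params, (4, 2)))
# [[10 20]
#  [30 20]
#  [30 40]
#  [10 40]]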
Example #16
import glob
import os
import sys
from random import randint

import cv2

import detection
import reid_api


def main():
    camID = 0
    #model_path = '/home/user/reID/demo/weights/reID/ft_ResNet50'  #marker
    model_path = '/home/user/reID/demo_eng/weights/reID/PRID/60/ft_ResNet50'  #person-2011

    REID = reid_api.ReID(0, model_path)

    det = detection.Detection(0, './cfg/yolov3.cfg', './cfg/yolov3.weights')

    #load_reID_weight()
    #load_yolo_weight()

    # fetch the selected video
    video_init_path = "videos/cam_00_20190203.mp4"
    # Create a video capture object to read videos
    cap = cv2.VideoCapture(video_init_path)

    # fetch data from multi mp4 files into the video list
    video_all_paths = glob.glob(os.path.join('./videos', '*.mp4'))
    cap_list = []
    for i in range(len(video_all_paths)):
        cap_list.append(
            cv2.VideoCapture(
                os.path.join('videos',
                             'cam_' + str(i).zfill(2) + '_20190203.mp4')))
    # video_seq = [cv2.VideoCapture(video_path) for video_path in video_all_paths]
    # camID = str(os.path.basename(video_init_path).split('_')[1])

    # Read first frame
    success, frame = cap.read()
    h, w, c = frame.shape
    frameID = 1

    # quit if unable to read the video file
    if not success:
        print('Failed to read video')
        sys.exit(1)

    ## Select boxes
    bbox_cv = cv2.selectROI('target ROI', frame)
    cv2.destroyAllWindows()
    #print('the bbox coord is {0} and its length {1}'.format(bbox_cv,len(bbox_cv))
    bbox = list([
        [
            int(bbox_cv[0]),
            int(bbox_cv[1]),
            int(bbox_cv[0] + bbox_cv[2]),
            int(bbox_cv[1] + bbox_cv[3])
        ],
    ])
    print('the bbox coord is {0} and its length {1}'.format(
        bbox_cv, len(bbox_cv)))
    # one random BGR color reused for every drawn box
    color = (randint(64, 255), randint(64, 255), randint(64, 255))

    feature_set = []
    #print('bbox given to lyj {0}'.format(bbox))
    #print('the type of bbox {0}'.format(type(bbox)))
    init_feature, match_bbox = REID.rank_feature(bbox,
                                                 frame,
                                                 feature_set,
                                                 init_frame=True)
    feature_set.append(init_feature)
    counter = 0
    while cap.isOpened():

        success, frame = cap.read()
        if not success:
            break
        if counter % 5 == 0:
            bbox_list = det.detect_one_frame(frame)
            #bbox_list = get_bbox(frame)
            #print('feature set {0}'.format(feature_set))
            #print('len of feature set {0}'.format(len(feature_set)))
            match_feature_list, match_bbox_list, confidence_list = REID.rank_feature(
                bbox_list, frame, feature_set, init_frame=False)

            index = confidence_list.index(max(confidence_list))
            match_feature = match_feature_list[index]
            match_bbox = match_bbox_list[index]
            confidence = confidence_list[index]

            feature_set.append(match_feature)
            print('match bbox {0}'.format(match_bbox))
            #for i, newbox in enumerate(match_bbox):
            p1 = (int(match_bbox[0]), int(match_bbox[1]))
            p2 = (int(match_bbox[2]), int(match_bbox[3]))
            cv2.rectangle(frame, p1, p2, color, 2, 1)

            # save the annotated frame
            cv2.imwrite(
                os.path.join('./results',
                             str(counter).zfill(5) + '.jpg'), frame)

            # if the matched box nears the frame border, scan the other
            # cameras for the target
            if min(p1) < 10 or max(p1) > (w - 10) or min(p2) < 0 or max(p2) > (
                    h - 10):
                #model = model.cuda()
                feature_new = []
                bbox_new = []

                confidence_candidate = []
                bbox_candidate = []
                feature_candidate = []

                confidence_max = 0.6
                # needs multi threads here
                for i in range(1, len(video_all_paths) - 1):
                    cap = cap_list[i]
                    print('the {0} of the videos'.format(i))
                    success, frame = cap.read()
                    if not success:
                        break

                    bbox_list = det.detect_one_frame(frame)
                    match_feature_list, match_bbox_list, confidence_list = REID.rank_feature(
                        bbox_list, frame, feature_set, init_frame=False)

                    print('the confidence list is {0}'.format(confidence_list))
                    index = confidence_list.index(max(confidence_list))
                    match_feature = match_feature_list[index]
                    match_bbox = match_bbox_list[index]
                    confidence = confidence_list[index]

                    # feature_candidate += match_feature
                    # bbox_candidate += match_bbox
                    # confidence_candidate += confidence_list

                    if confidence > confidence_max:
                        confidence_max = confidence
                        feature_new = match_feature
                        bbox_new = match_bbox
                        camID = i

                cap = cap_list[camID]
                feature_set.append(feature_new)
                print(bbox_new)
                p1 = (int(bbox_new[0]), int(bbox_new[1]))
                p2 = (int(bbox_new[2]), int(bbox_new[3]))
                cv2.rectangle(frame, p1, p2, color, 2, 1)
                # save the annotated frame
                cv2.imwrite(
                    os.path.join('./results',
                                 str(counter).zfill(5) + '.jpg'), frame)
        counter += 1
Example #17
import random
import keras_preprocessing.image
import detection
import numpy as np

face_detector = detection.Detection()
face_detector.load_CNN_detector()


def load_and_crop_img(path,
                      grayscale=False,
                      color_mode='rgb',
                      target_size=None,
                      interpolation='nearest'):
    """Wraps keras_preprocessing.image.utils.loag_img() and adds cropping.
    Cropping method enumarated in interpolation

    # Arguments
        path: Path to image file.
        color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
            The desired image format.
        target_size: Either `None` (default to original size)
            or tuple of ints `(img_height, img_width)`.
        interpolation: Interpolation and crop methods used to resample and crop the image
            if the target size is different from that of the loaded image.
            Methods are delimited by ":" where first part is interpolation and second is crop
            e.g. "lanczos:random".
            Supported interpolation methods are "nearest", "bilinear", "bicubic", "lanczos",
            "box", "hamming" By default, "nearest" is used.
            Supported crop methods are "none", "center", "random".
Example #18
import cv2
import detection
from time import sleep

if __name__ == '__main__':
    detecter1 = detection.Detection()

    cap = cv2.VideoCapture(0)
    print('Starting capture.')

    # detection loop
    count = 1
    while True:
        sleep(0.001)
        ret, frame = cap.read()

        # frame read succeeded
        if ret:
            cv2.imshow('scan_Running', frame)
            cv2.waitKey(1)  # imshow needs a waitKey call to refresh the window
            detected_image = detecter1.object_detection(frame)

            # once a detection completes, write the crop out
            if detected_image is not None:
                print('Capture #{}'.format(count))
                cv2.imwrite('./image3/fanta_litchi/fanta_litchi__' + str(count) + '.jpg', detected_image)
                count += 1

                if count == 100:
                    break
Example #19
logger.addHandler(file_handler)

# Register the signal handlers
signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)

logger.info("Program started")
logger.debug('OpenCV version: {} '.format(cv2.__version__))

stop_event = threading.Event()
orig_img_q = queue.Queue(maxsize=1)

capturing_thread = None

try:
    detection_routine = detection.Detection(stop_event, orig_img_q, config)  # Not a thread!
    capturing_thread = capturing.Camera(orig_img_q, stop_event, config)

    capturing_thread.start()
    detection_routine.run()

except ServiceExit:
    # Terminate the running threads.
    # Set the shutdown flag on each thread to trigger a clean shutdown of each thread.
    stop_event.set()

except Exception as crash_err:
    crash_msg = '\n{0}\nAPP CRASH. Error msg:\n{1}\n{0}'.format(100 * '-', crash_err)
    logger.exception(crash_msg)
    stop_event.set()
    sys.stderr.write(crash_msg)
Example #20
    print(
        'Starting computation of the K={k} nearest-neighbor matrix'
        .format(k=k))
    ti = time()
    search_knn = ss.Searcher(television_path=path_to_save_tv,
                             comercial_path=path_to_save_comerciales)
    search_knn.knn(distance_measure='l2', k=k)
    path_to_save_knn = television_path.split('.')[0] + '_knn.npy'
    search_knn.save(path_to_save_knn)
    comerciales_names = search_knn.comerciales_names
    comerciales_len = search_knn.comerciales_len
    television_name = television_path.split('.')[0]
    knn_matrix = np.load(path_to_save_knn)
    tf = time()
    T = tf - ti
    print(
        'Finished computing the K={k} nearest-neighbor matrix after {T} seconds'
        .format(k=k, T=T))

    # detect commercial appearances and log them to a .txt file
    print('Starting appearance detection')
    ti = time()
    detections = dt.Detection(knn_matrix, comerciales_names, comerciales_len,
                              television_name)
    detections.get_detections()
    detections.save_detections('detecciones.txt')
    tf = time()
    T = tf - ti
    print('Finished appearance detection after {T} seconds'.format(T=T))
    print('END OF PROGRAM!')
Example #21
 def start_detection_thread(self):
     self.detection_running = True
     self.detection_thread = detection.Detection(name="Detection",
                                                 shared_variables=self)
     self.detection_thread.start()
Example #22
    start = time.time()
    finish = time.time()

    def streams():
        global count, finish
        while finish - start < 60:
            with pool_lock:
                if len(pool) == 0:
                    continue
                streamer = pool.pop()
            yield streamer.stream
            streamer.event.set()
            count += 1
            finish = time.time()

    # use a name that does not shadow the imported `detection` module
    detector = detection.Detection()
    with picamera.PiCamera() as camera:
        pool = [ImageStreamer(detector) for i in range(2)]
        camera.resolution = (640, 480)
        time.sleep(2)
        camera.capture_sequence(streams(), 'jpeg', use_video_port=True)
    # Shut down the streamers in an orderly fashion
    while pool:
        with pool_lock:
            streamer = pool.pop()
        streamer.terminated = True
        streamer.join()

    # Write the terminating 0-length to the connection to let the server
    # know we're done
    with connection_lock: