Code Example #1
File: decisor.py Project: camilo912/smart_car
    def __init__(self):
        self.shape = (224, 320, 3)

        # TensorFlow session kept on the instance so the model is loaded just one time
        model_name = "models/model_2.ckpt"
        self.sess = tf.Session()
        tf.saved_model.loader.load(self.sess, ['train'], model_name)

        detector_stop = dlib.fhog_object_detector('data/detector_stop.svm')
        detector_sem = dlib.fhog_object_detector('data/detector_sem.svm')
        self.detectors = [detector_stop, detector_sem]
        self.classes = ['stop', 'sem']
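
The constructor pairs self.detectors with self.classes by index. A minimal sketch of how they might be consumed (detect_signs is a hypothetical method name, not from the original project):

    def detect_signs(self, image):
        # run both FHOG detectors in one pass; each returned index selects
        # the detector and, by construction, also its class label
        boxes, confidences, idxs = dlib.fhog_object_detector.run_multiple(
            self.detectors, image, upsample_num_times=1, adjust_threshold=0.0)
        return [(self.classes[i], box, conf)
                for box, conf, i in zip(boxes, confidences, idxs)]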
Code Example #2
def GetMultiBuildmodel():
    model_paths = [
        'ML_model/CE.svm',
        'ML_model/WEEE.svm',
        'ML_model/ROHS.svm',
        'ML_model/ULCSA.svm',
        'ML_model/Fortinet.svm',
        'ML_model/ICES_A.svm',
        # 'ML_model/ICES_B.svm',
        'ML_model/RCM.svm',
        'ML_model/Address.svm',
    ]
    # load one FHOG detector per model file
    model = [dlib.fhog_object_detector(path) for path in model_paths]
    # derive the display names from the file names, kept in sync with model_paths
    modelname = [
        os.path.splitext(os.path.basename(path))[0] for path in model_paths
    ]

    global COLORS
    COLORS = np.random.uniform(0, 255, size=(len(model), 3))

    return model, modelname
Code Example #3
def trainObjects(images_folder):
    folders = os.listdir(images_folder)
    folders.remove('extras')
    detectors = []
    for folder in folders:
        xml = os.path.join(images_folder, folder, "{0}.xml".format(folder))
        #print("{0}.svm".format(folder))
        dlib.train_simple_object_detector(xml, "{0}.svm".format(folder),
                                          options)
        # save all detectors as list
        detectors.append("{0}.svm".format(folder))

    # Next, suppose you have trained multiple detectors and you want to run them
    # efficiently as a group. You can do this as follows
    detectorsModels = list()
    for detector in detectors:
        detectorsModels.append(dlib.fhog_object_detector(detector))

    # testing multiple detectors with image
    image = dlib.load_rgb_image(
        images_folder +
        '/head n shoulder/head-and-shoulder-best-oily-hair-shampoo.jpg')
    [boxes, confidences, detector_idxs
     ] = dlib.fhog_object_detector.run_multiple(detectorsModels,
                                                image,
                                                upsample_num_times=1,
                                                adjust_threshold=0.5)
    for i in range(len(boxes)):
        print("detector {} found box {} with confidence {}.".format(
            detector_idxs[i], boxes[i], confidences[i]))
    return detectorsModels, detectors
Code Example #4
 def detect_plate(self, cropped_image):
     
     detector = dlib.fhog_object_detector("plate_detector_100.svm")
     
     gray = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
     
     cars = detector(gray)
     
     # default 1x1 region: if nothing is detected, the crop below stays a
     # single pixel, which the len(...) <= 1 check treats as "no plate"
     a = 1
     b = 1
     h = 1
     w = 1
     
     for i in cars:
         cv2.rectangle(cropped_image, (i.left(), i.top()), (i.right(), i.bottom()), (0, 0, 255), 2)
         a = i.left()
         b = i.top()
         h = i.bottom() - i.top()
         w = i.right() - i.left()
     
     cropped_plate = cropped_image[b:b+h, a:a+w]
     
     if len(cropped_plate) <= 1:
         if self.no_plate_counter == 1:
             return self.same_frame, self.no_color
         else:
             self.no_plate_counter = 1
             return self.no_plate, self.get_car_color(cropped_image)
     else:
         self.no_plate_counter = 0
         return self.main(cropped_plate, cropped_image)
Code Example #5
 def detect_car(self, original_image):
 
     detector = dlib.fhog_object_detector("car_detector_100.svm")
     
     gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
     
     cars = detector(gray)
     
     a = 1
     b = 1
     h = 1
     w = 1
     
     for i in cars:
         cv2.rectangle(original_image, (i.left(), i.top()), (i.right(), i.bottom()), (0, 0, 255), 2)
         a = i.left()
         b = i.top()
         h = i.bottom() - i.top()
         w = i.right() - i.left()
     
     cropped = original_image[b:b+h, a:a+w]
     
     if len(cropped) <= 1:
         return self.no_car, self.no_color
     else:
         return self.detect_plate(cropped)
Code Example #6
def main(args):
   '''Initializes and cleanup ros node'''
   try:
      rospy.init_node('image_feature', anonymous=True)
   except rospy.ROSException:
      pass

   ic = image_feature()

   count = 0
   # create detector object
   #
   detector = dlib.fhog_object_detector("/home/roboboat/dlib/tools/imglab/detector2.svm")

   # Unpersists graph from file
   #
   with tf.gfile.FastGFile("/home/roboboat/tf_files_01/retrained_graph.pb", 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      _ = tf.import_graph_def(graph_def, name='')

   try:
      while True:
          ic.image_process(detector, count)
          count += 1

   except KeyboardInterrupt:
      print("shutting down ROS Image buoy detector module")

   cv2.destroyAllWindows()
Code Example #7
 def __init__(self, xsize, ysize, path_or_io):
     """
     Image is numpy array
     """
     assert path_or_io, "Please pass an image path"
     self.image = io.imread(path_or_io)
     self.detector = dlib.fhog_object_detector()
     self.objects = self.detector.run(self.image)
Code Example #8
    def __init__(self, sensor_name):
        self.sensor_name = sensor_name

        self.cfg_server = Server(FaceDetectionConfig, self.cfg_callback)
        self.cv_bridge = cv_bridge.CvBridge()
        self.detector = dlib.fhog_object_detector(self.face_detector_path)
        self.pool = multiprocessing.Pool(3)

        # get transformation between world, color, and depth images
        now = rospy.Time(0)
        tf_listener = tf.TransformListener()
        print(self.sensor_name)
        self.ir2rgb = recutils.lookupTransform(
            tf_listener, self.sensor_name + '_ir_optical_frame',
            self.sensor_name + '_rgb_optical_frame', 10.0, now)
        # self.ir2rgb = numpy.eye(4, 4).astype(numpy.float64)
        print('--- ir2rgb ---\n', self.ir2rgb)
        self.world2rgb = recutils.lookupTransform(
            tf_listener, '/world', self.sensor_name + '_rgb_optical_frame',
            10.0, now)
        print('--- world2rgb ---\n', self.world2rgb)

        self.pub = rospy.Publisher('/face_detector/detections',
                                   DetectionArray,
                                   queue_size=10)
        self.pub_local = rospy.Publisher(self.sensor_name +
                                         '/face_detector/detections',
                                         DetectionArray,
                                         queue_size=10)

        try:
            print('trying to listen to the raw rgb image topic...')
            rospy.client.wait_for_message(self.sensor_name + '/hd/image_color',
                                          Image, 1.0)
            img_subscriber = message_filters.Subscriber(
                self.sensor_name + '/hd/image_color', Image)
        except rospy.ROSException:
            print('failed, falling back to the compressed rgb image topic')
            img_subscriber = message_filters.Subscriber(
                self.sensor_name + '/hd/image_color/compressed',
                CompressedImage)

        self.subscribers = [
            img_subscriber,
            message_filters.Subscriber(self.sensor_name + '/hd/camera_info',
                                       CameraInfo),
            message_filters.Subscriber('/detector/detections', DetectionArray)
        ]

        # TypeSynchronizer doesn't work, the image time and the detection time are slightly different?
        # self.ts = message_filters.TimeSynchronizer(self.subscribers, 5)
        # self.ts = message_filters.ApproximateTimeSynchronizer(self.subscribers, 5, 0.0001)
        self.ts = recutils.TimeSynchronizer(self.subscribers, 60, 1000)
        self.ts.registerCallback(self.callback)

        self.reset_time_sub = rospy.Subscriber('/reset_time', Empty,
                                               self.reset_time)
        print("init complete")
Code Example #9
File: DJEleven.py Project: stong1108/DJ-Eleven
 def __init__(self):
     self.ratio_threshold = 0.25
     self.min_frames_below_threshold = 20
     self.frame_counter = 0
     self.face_detector = dlib.get_frontal_face_detector()
     self.face_predictor = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')
     self.hand_detector = dlib.fhog_object_detector('models/HandDetector.svm')
     self.blink_counter = 0
     self.hand_counter = 0
     self.min_hand_frames = 5
     self.pause_flag = False
     self.mp = MusicPlayer()
Code Example #10
    def __init__(self, makeup_artist):
        self.to_process = []
        self.to_output = []
        self.makeup_artist = makeup_artist
        self.face_detector = dlib.get_frontal_face_detector()
        self.landmark_detector = dlib.shape_predictor(
            "./resources/shape_68_dots.dat")
        self.handetector = dlib.fhog_object_detector(
            './resources/HandDetector.svm')
        self.pts = deque(maxlen=4)

        thread = threading.Thread(target=self.keep_processing, args=())
        thread.daemon = True
        thread.start()
Code Example #11
 def __init__(self, model):
     if isinstance(model, STRING_TYPES) or isinstance(model, Path):
         m_path = Path(model)
         if not m_path.exists():
             raise ValueError('Model {} does not exist.'.format(m_path))
         # There are two different kinds of object detector, the
         # simple_object_detector and the fhog_object_detector, but we
         # can't tell which is which from the file name. Therefore, try one
         # and then the other. Unfortunately, it throws a runtime error,
         # which we have to catch.
         try:
             model = dlib.simple_object_detector(str(m_path))
         except RuntimeError:
             model = dlib.fhog_object_detector(str(m_path))
     self._dlib_model = model
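
The same try-one-then-fall-back trick can stand alone as a helper. A minimal sketch (load_object_detector is a hypothetical name, not part of the original code):

def load_object_detector(path):
    # both detector types live in .svm files we can't tell apart by name,
    # so try simple_object_detector first and fall back on a RuntimeError
    try:
        return dlib.simple_object_detector(str(path))
    except RuntimeError:
        return dlib.fhog_object_detector(str(path))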
Code Example #12
File: project.py Project: andreaberti/BrowserHandler
def runProject():
    fist_cascade = cv2.CascadeClassifier()
    fist_cascade.load('./fist.xml')
    # <-- hand detector with dlib -->
    detector = dlib.fhog_object_detector("./HandDetector.svm")
    speech_recognizer = speechInit()
    try:
        win = dlib.image_window()
        driver = None
        cap = cv2.VideoCapture(0)
        fistCount = 0
        count = 0
        while True:
            win.clear_overlay()
            ret, image = cap.read()

            #image = imutils.resize(image, width=800)
            win.set_image(image)

            if isFist(image, fist_cascade):
                print("«-- Fist Found --»")
                if fistCount < 5:
                    fistCount += 1
                elif fistCount == 5 and driver is None:
                    fistCount = 0
                    print("«-- Can't close something that is not open --»")
                elif fistCount == 5 and driver is not None:
                    fistCount = 0
                    driver = fistDecisions(speech_recognizer, driver)
                continue
            else:
                fistCount = 0

            rects = detector(image)
            win.add_overlay(rects)

            if len(rects) == 0:
                count = 0
                print("NOT detected")
                time.sleep(0.4)
            else:
                count += 1
                print("detected")
                if count == 8:
                    driver = searchMySpeech(driver, speech_recognizer)
                    count = 0
    except KeyboardInterrupt:
        cap.release()
        if driver is not None:
            driver.quit()
Code Example #13
def Test_model(listNameModel, scaleim, cam):
    detectors = []
    cap = cv2.VideoCapture(cam)
    for model in listNameModel:
        detectors.append(dlib.fhog_object_detector(model))
    while True:
        start = time.time()
        ok, img = cap.read()
        image = pyramid(img, scaleim)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        [boxes, confidences, detector_idxs
         ] = dlib.fhog_object_detector.run_multiple(detectors,
                                                    image,
                                                    upsample_num_times=0,
                                                    adjust_threshold=0.0)
        for i in range(len(boxes)):
            if (confidences[i] > 1):
                confidences[i] = 1
            elif (confidences[i] < 0.1):
                continue
            face = boxes[i]
            x = int(face.left() * scaleim)
            y = int(face.top() * scaleim)
            w = int((face.right() - face.left()) * scaleim)
            h = int((face.bottom() - face.top()) * scaleim)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img,
                        str(detector_idxs[i]) + '  ' +
                        str(np.round(confidences[i] * 100, 2)) + '%', (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 250, 0),
                        1,
                        lineType=cv2.LINE_AA)
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        end = time.time()
        cv2.putText(img,
                    'FPS:' + str(np.round(1 / (end - start))), (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (100, 50, 150),
                    1,
                    lineType=cv2.LINE_AA)
        cv2.imshow("image", img)
    cap.release()
    cv2.destroyAllWindows()
Code Example #14
def detect_car(frame):
    #load car detector
    detector = dlib.fhog_object_detector("../DATA/SVM/car_detector.svm")
    #win = dlib.image_window()
    try:
        dets = detector(frame)
        # if dets is empty, `d` below is never bound and the return after
        # the loop raises NameError, which the except clause catches
        for d in dets:
            frame = frame[int(d.top()):int(d.bottom() + 20),
                          int(d.left()):int(d.right() + 20)]
            # Display the resulting frame
            cv2.imshow("frame", frame)
        return True, frame, (int(d.left()), int(d.top()),
                             int(d.right() + 20 - d.left()),
                             int(d.bottom() + 20 - d.top()))

    except:
        # no detection, or the HOG detector failed
        return False, None, None
Code Example #15
File: camera.py Project: AMIRANDA9112/Inmersion
    def __init__(self, makeup_artist):
        self.to_process = deque(maxlen=2)
        self.to_output = deque(maxlen=2)
        self.makeup_artist = makeup_artist
        self.face_detector = dlib.get_frontal_face_detector()
        self.landmark_detector = dlib.shape_predictor(
            "../resources/shape_68_dots.dat")
        self.handetector = dlib.fhog_object_detector(
            '../resources/HandDetector.svm')
        self.count = 0
        self.max = 0
        self.charge = False
        self.user = "******"
        self.background = cv2.imread("../media/home1.png")
        self.r1 = 0
        self.r2 = 0

        thread = threading.Thread(target=self.keep_processing, args=())
        thread.daemon = True
        thread.start()
Code Example #16
 def __init__(self,
              ert_model_path="",
              auxiliary_model_path="",
              face_detection_model_path="",
              facial_landmark_localiser=None,
              auxiliary_utility=None,
              face_detector=None):
     if len(ert_model_path) > 0:
         facial_landmark_localiser = FacialLandmarkLocaliser(ert_model_path)
     if len(auxiliary_model_path) > 0:
         auxiliary_utility = AuxiliaryUtility(auxiliary_model_path)
     super(ZenoFaceTracker, self).__init__(facial_landmark_localiser,
                                           auxiliary_utility)
     if len(face_detection_model_path) > 0:
         self._face_detector = dlib.fhog_object_detector(
             face_detection_model_path)
     elif face_detector is not None:
         self._face_detector = face_detector
     else:
         self._face_detector = dlib.get_frontal_face_detector()
     self._face_detection_gap = 0xFFFFFFFF
Code Example #17
    def __init__(self, node_name): 
        super(GoodFeatures, self).__init__(node_name)
          
        # Do we show text on the display?
        self.show_text = rospy.get_param("~show_text", True)
        
        # How big should the feature points be (in pixels)?
        self.feature_size = rospy.get_param("~feature_size", 1)
        
        # Good features parameters
        self.gf_maxCorners = rospy.get_param("~gf_maxCorners", 200)
        self.gf_qualityLevel = rospy.get_param("~gf_qualityLevel", 0.02)
        self.gf_minDistance = rospy.get_param("~gf_minDistance", 7)
        self.gf_blockSize = rospy.get_param("~gf_blockSize", 10)
        self.gf_useHarrisDetector = rospy.get_param("~gf_useHarrisDetector", True)
        self.gf_k = rospy.get_param("~gf_k", 0.04)
        
        # Store all parameters together for passing to the detector
        self.gf_params = dict(maxCorners = self.gf_maxCorners, 
                       qualityLevel = self.gf_qualityLevel,
                       minDistance = self.gf_minDistance,
                       blockSize = self.gf_blockSize,
                       useHarrisDetector = self.gf_useHarrisDetector,
                       k = self.gf_k)

        # Initialize key variables
        self.keypoints = list()
        self.detect_box = None
        self.mask = None
        self.boolFlag = Bool()
        #self.detector1 = dlib.fhog_object_detector("Pedestriandetector.svm")
        #self.detector2 = dlib.fhog_object_detector("Pedestriandetector2.svm")
        self.detector3 = dlib.fhog_object_detector("Pedestriandetector33.svm")
        self.detectors = [self.detector3]
        pygame.init()
        self.s = pygame.mixer.Sound('Dong.WAV')
Code Example #18
DETECTOR = 'data/cat_face_detector.svm'
# Pre-trained shape predictor for cat facial landmarks
SHAPE_PREDICTOR = 'data/cat_landmark_predictor.dat'

# finds landmarks in the form (from viewer perspective):
# index - (x,y)
MOUTH_INDEX = 0
LEFT_EYE_INDEX = 1
LEFT_EAR_LEFT_INDEX = 2
RIGHT_EAR_LEFT_INDEX = 3
NOSE_INDEX = 4
RIGHT_EYE_INDEX = 5
LEFT_EAR_RIGHT_INDEX = 6
RIGHT_EAR_RIGHT_INDEX = 7

detector = dlib.fhog_object_detector(DETECTOR)
haar_detector = dlib.fhog_object_detector(DETECTOR)
landmarks_predictor = dlib.shape_predictor(SHAPE_PREDICTOR)


# convenience function from imutils
def dlib_to_cv_bounding_box(box):
    # convert dlib bounding box for OpenCV display
    x = box.left()
    y = box.top()
    w = box.right() - x
    h = box.bottom() - y

    return x, y, w, h

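A short usage sketch for the helper above (assumed, not from the original file; it presumes cv2 is imported and image is an OpenCV BGR array):

for box in detector(image):
    x, y, w, h = dlib_to_cv_bounding_box(box)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
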
Code Example #19
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("dog_detector.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand. But in general it would be a different detector.
detector2 = dlib.fhog_object_detector("dog_detector.svm") 
# make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = io.imread(faces_folder + '/2008_002506.jpg')
[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run_multiple(detectors, image, upsample_num_times=1, adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(detector_idxs[i], boxes[i], confidences[i]))

# Finally, note that you don't have to use the XML based input to
# train_simple_object_detector().  If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.
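
The listing stops here, so the promised call is missing. As a sketch of what it looks like (the file names and box coordinates below are placeholders, not from the original project), train_simple_object_detector() also accepts a list of images and a parallel list of dlib.rectangle lists, and returns the trained detector directly:

images = [io.imread(faces_folder + '/image1.jpg'),
          io.imread(faces_folder + '/image2.jpg')]
# one list of ground-truth boxes per image, in the same order as images
boxes = [[dlib.rectangle(left=10, top=10, right=100, bottom=100)],
         [dlib.rectangle(left=20, top=30, right=90, bottom=120)]]
options = dlib.simple_object_detector_training_options()
detector3 = dlib.train_simple_object_detector(images, boxes, options)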
Code Example #20
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("../biblioteca/detector_focinhos.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand. But in general it would be a different detector.
detector2 = dlib.fhog_object_detector("../biblioteca/detector_focinhos.svm")
# make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = dlib.load_rgb_image(faces_folder + '/20201104_131526 (2).jpg')
[boxes, confidences,
 detector_idxs] = dlib.fhog_object_detector.run_multiple(detectors,
                                                         image,
                                                         upsample_num_times=1,
                                                         adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(
        detector_idxs[i], boxes[i], confidences[i]))
Code Example #21
from keras.models import load_model
from keras.preprocessing.image import img_to_array
import glob
import os
from matplotlib import pyplot as plt
from difflib import SequenceMatcher 

DIR = 'D:\\UFPR-ALPR dataset\\testing_seg\\'

digit_model_path = 'classification\\digit_otsu.26-0.08.hdf5'
letter_model_path = 'classification\\letter_otsu.49-0.83.hdf5'

W, H = 300, 100

print("loading detector model...")
detector = dlib.fhog_object_detector("plate_detector.svm")
print("done!")

print("loading digit model...")
digit_model = load_model(digit_model_path)
print("done!")


print("loading letter model...")
letter_model = load_model(letter_model_path)
print("done!")

all_correct = 0
six_correct = 0
five_correct = 0
total = len(glob.glob(os.path.join(DIR,'**','*.png'), recursive=True))
Code Example #22
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('ip', help='ip for raspberry pi car')
    args = parser.parse_args()
    ip = args.ip  # 10.42.0.235
    port = '8000'
    base_url = 'http://' + ip + ':' + port + '/'

    actions = [
        'stop', 'forward_left', 'forward', 'forward_right', 'backward_left',
        'backward', 'backward_right', 'fwstraight'
    ]

    #if(not connection_ok(base_url)):
    #    raise Exception('Connection failed')

    detector1 = dlib.fhog_object_detector("./../detector_stop.svm")
    detector2 = dlib.fhog_object_detector("./../detector_sem.svm")
    detector3 = dlib.fhog_object_detector("./../detector_der.svm")
    #detector4 = dlib.fhog_object_detector("./../detector_izq.svm")
    detectors = [detector1, detector2]
    detectors2 = [detector3]
    #cap = cv2.VideoCapture(base_url + '?action=stream')
    #cap = cv2.VideoCapture('http://10.42.0.235:8080/?action=stream')
    #cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('./../linesData2.mp4')
    ret, frame = cap.read()

    if (not ret):
        raise Exception('no image read')

    model = load_model("./lineModels/anonymous_modelv7.h5")
Code Example #23
# import the necessary libraries
from __future__ import print_function
from imutils import paths
from scipy.io import loadmat
from skimage import io
import argparse
import dlib
import cv2
import re
import os


cup = dlib.fhog_object_detector("output/cup")
forestPark = dlib.fhog_object_detector("output/forestPark")
waterNang = dlib.fhog_object_detector("output/waterNang")
yehliu = dlib.fhog_object_detector("output/yehliu")
turtleIsland = dlib.fhog_object_detector("output/turtleIsland")
detectors = [cup, forestPark, waterNang, yehliu, turtleIsland]
detectorList = ["cup", "forestPark", "waterNang", "yehliu", "turtleIsland"]


#define user
num = 0
num = num + 1
user1 = 1
usr = "******".format(num)
print(usr)

for imgPath in paths.list_images("spot/test1"):

    #load image
Code Example #24
import os
import sys
import glob
import matplotlib.pyplot as plt
import cv2
import dlib

dataset_folder_path = "./dataset/2019-06-26/lowsize/"
svm_path = os.path.join(dataset_folder_path, "detector.svm")
landmarks_path = os.path.join(dataset_folder_path, "landmarks.dat")

detector = dlib.fhog_object_detector(svm_path)

landmarks_detector = dlib.shape_predictor(landmarks_path)

image_name = "robot_1.png"
image_path = os.path.join(dataset_folder_path, image_name)
image = dlib.load_rgb_image(image_path)

[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run(detector, 
                                                                    image, 
                                                                    upsample_num_times=1, 
                                                                    adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(detector_idxs[i], 
                                                                boxes[i], 
                                                                confidences[i]))

def printLandmark(image, landmarks, color):    
    for p in landmarks.parts():
        cv2.circle(image, (p.x, p.y), 20, color, 2)
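
printLandmark() is defined above but its call site is cut off. A plausible usage sketch (assumed, not from the original file) feeds each detected box to the shape predictor and draws the landmark circles:

for box in boxes:
    landmarks = landmarks_detector(image, box)
    printLandmark(image, landmarks, (0, 255, 0))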
Code Example #25
 def __init__(self, input_image):
     self.input_image = input_image
     self.image_data = io.imread(input_image)
     self.detector = dlib.fhog_object_detector(DETECTOR_SVM)
     self.predictor = dlib.shape_predictor(PREDICTOR_DAT)
     self.result = DetectorResult()
Code Example #26
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("detector.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand. But in general it would be a different detector.
detector2 = dlib.fhog_object_detector("detector.svm") 
# make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = dlib.load_rgb_image(faces_folder + '/2008_002506.jpg')
[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run_multiple(detectors, image, upsample_num_times=1, adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(detector_idxs[i], boxes[i], confidences[i]))

# Finally, note that you don't have to use the XML based input to
# train_simple_object_detector().  If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.
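
The listing is cut off here as well. Continuing the sketch (assuming images is a list of numpy images and boxes the matching lists of dlib.rectangle objects, as in the placeholder above), the detector returned by the array-based call can be saved and evaluated with dlib's built-in tester:

options = dlib.simple_object_detector_training_options()
detector3 = dlib.train_simple_object_detector(images, boxes, options)
# persist the trained detector, then report precision, recall, and
# average precision on the training data
detector3.save('detector3.svm')
print(dlib.test_simple_object_detector(images, boxes, detector3))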
Code Example #27
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("detector_0.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand. But in general it would be a different detector.
detector2 = dlib.fhog_object_detector("detector_0.svm")
# make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = io.imread(faces_folder + '/2008_002506.jpg')
[boxes, confidences,
 detector_idxs] = dlib.fhog_object_detector.run_multiple(detectors,
                                                         image,
                                                         upsample_num_times=1,
                                                         adjust_threshold=0.0)
for i in range(len(boxes)):
    print("detector {} found box {} with confidence {}.".format(
        detector_idxs[i], boxes[i], confidences[i]))
Code Example #28
import numpy as np

test_folder = "img/"

df = pd.DataFrame(
    100 * np.ones((6, 6)),
    columns=["name", "x", "y", "w", "h", "confidences"],
)

dict = {0.0: "sz", 1.0: "cz", 2.0: "dp", 3.0: "pd", 4.0: "zb", 5.0: "mf"}

# define the codec and create the VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("predict.avi", fourcc, 20.0, (1280, 480))

detector1 = dlib.fhog_object_detector("sz.svm")
detector2 = dlib.fhog_object_detector("cz.svm")
detector3 = dlib.fhog_object_detector("dp.svm")
detector4 = dlib.fhog_object_detector("pb.svm")
detector5 = dlib.fhog_object_detector("zb.svm")
detector6 = dlib.fhog_object_detector("mf.svm")

detectors = [detector1, detector2, detector3, detector4, detector5, detector6]


def draw_circle(frame, df):
    global dict
    for i in range(6):
        if df.iloc[i, 0] != 100:
            cv2.putText(
                frame,
Code Example #29
import imutils
import dlib
import cv2

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.fhog_object_detector(
    "/home/vsmart/Works/trafcamanprengine/data/model/car_175x75.svm")

# Video capture source
cap = cv2.VideoCapture(
    "/home/vsmart/Works/Dlib_SVM/testmodle/Oto_in_020719_13h.mp4")

# We can look at the HOG filter we learned.  It should look like a car.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

win = dlib.image_window()
writer = None

while True:

    ret, image = cap.read()
    if not ret:
        break
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #image = imutils.resize(image, width=800)

    rects = detector(image)

    for k, d in enumerate(rects):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
Code Example #30
from openalpr import Alpr
import sys
import cv2
import argparse
import imutils
from camera import VideoCamera
from hog_detector import detect_car
import dlib

from carDetector import *
car_detect = carDetector()

#load car detector
detector = dlib.fhog_object_detector("../DATA/SVM/car_detector.svm")
win = dlib.image_window()

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
ap.add_argument("-m", "--model", help="path to trained model")
args = vars(ap.parse_args())

if not args.get("video", False):
    video_camera = VideoCamera(0)
    # otherwise, grab a reference to the video file
else:
    video_camera = VideoCamera(args["video"])


def read_number_plate(im):
Code Example #31
import os,sys,glob,dlib,cv2
from skimage import io
cord_pts=[]
detector = dlib.fhog_object_detector("anpr_detector.svm")
img = io.imread(sys.argv[1])
dets = detector(img)
for k, d in enumerate(dets):
    cord_pts.append(d.left())
    cord_pts.append(d.top())
    cord_pts.append(d.right())
    cord_pts.append(d.bottom())
print(cord_pts)
# crop the detected plate region, clamping it to the image bounds
anpr_part = img[cord_pts[1]:cord_pts[3], max(0, cord_pts[0]):min(cord_pts[2], img.shape[1])]
cv2.imwrite("anpr3.jpg",anpr_part)
Code Example #32
File: fhog_detector.py Project: markpp/MTB
 def load_dlib_detector(self):
     print("Loading detector...")
     self.detectors = [
         dlib.fhog_object_detector("../data/models/detector_mtb.svm"),
         dlib.fhog_object_detector("../data/models/detector_ped.svm"),
     ]
Code Example #33
 def add_detector(self, det):
     self._detectors.append(dlib.fhog_object_detector(det))