Code example #1
    def use_object_detector(self):

        if HTTPRequestHandler.detector is None:
            if (os.path.isfile("detector.svm")):
                print("initializing HOG detector")
                HTTPRequestHandler.detector = dlib.simple_object_detector(
                    "detector.svm")
                HTTPRequestHandler.detector0 = dlib.simple_object_detector(
                    "detector_0.svm")
                HTTPRequestHandler.detector1 = dlib.simple_object_detector(
                    "detector_1.svm")
                HTTPRequestHandler.detector2 = dlib.simple_object_detector(
                    "detector_2.svm")
            else:
                print("No detector available")
                return

        self.wfile.write(b"[")
        firstFile = True
        for filename in glob.glob(os.path.join('tmp', "*.png")):
            print("Processing file: {}".format(filename))
            if (not firstFile):
                self.wfile.write(b",")
            self.wfile.write(
                ("{\"image\":\"%s\",\"detections\":[" % filename).encode())

            image = dlib.load_rgb_image(filename)
            #            dets = HTTPRequestHandler.detector(image)
            detectors = [
                HTTPRequestHandler.detector0, HTTPRequestHandler.detector1,
                HTTPRequestHandler.detector2
            ]
            boxes, confidences, detector_idxs = dlib.fhog_object_detector.run_multiple(
                detectors, image, upsample_num_times=1, adjust_threshold=0.0)
            print("boxes: {}".format(boxes))
            print("confidences: {}".format(confidences))
            print("detector_idxs: {}".format(detector_idxs))
            firstDetection = True
            for i in range(0, len(boxes)):
                print("detected object: " + str(boxes[i]))
                if (not firstDetection):
                    self.wfile.write(b",")
                self.wfile.write((
                    "{\"top\":%i,\"bottom\":%i,\"left\":%i,\"right\":%i,\"index\":%i}"
                    % (boxes[i].top(), boxes[i].bottom(), boxes[i].left(),
                       boxes[i].right(), detector_idxs[i])).encode())
                #                 self.draw_rectangle(image, d)
                #                 processed_file = self.path_to_processed_file(filename, "trainedhog")
                #                 dlib.save_image(image, processed_file)
                firstDetection = False
            self.wfile.write(b"]}")
            firstFile = False
        self.wfile.write(b"]")
Code example #2
def gen(anom_type):
	if anom_type=="mobilephones":
		detector=dlib.simple_object_detector("detector.svm")
	elif anom_type=="ciggarette":
		detector=dlib.simple_object_detector("cigg_detector.svm")
	elif anom_type=="id":
		detector=dlib.simple_object_detector("ID_detector.svm")
	
	try:
		host = "10.15.2.7:8080/video"
		hoststr = 'http://' + host

		stream=urllib2.urlopen(hoststr)

		bytes=''

		while True:
			bytes+=stream.read(1024)
			a = bytes.find('\xff\xd8')
			b = bytes.find('\xff\xd9')
			if a!=-1 and b!=-1:
				jpg = bytes[a:b+2]
				bytes= bytes[b+2:]
				streamline = StringIO.StringIO(jpg)
				img = Image.open(streamline)
				


				#basewidth = 300
				#wpercent = (basewidth/float(img.size[0]))
				#hsize = int((float(img.size[1])*float(wpercent)))
				#img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)

				frame=np.array(img)		
				
				color = np.array([0, 255, 0], dtype=np.uint8)
				dets = detector(frame)
				for k, d in enumerate(dets):
					print("Mobile Detected")
					boundingbox=(d.left(), d.top()), (d.right(), d.bottom())
					im = Image.fromarray(frame)
					dr = ImageDraw.Draw(im)
					dr.rectangle(((d.left(),d.top()),(d.right(),d.bottom())), outline = "blue")
					frame=np.array(im)
				convjpg = Image.fromarray(frame)
				imgByteArr=io.BytesIO()
				convjpg.save(imgByteArr,format="jpeg")
				imgByteArr=imgByteArr.getvalue()				
				#print("-------------")
				#print(convjpg)
				#print(frame)
				yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + imgByteArr + b'\r\n')
	except Exception as e:
		pass
Code example #3
 def train_object_detector(self):
     self.train_object_detector_for_path("landmarks.xml", "detector.svm")
     self.train_object_detector_for_path("landmarks_0.xml",
                                         "detector_0.svm")
     self.train_object_detector_for_path("landmarks_1.xml",
                                         "detector_1.svm")
     self.train_object_detector_for_path("landmarks_2.xml",
                                         "detector_2.svm")
     self.detector = dlib.simple_object_detector("detector.svm")
     self.detector0 = dlib.simple_object_detector("detector_0.svm")
     self.detector1 = dlib.simple_object_detector("detector_1.svm")
     self.detector2 = dlib.simple_object_detector("detector_2.svm")
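The train_object_detector_for_path helper is not part of this snippet. A plausible minimal sketch, assuming it simply wraps dlib.train_simple_object_detector (the option values here are illustrative, not the project's actual settings):

def train_object_detector_for_path(self, xml_path, svm_path):
    # Train a HOG detector from an imglab-style XML annotation file
    # and save the result as a .svm file.
    options = dlib.simple_object_detector_training_options()
    options.add_left_right_image_flips = True  # assumption: symmetric targets
    options.C = 5
    dlib.train_simple_object_detector(xml_path, svm_path, options)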
Code example #4
File: detector.py Project: MMichels/RoboJogoVelha
    def __init__(self, fonteImagem: str, svm: str):
        """
        :param fonteImagem: fonte da imagem a ser analizada, pode ser um diretorio de arquivo
        ou a String 'camera:numerodispositivo', por padrao o numero é 0
        :param svm: diretorio do arquivo .svm
        """
        self.fonteImagem = fonteImagem
        if svm is not None:
            self.svm = dlib.simple_object_detector(svm)
        else:
            self.svm = dlib.simple_object_detector(DetectorX.SVM)

        self.objs = []
        self.deteccoes = {}
        self.detectado = False
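A hypothetical usage of this constructor; the paths and device number are placeholders, and the 'camera:0' form follows the docstring above:

det_cam = DetectorX("camera:0", "detector.svm")  # webcam device 0, explicit model
det_file = DetectorX("photo.jpg", None)          # falls back to DetectorX.SVM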
Code example #5
def process_image():
    if request:
        r = request
        # convert string of image data to uint8
        nparr = np.frombuffer(r.data, np.uint8)
        # decode image
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)


        # do some fancy processing here....
        detector = dlib.simple_object_detector("data/models/cone_hog.svm")
        h, w, n = img.shape
        dets = detector(img)
        direction = 0
        if dets:
            direction = (w / 2) - dets[0].center().x

        cv2.imshow("Foto", img)


        # build a response dict to send back to client
        response = {'message': 'image received. size={}x{}, direction={}'.format(img.shape[1], img.shape[0], direction)
                    }
        # encode response using jsonpickle
        response_pickled = jsonpickle.encode(response)

        return Response(response=response_pickled, status=200, mimetype="application/json")
Code example #6
def predict(image_folder):
    # Doing the detection
    detector = dlib.simple_object_detector("detector.svm")

    # Looking at the HOG filter the machine has learned.
    win_det = dlib.image_window()
    win_det.set_image(detector)

    # running the detector for all the images in the folder
    print("Using ML the images for detection...")
    win = dlib.image_window()
    for f in glob.glob(os.path.join(image_folder, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        io.imshow(img)

        dets = detector(img)
        print("Number of detections: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))

        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
Code example #7
def detect(testpath="Testing/"):
    test_folder=testpath
    testing_xml_path = os.path.join(test_folder, "testing.xml")
# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
    detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
    win_det = dlib.image_window()
    win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
    print("Showing detections on the images in the testing folder...")
    win = dlib.image_window()
    for f in glob.glob(os.path.join(test_folder, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)
        dets = detector(img)
        print(dets)
        print("Number of cards detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
Code example #8
    def __init__(self, detector_file, classifer_file, device='cpu'):
        # Object detector
        self.detector = dlib.simple_object_detector(detector_file)

        # Shape Classifier
        self.classifier = ShapeClassifier().to(device)
        self.classifier.load_state_dict(torch.load(classifer_file))
        self.device = device

        self.colors = np.array([[0, 0, 255], [0, 255, 0], [255, 0, 0],
                                [0, 156, 255], [128, 128, 128], [0, 255, 255]])

        self.idx2color = {
            0: 'red',
            1: 'green',
            2: 'blue',
            3: 'orange',
            4: 'gray',
            5: 'yellow'
        }
        self.preproc = tfms.Compose([
            tfms.Grayscale(),
            tfms.Resize((40, 40)),
            tfms.ToTensor(),
            Binarize()
        ])
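The snippet stops at __init__, so here is a sketch of how the detector and the shape classifier might be combined at inference time; the method name predict_colors, the PIL round-trip, and the argmax policy are assumptions, not the project's actual code:

    def predict_colors(self, image):
        from PIL import Image
        # Detect boxes, crop each one, push it through the preprocessing
        # pipeline, and map the classifier's argmax to a color name.
        names = []
        for b in self.detector(image):
            crop = Image.fromarray(image[b.top():b.bottom(), b.left():b.right()])
            x = self.preproc(crop).unsqueeze(0).to(self.device)
            idx = self.classifier(x).argmax(dim=1).item()
            names.append(self.idx2color[idx])
        return names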
Code example #9
def facedetector_dlib(img, image_path):
    try:
        # detector = dlib.get_frontal_face_detector()
        detector = dlib.simple_object_detector('./detector.svm')
        # convert BGR to RGB (from OpenCV format to skimage format)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # frontal_face_detector's run() returns rectangles, scores and
        # sub-detector indices:
        # dets, scores, idx = detector.run(img_rgb, 0)
        # calling the detector directly returns only the rectangles,
        # which is what the loop below expects
        dets = detector(img_rgb, 0)
        # rectangle color
        color = (0, 0, 255)
        s = ''
        if len(dets) > 0:
            # at least one face was detected
            for i, rect in enumerate(dets):
                # rect is a rectangle; scores/idx would come from run()
                # (0.0 is the main detector; larger idx values are weaker)
                # print rect, scores[i], idx[i]
                cv2.rectangle(img, (rect.left(), rect.top()),
                              (rect.right(), rect.bottom()),
                              color,
                              thickness=10)
                s += (str(rect.left()) + ' ' + str(rect.top()) + ' ' +
                      str(rect.right()) + ' ' + str(rect.bottom()) + ' ')
            s += image_path
        # returns the image with rectangles drawn and
        # s = 'x1 y1 x2 y2 x1 y1 x2 y2 file_name'; if no face was found, s = ''
        return img, s
    except Exception:
        # e.g. on memory errors
        return img, ""
Code example #10
    def __init__(self, options=None, loadPath=None):
        self.options = options
        if self.options is None:
            self.options = dlib.simple_object_detector_training_options()

        if loadPath is not None:
            self._detector = dlib.simple_object_detector(loadPath)
Code example #11
def result( request, pk, svm ):
    brand = get_object_or_404( Brands, pk = pk )
    if request.POST.has_key( 'client_response' ):
        data = request.POST[ 'client_response' ]
        response_dict = {}
        response_dict[ 'tags' ] = {}
        print svm
        paths = Paths.objects.filter( brand_id = pk )
        if paths.exists():
            for pt in paths:
                dpath = pt.demo_path
                svmpath = pt.svm_path
        #svms = Svms.objects.filter( brand_id = pk ).latest( 'id' )
        svmname = svm+".svm"
        print svmname
        test_folder = dpath
        outputpath = svmpath +"/"+ svmname
        if not os.path.exists(outputpath):
            return HttpResponse("File has been deleted")
        fileformat = "jpg"
        print outputpath
        detector = dlib.simple_object_detector( str(outputpath) )
        for f in glob.glob( os.path.join( test_folder, data ) ):
            print("Processing file: {}".format(f))
            img = io.imread(f)
            dets = detector(img)
            print("Number of Objects detected: {}".format(len(dets)))
            for k, d in enumerate(dets):
                print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
                response_dict['tags'][ k ] = { 'name':data,'l': format(d.left()),'t': format(d.top()),'r': format(d.right()),'b': format(d.bottom()),'count': format(len(dets)) }
        return HttpResponse(json.dumps(response_dict))
    else:
        return render(request,template_name, context_instance=RequestContext(request))
Code example #12
def handle_locations(img_fp, detector_fp):
    detector = dlib.simple_object_detector(detector_fp)
    img = io.imread(img_fp)
    img = color.rgb2gray(img)
    img = img_as_ubyte(img)
    dets = detector(img)
    return dets
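A small usage sketch for handle_locations; the file names are placeholders, and the width/height conversion just uses the dlib rectangle accessors seen throughout these examples:

dets = handle_locations("scene.jpg", "detector.svm")
for d in dets:
    x, y = d.left(), d.top()
    w, h = d.right() - x, d.bottom() - y
    print("box at ({}, {}) size {}x{}".format(x, y, w, h))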
Code example #13
def cut_face(origin_path, dist_path):
    # process each image
    img_path_list = glob.glob(origin_path + "/*")
    detector = dlib.simple_object_detector("detector.svm")
    for img_path in img_path_list:
        print(img_path)
        # parse the file name
        base_name = os.path.basename(img_path)
        name, ext = os.path.splitext(base_name)
        if (ext != '.jpg') and (ext != '.jpeg') and (ext != '.png'):
            print("not a picture")
            continue

        img_src = cv2.imread(img_path, 1)

        # face detection
        dets = detector(img_src)

        # if any faces were found
        if len(dets) > 0:
            i = 0
            for d in dets:
                face = img_src[d.top():d.bottom(), d.left():d.right()]
                file_name = dist_path + name + "_" + str(i) + ext
                cv2.imwrite(file_name, face)
                i += 1
        else:
            print("not find any faces")
        shutil.move(img_path, origin_path + "/../finished/")
Code example #14
def cut_BoardPhoto():
    detector_model = dlib.simple_object_detector('detector.svm')
    board_box = {'left': 0.0, 'top': 0.0, 'right': 0.0, 'bottom': 0.0}
    current_path = os.getcwd()
    test_folder = current_path + '/20200820/'
    for imgName in glob.glob(test_folder + '*.jpg'):
        name = os.path.basename(imgName)
        print("Processing file: {}".format(imgName))
        img = cv2.imread(imgName, cv2.IMREAD_COLOR)
        b, g, r = cv2.split(img)
        img2 = cv2.merge([r, g, b])
        board_rects = detector_model(img2)
        count = 0
        for index, board in enumerate(board_rects):
            count = count + 1
            print('board {}; left {}; top {}; right {}; bottom {}'.format(
                index, board.left(), board.top(), board.right(),
                board.bottom()))
            board_box['left'] = board.left()
            board_box['top'] = board.top()
            board_box['right'] = board.right()
            board_box['bottom'] = board.bottom()
            # cv2.rectangle expects a color argument before the thickness
            cv2.rectangle(img, (board_box['left'], board_box['top']),
                          (board_box['right'], board_box['bottom']),
                          (0, 255, 0), 3)
            crop_frame = img[board_box['top']:board_box['bottom'],
                             board_box['left']:board_box['right']]
            print(count)
            # cv2.imshow('name',crop_frame)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
            print(
                cv2.imwrite(
                    '/Users/peterxli/Hualai/detect_object/crop_frame/' +
                    str(count) + name, crop_frame))
Code example #15
File: boobs-dlib.py Project: mazurkin/boobs-detector
def camera_boobs():
    import cv2

    # cam = cv2.VideoCapture(-1)
    cam = cv2.VideoCapture("/home/nick/temp/boobs/b1.flv")

    win = dlib.image_window()
    detector = dlib.simple_object_detector("detector.svm")

    def detect():
        s, img = cam.read()
        if not s:
            return

        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        dets = detector(img2)
        num = len(dets)

        if num > 0:
            print("found!")

        win.clear_overlay()
        win.set_image(img2)
        win.add_overlay(dets)

    try:
        while True:
            detect()
    except KeyboardInterrupt as e:
        print("exiting")

    cam.release()
    cv2.destroyAllWindows()
Code example #16
def test():
    detector = dlib.simple_object_detector("detector/" + sys.argv[1])
    testList = []
    for pattern in ['testImg/*.%s' % ext for ext in ["jpg", "png"]]:
        for item in glob.glob(pattern):
            testList.append(cv2.cvtColor(cv2.imread(item), cv2.COLOR_BGR2RGB))
    for image in testList:
        detectedBoxes = detector(image)
        print(detectedBoxes)
        finalBoxes = []
        for detectedBox in detectedBoxes:
            (x, y, xb, yb) = [
                detectedBox.left(),
                detectedBox.top(),
                detectedBox.right(),
                detectedBox.bottom()
            ]
            finalBoxes.append((x, y, xb, yb))
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        for finalBox in finalBoxes:
            (x, y, xb, yb) = finalBox
            cv2.rectangle(image, (x, y), (xb, yb), (0, 0, 255), 2)
        cv2.imshow("Detected", image)
        cv2.waitKey(0)
Code example #17
    def step5(self, btn):
        #Take photo
        self.imgnum += 1
        os.system('fswebcam %s %s/img%s.jpg' %
                  (self.extraparam, self.tmp, self.imgnum))

        detector = dlib.simple_object_detector(
            os.path.join(self.tmp, "detector.svm"))
        win = dlib.image_window()

        start = time.time()
        img = io.imread('%s/img%s.jpg' % (self.tmp, self.imgnum))
        # The 1 in the second argument indicates that we should upsample the image
        # 1 time.  This will make everything bigger and allow us to detect more
        # faces.
        dets = detector(img, 1)
        print("detected: {}".format(len(dets)))
        for i, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                i, d.left(), d.top(), d.right(), d.bottom()))

        print("took " + str(time.time() - start))
        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        sleep(2)
Code example #18
def run_detector(detector):

    folder = "../data/TrainHOG/"
    coins_folder = folder
    detective = str(detector)

    # Now let's use the detector as you would in a normal application.  First we
    # will load it from disk.
    detector = dlib.simple_object_detector(detective)

    # We can look at the HOG filter we learned.  It should look like a coin.  Neat!
    #win_det = dlib.image_window()
    #win_det.set_image(detector)

    # Now let's run the detector over the images in the coins/Test folder and display the
    # results.
    test_fold = os.path.join(coins_folder, "ExData/rep/")
    print("Showing detections on the images in the test folder...")

    for f in glob.glob(os.path.join(test_fold, "*.jpg")):
        win = dlib.image_window()
        print("Processing file: {}".format(f))
        img = io.imread(f)
        dets = detector(img)
        print("Number of coins detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
        win.set_image(img)
        win.add_overlay(dets)
        #time.sleep(5)
        win.wait_until_closed()
Code example #19
    def __init__(self, default_speed):
        # initialize each component
        self.Tracking = whiteLineTrackingImage.TrackingLine()
        self.capture = cv2.VideoCapture(0)
        #self.capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'));
        self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 0)
        self.default_speed = default_speed
        carController.set_speed(default_speed, default_speed)

        # store the size of the camera image in use
        ref, testimg = self.capture.read()
        print(testimg.shape)
        self.height = int(testimg.shape[0] / 2)
        self.width = int(testimg.shape[1] / 2)
        self.mid_height = int(math.ceil(self.height / 4))
        self.mid_width = int(math.ceil(self.width / 4))

        fps = int(self.capture.get(cv2.CAP_PROP_FPS))
        w = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        self.video = cv2.VideoWriter('video.mp4', fourcc, fps, (w, h))

        self.coe = 1.2  # strength of left/right turns
        self.tracking_point = 5  # position from the bottom at which to detect the white line
        self.obs_point = 200  # position from the bottom at which to detect obstacles

        SvmFile = "./detector.svm"
        self.detector = dlib.simple_object_detector(SvmFile)

        print("Initialization finished")
Code example #20
def threaded(c):
    data = b""
    detector = dlib.simple_object_detector("data/models/cone_hog.svm")
    while True:
        # data received from the client
        data = recv_size(c)
        if not data:
            print_lock.release()
            break
        frame = pickle.loads(data, fix_imports=True, encoding="bytes")
        frame = cv2.imdecode(frame, cv2.COLOR_BGR2GRAY)

        h, w, n = frame.shape
        dets = detector(frame)
        direction = 0
        if dets:
            direction = dets[0].center().x - (w / 2)

        for det in dets:
            p1 = (det.left(), det.top())
            p2 = (det.right(), det.bottom())
            color = (0, 0, 255)  # Red
            cv2.rectangle(frame, p1, p2, color)
        cv2.imshow('ImageWindow', frame)
        if cv2.waitKey(1) == 27:
            break
        response = 'direcao={}'.format(direction)
        # send the message to the client
        c.send(response.encode())
    # close the connection
    c.close()
Code example #21
 def __init__(self, model):
     if isinstance(model, STRING_TYPES) or isinstance(model, Path):
         m_path = Path(model)
         if not Path(m_path).exists():
             raise ValueError('Model {} does not exist.'.format(m_path))
         model = dlib.simple_object_detector(str(m_path))
     self._dlib_model = model
Code example #22
    def __init__(self,
                 detector_filepath,
                 confidence_threshold=7.5,
                 backwards_confidence_threshold=7.5):
        # Our object detector
        self.dlib_detector = dlib.simple_object_detector(detector_filepath)
        # List of tracked objects, which are dlib_ObjectTracker objects
        self.tracked_objects = []
        # Threshold for confidence. If an object tracker falls below this
        # confidence, do not consider it a new object
        self.confidence_threshold = confidence_threshold
        # If a new object overlaps with at least new_area_threshold,
        # we DO NOT consider it a new object
        self.new_area_threshold = 0.5
        # If two existing objects overlap with at least
        # exist_area_threshold, we do NOT consider them different and
        # will merge them into a single object
        self.existing_area_threshold = 0.7
        # For adding new objects
        # Helper object for tracking possible new objects (need to see it in
        # multiple consecutive frames before starting to fully track it)
        self.possible_object_tracker = PossibleObjectTracker()
        # List of bounding boxes on new objects that we have begun to track
        # This is emptied at the start of each call to update, but can be called
        # after update to get the locations of new objects, if any
        self.new_bounding_boxes = []

        self.backwards_tracked_objects = []
        self.backwards_confidence_threshold = backwards_confidence_threshold
Code example #23
def image_api(request):
    if request.method == "POST":
        postDatum = request.POST.keys()
        data = []
        print postDatum
        for postData in postDatum:
            url = request.POST[postData]
            if (url.lower()).endswith(('jpg','jpeg')):
                urllib.urlretrieve(url, "static/test/abc.jpg")
            elif (url.lower()).endswith(('.png')):
                urllib.urlretrieve(url, "static/test/abc.png")
                im = Image.open("static/test/abc.png")
                im.save("static/test/abc.jpg")
                os.remove("static/test/abc.png")
            else:
                continue
            brand = request.POST['brand']
            brand = brand.lower()
            print brand
            if (brand != 'engage_fem' and brand != 'adpl' and brand != "engage_men"):
                return HttpResponse(json.dumps("Enter valid brand name ex: engage_men, engage_fem, adpl"))
            if brand == 'engage_fem':
                outputpath = "engage_fem.svm"
            elif brand == 'engage_men':
                outputpath = "engage_men.svm"
            else:
                outputpath = "ruslan.svm"
            name = os.path.basename(url)
            test_folder = "/var/sites/thirdauth/static/test/"
            detector = dlib.simple_object_detector(outputpath)
            img_count = []
            img_tags_count = []
            img_name = { }
            img_tags = { }
            t5tags = [ ]
            t5out = [ ]
            for f in glob.glob(os.path.join(test_folder,"abc.jpg")):
                print("Processing file: {}".format(f))
                img = io.imread(f)
                dets = detector(img)
                print("Number of Objects detected: {}".format(len(dets)))
                for k, d in enumerate(dets):
                    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
                    img_tags = { 'left': format(d.left()),'top': format(d.top()),'right': format(d.right()),'bottom': format(d.bottom()),'count': format(len(dets)) }
                    img_tags_count.append(img_tags)
                    h = abs( int( format(d.top()) ) - int( format(d.bottom()) ) )
                    w = abs( int( format(d.right()) ) - int( format(d.left()) ) )
                    t5tags = [ int( format(d.top()) ), int( format(d.left()) ), w , h ]
                    t5out.append( t5tags )
                img_name = { 'count':format(len(dets)), 'name':name ,'tags': img_tags_count }
                img_count.append(img_name)
                data.append(img_count)
                t5out = json.dumps( t5out )
                print t5out
                T5hawktags( picture = name, tag_count = format( len( dets ) ), tags = t5out, brandName = brand, imageType = 'activitypictures', attribute = ' ' ).save()
                os.remove("static/test/abc.jpg")
        return HttpResponse(json.dumps(data))
    else:
        return render(request,template_name, context_instance=RequestContext(request))
Code example #24
    def __init__(self, options=None, loadPath=None):
        if loadPath is not None:  #Loading exisiting hog model
            self.detector = dlib.simple_object_detector(loadPath)

        if options is None:
            self.options = dlib.simple_object_detector_training_options()
        else:
            self.options = options
Code example #25
 def __init__(self, options=None, loadPath=None):
     #create detector options
     self.options = options
     if self.options is None:
         self.options = dlib.simple_object_detector_training_options()
     #load the trained detector (for testing)
     if loadPath is not None:
         self._detector = dlib.simple_object_detector(loadPath)
Code example #26
    def __init__(self,
                 model_pupil="model/pupil.svm",
                 model_eye="model/eye.svm"):
        self.detector_pupil = dlib.simple_object_detector(model_pupil)
        self.detector_eye = dlib.simple_object_detector(model_eye)

        self.eye_x = 0
        self.eye_y = 0
        self.eye_w = 0
        self.eye_h = 0

        self.pupil_x = 0
        self.pupil_y = 0
        self.pupil_w = 0
        self.pupil_h = 0

        self.pupil_c_x = 0
        self.pupil_c_y = 0
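The class above only initializes its fields; a sketch of what a per-frame update might look like, assuming frame is an RGB numpy array (the method name and the first-box-wins policy are assumptions):

    def update(self, frame):
        # Run both detectors and keep the first box from each, if any.
        eyes = self.detector_eye(frame)
        if eyes:
            e = eyes[0]
            self.eye_x, self.eye_y = e.left(), e.top()
            self.eye_w = e.right() - e.left()
            self.eye_h = e.bottom() - e.top()
        pupils = self.detector_pupil(frame)
        if pupils:
            p = pupils[0]
            self.pupil_x, self.pupil_y = p.left(), p.top()
            self.pupil_w = p.right() - p.left()
            self.pupil_h = p.bottom() - p.top()
            self.pupil_c_x = (p.left() + p.right()) // 2
            self.pupil_c_y = (p.top() + p.bottom()) // 2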
Code example #27
File: detection_hog.py Project: MickeyKen/cv_neuroud
    def __init__(self):
        # self.image_pub = rospy.Publisher("image_topic_2",Image)
        self.pnt_pub = rospy.Publisher("human_point_xy",
                                       Int32MultiArray,
                                       queue_size=100)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/fixed_camera_rgb/rgb/image_raw",
                                          Image, self.callback)

        self.path = rospkg.RosPack().get_path('cv_neuroud') + '/config/'

        self.human_SvmFile = self.path + "human_detector.svm"
        self.ud_SvmFile = self.path + "ud_detector.svm"
        self.human_detector = dlib.simple_object_detector(self.human_SvmFile)
        self.ud_detector = dlib.simple_object_detector(self.ud_SvmFile)
        self.win_det = dlib.image_window()
        self.win_det.set_image(self.human_detector)
        self.win_det.set_image(self.ud_detector)
Code example #28
File: boobs-dlib.py Project: mazurkin/boobs-detector
def test():
    detector = dlib.simple_object_detector("../boobs.svm")

    win_det = dlib.image_window()
    win_det.set_image(detector)
    # dlib.hit_enter_to_continue()

    print("Showing detections on the images in the faces folder...")
    win = dlib.image_window()

    tp = 0
    fn = 0
    dur = 0

    test_path = "../pics/test/*.jpg"
    # test_path = "/home/external/moderation-p**n-detector/boobs-oboobs/*.jpg"

    for f in glob.glob(test_path):
        try:
            img = io.imread(f)
        except IOError as e:
            print("Image {} can't be loaded: {}".format(f, e.message))
            continue

        try:
            t_start = time.clock()
            dets = detector(img)
            t_end = time.clock()
        except RuntimeError as e:
            print("Image {} can't be detected: {}".format(f, e.message))
            continue

        dur += t_end - t_start

        num = len(dets)
        if num > 0:
            print("Boobs {} detected in file: {}".format(num, f))
            for k, d in enumerate(dets):
                print(
                    "Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                        k, d.left(), d.top(), d.right(), d.bottom()
                    )
                )

            win.clear_overlay()
            win.set_image(img)
            win.add_overlay(dets)
            # dlib.hit_enter_to_continue()

            tp += 1
        else:
            fn += 1

        if tp + fn > 100:
            break

    print("tp={} fn={} precision={} dur={}".format(tp, fn, 1.0 * tp / (tp + fn), 1.0 * dur / (tp + fn)))
Code example #29
    def detectDogHead(self, img):
        detector = dlib.simple_object_detector("dog_detector.svm")
        dets = detector(img)

        if (len(dets) == 0):
            print("Can't detect Head")
            dets_pop = dlib.rectangle(0, 0, 0, 0)
        else:
            dets_pop = dets.pop()
        return dets_pop
Code example #30
    def detectDogHead(self, img):
        detector = dlib.simple_object_detector(
            os.path.join(root_path, "data/dog_detector.svm"))
        dets = detector(img)

        if (len(dets) == 0):
            print("Can't detect Head")
            return False
        else:
            dets_pop = dets.pop()
        return dets_pop
Code example #31
File: detector.py Project: alisaadati97/Pyteeth
    def __init__(self, options=None, loadPath=None):
        #create detector options
        self.options = options
        if self.options is None:
            self.options = dlib.simple_object_detector_training_options()

        #load the trained detector (for testing)
        if loadPath is not None:
            base_path = os.path.abspath(os.path.dirname(__file__))
            self._detector = dlib.simple_object_detector(base_path + "/" +
                                                         loadPath)
Code example #32
 def handle(self, *args, **options ):
     test_folder = args[0]
     outputpath = args[1]
     data = []
     brand = outputpath
     images = os.listdir( test_folder )
     print images
     for image in images:
         imageName = image.split( '.' )
         if image.endswith(('.png','.PNG')):
             try:
                 im = Image.open(test_folder+image)
                 im.save( test_folder+imageName[0]+'.jpg' )
             except:
                 continue
         picName = imageName[0]+".jpg"
         detector = dlib.simple_object_detector(outputpath)
         img_count = [ ]
         img_tags_count = [ ]
         img_name = { }
         img_tags = { }
         t5hawktags = [ ]
         t5hawkout = [ ]
         for f in glob.glob(os.path.join(test_folder,picName)):
             print("Processing file: {}".format(f))
             img = io.imread( f )
             dets = detector( img )
             print("Number of Objects detected: {}".format( len( dets ) ) )
             for k, d in enumerate( dets ):
                 print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
                 img_tags = { 'left': format( d.left() ),'top': format( d.top() ),'right': format( d.right() ),'bottom': format( d.bottom() ),'count': format( len(dets) ) }
                 t = int( format( d.top() ) )
                 l = int( format( d.left() ) ) 
                 w = abs( int( format( d.right() ) ) - int( format( d.left() ) ) )
                 h = abs( int( format( d.top() ) ) - int( format( d.bottom() ) ) )
                 t5hawktags = [ t, l, w, h ]
                 t5hawkout.append( t5hawktags )
                 img_tags_count.append( img_tags )
             img_name = { 'count':format(len(dets)), 'name':image, 'tags': img_tags_count }
             img_count.append( img_name )
             data.append( img_count )
             print t5hawkout
             t5hawkout = json.dumps( t5hawkout )
             #try:
                 #obj = T5hawktags.objects.get( picture = image, t5hawkbrand_id = 5 )
             #except T5hawktags.DoesNotExist:
              #   obj = T5hawktags( picture = image, tag_count = format( len( dets ) ), tags = t5hawkout, brandName = brand, t5hawkbrand_id = 5,attribute = " " )
               #  obj.save()
     self.stdout.write( json.dumps(data) )
Code example #33
File: detect.py Project: VLAM3D/menpodetect
 def __init__(self, model):
     if isinstance(model, STRING_TYPES) or isinstance(model, Path):
         m_path = Path(model)
         if not Path(m_path).exists():
             raise ValueError('Model {} does not exist.'.format(m_path))
         # There are two different kinds of object detector, the
         # simple_object_detector and the fhog_object_detector, but we
         # can't tell which is which from the file name. Therefore, try one
         # and then the other. Unfortunately, it throws a runtime error,
         # which we have to catch.
         try:
             model = dlib.simple_object_detector(str(m_path))
         except RuntimeError:
             model = dlib.fhog_object_detector(str(m_path))
     self._dlib_model = model
Code example #34
def classify(img):
    detector = dlib.simple_object_detector("cupdetector_2.svm")

    win_det = dlib.image_window()
    win_det.set_image(detector)

    win = dlib.image_window()
    test_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test'
    convert_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test_convert'
    assorted_dir = '/home/jyotiska/Dropbox/Computer Vision/Item bucket'

    items = os.listdir(assorted_dir)

    convert_i = 0
    for f in glob.glob(convert_dir+"/*.*"):
        print "processing file:", f
        img = io.imread(f)
        extension = f.split(".")[1]
        convert_file = "convert_"+str(convert_i)+"."+extension
        shutil.copy(f,convert_file)
        print "convert file:",convert_file
        background = Image.open(convert_file)
        dets = detector(img)

        print "number of cups detected:", len(dets)
        for d in dets:
            x = d.left()
            y = d.top()
            width = d.right() - x
            height = d.bottom() - y
            print "  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()
            print width, height

            r = random.randint(0,len(items)-1)
            print r,items[r]
            random_item = Image.open(assorted_dir+"/"+items[r])
            # scale it a bit more, and adjust position

            # Apply blur?
            resized = random_item.resize( (int(1.2*width),int(1.2*height)) )
            background.paste(resized, (d.left()-12,d.top()-10), resized)

        background.show()
        background.save(convert_file)
        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        convert_i += 1
Code example #35
File: classify_func.py Project: amruthv/Dionysus
def classify(dir, svm_param_file):
  detMap = {}
  print type(svm_param_file)
  detector = dlib.simple_object_detector(svm_param_file)
  print detector
  for dirr, _, files in os.walk(test_images_dir):
    for f in files:
      if f.endswith('.jpeg'):
        im = io.imread(dirr + f)
        dets = detector(im)

        # split filename take first piece
        # match 'filename' with detected count
        detMap[f.split('.')[0]] = len(dets)
        print f + ' had: ', len(dets)

  return score(detMap)
Code example #36
File: cron.py Project: praveenbkulkarni/pavan
 def TagImage( picName, name, imgName, count, attributes ):
         i = 1
         for svmName in svmNames:
             t5hawkout = [ ]
             brandName = svmName
             outputpath = '/var/sites/thirdauth/static/test/'+svmName+'.svm'
             detector = dlib.simple_object_detector( str( outputpath ) )
             for f in glob.glob( os.path.join( test_folder, picName ) ):
                 print("Processing file: {}".format(f))
                 img = io.imread( f )
                 dets = detector( img )
                 print("Number of Objects detected: {}".format( len( dets ) ) )
                 for k, d in enumerate( dets ):
                     print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
                     img_tags = { 'left': format( d.left() ),'top': format( d.top() ),'right': format( d.right() ),'bottom': format( d.bottom() ),'count': format( len(dets) ) }
                     t = int( format( d.top() ) )
                     l = int( format( d.left() ) ) 
                     w = abs( int( format( d.right() ) ) - int( format( d.left() ) ) )
                     h = abs( int( format( d.top() ) ) - int( format( d.bottom() ) ) )
                     t5hawktags = [ t, l, w, h ]
                     t5hawkout.append( t5hawktags )
                     img_tags_count.append( img_tags )
                 img_name = { 'count':format(len(dets)), 'name':name, 'tags': img_tags_count }
                 img_count.append( img_name )
                 data.append( img_count )
                 t5hawkout = json.dumps( t5hawkout )
                 HawkTags = { 'picture': name, 'tag_count':format( len( dets ) ), 'tags':t5hawkout, 'brandName': brandName, 't5hawkbrand_id':i, 'attributes': attributes   }
                 Hawk.append( HawkTags )
                 try:
                     obj = T5hawktags.objects.get( picture = name, brandName = brandName )
                     pictable = Picturetables.objects.all()
                 except T5hawktags.DoesNotExist:
                     obj = T5hawktags( picture = name, tag_count = format( len( dets ) ), tags = t5hawkout, brandName = brandName, t5hawkbrand_id = i,attribute = json.dumps( attributes ) )
                     obj.save()
                 #os.remove("static/test/"+imgName[0]+".jpg")
             #print Hawk
             #self.stdout.write(json.dumps(data))
             sendtourl = "http://"+domainName+".bizom.in/activities/save35hawktags"
             req = urllib2.Request(sendtourl, json.dumps( Hawk ), {'Content-Type': 'application/json'})
             f = urllib2.urlopen( req )
             response = f.read()
             f.close()
             i = i + 1
         return json.dumps(data)
Code example #37
def get_frame_detections(requests, frame):
    import os
    import sys
    import glob

    import dlib
    from skimage import io
    detector = dlib.simple_object_detector("detector.svm")
    img = io.imread(os.path.join(settings.FRAMES_DIR, frame))
    dets = detector(img)
    detections = []
    for k, d in enumerate(dets):
        x1 = d.left()
        x2 = d.right()
        y1 = d.top()
        y2 = d.bottom()
        w = x2 - x1
        h = y2 - y1
        detections.append((x1, y1, w, h))    
    return HttpResponse(json.dumps(detections), content_type="application/json")
Code example #38
File: bottle_classifier.py Project: amruthv/Dionysus
bottle_training_data = 'helpers/bottles_dataset.xml'
img_list = 'fetch_images/image_urls.txt'
options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = True

# options.detection_window_size = 8000
options.C = 4
options.epsilon = 0.01
options.num_threads = 8
options.be_verbose = True

dlib.train_simple_object_detector(bottle_training_data,"square_bottle_classifier.svm",options)

ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') + '/'

parentDir = 'models/'
if not os.path.exists(parentDir + st):
    os.makedirs(parentDir + st)
    shutil.copyfile("square_bottle_classifier.svm", parentDir + st + "square_bottle_classifier.svm")
    shutil.copyfile(bottle_training_data, parentDir + st + "bottles_dataset.xml")
    shutil.copyfile(img_list, parentDir + st + "image_urls.txt")

detector = dlib.simple_object_detector("square_bottle_classifier.svm")
win = dlib.image_window()
win.set_image(detector)

# print "\nTraining accuracy: ", dlib.test_simple_object_detector(bottle_training_data, "square_bottle_classifier.svm")

Code example #39
File: views.py Project: praveenbkulkarni/pavan
def HawkTags( request ):
    companies = Companies.objects.filter( id = 3 )
    if companies.exists():
        for company in companies:
            domainName = company.domainname
    print domainName
    fromdate = '2014-07-01'
    todate = '2014-09-01'
    tableName = 'activitypicture'
    brands_id = 1
    url = "http://stagingadpl.bizom.in/activities/getPictures?&fromdate=2014-07-01&todate=2014-09-01&pageno=6&limit=10&tableName=activitypictures"
    req = urllib2.Request ( url )
    req.add_header('Accept', url )
    resp = urllib2.urlopen( req )
    content = resp.read()
    content = json.loads( content )
    print content
    error = content[ 'result' ]
    if error != True:
        return HttpResponse( "No data found for this date range.." )
    data = []
    attributes = { }
    bucket = content[ "data" ][ "bucket" ]
    postDatum = content[ "data" ][ "pictures" ]
    print postDatum
    for postData in postDatum:
        img_name = postData[ 'img_name' ]
        activity_ids = postData[ 'activity_id' ]
        ids = postData[ 'id' ]
        for key, val in postData.iteritems():
            if key !='img_name':
                if key !='id':
                    attributes[ key ] = val
                if key == 'id':
                    attributes[ tableName+"_"+ key ] = val
        url = bucket + img_name
        attempts = 0
        imgName = img_name.split( '.' )
        picName = imgName[0]+".jpg"
        while attempts <= 3:
            timeout = 30
            socket.setdefaulttimeout(timeout)
            try:
                if not os.path.isfile( "static/test/"+imgName[0]+".jpg" ):
                    if (url.lower()).endswith(('.png')):
                        urllib.urlretrieve(url, "static/test/"+img_name)
                        im = Image.open("static/test/"+img_name)
                        im.save("static/test/"+imgName[0]+".jpg")
                        os.remove("static/test/"+img_name)
                    else:
                        # a `continue` here would retry forever without
                        # incrementing attempts; give up instead
                        break
                print "try"
                if os.path.isfile( "static/test/"+imgName[0]+".jpg" ):
                    print "m here"
                    attempts = 4
            except:
                attempts += 1
                print "except"
        brand = "adpl"
        brand = brand.lower()
        print brand
        if (brand != 'engage_fem' and brand != 'adpl' and brand != "engage_men"):
            return HttpResponse(json.dumps("Enter valid brand name ex: engage_men, engage_fem, adpl"))
        if brand == 'engage_fem':
            outputpath = "engage_fem.svm"
        elif brand == 'engage_men':
            outputpath = "engage_men.svm"
        else:
            outputpath = "ruslan.svm"
        name = os.path.basename(url)
        test_folder = "/var/sites/thirdauth/static/test/"
        detector = dlib.simple_object_detector(outputpath)
        img_count = [ ]
        img_tags_count = [ ]
        img_name = { }
        img_tags = { }
        t5hawktags = [ ]
        t5hawkout = [ ]
        for f in glob.glob(os.path.join(test_folder,picName)):
            print("Processing file: {}".format(f))
            img = io.imread( f )
            dets = detector( img )
            print("Number of Objects detected: {}".format( len( dets ) ) )
            for k, d in enumerate( dets ):
                print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
                img_tags = { 'left': format( d.left() ),'top': format( d.top() ),'right': format( d.right() ),'bottom': format( d.bottom() ),'count': format( len(dets) ) }
                t = int( format( d.top() ) )
                l = int( format( d.left() ) ) 
                w = abs( int( format( d.right() ) ) - int( format( d.left() ) ) )
                h = abs( int( format( d.top() ) ) - int( format( d.bottom() ) ) )
                t5hawktags = [ t, l, w, h ]
                t5hawkout.append( t5hawktags )
                img_tags_count.append( img_tags )
            img_name = { 'count':format(len(dets)), 'name':name, 'tags': img_tags_count }
            img_count.append( img_name )
            data.append( img_count )
            t5hawkout = json.dumps( t5hawkout )
            try:
                obj = T5hawktags.objects.get( picture = name, t5hawkbrand_id = brands_id )
            except T5hawktags.DoesNotExist:
                obj = T5hawktags( picture = name, tag_count = format( len( dets ) ), tags = t5hawkout, brandName = brand, t5hawkbrand_id = brands_id,attribute = json.dumps( attributes ) )
                obj.save()
            #os.remove("static/test/abc.jpg")
    return HttpResponse(json.dumps(data))
Code example #40
import os,sys,Image,dlib,random
from skimage import io
import numpy as np

# print "\nTest1 accuracy: ", dlib.test_simple_object_detector('/home/jyotiska/Dropbox/Computer Vision/cupdataset_2_test.xml',"cupdetector_2.svm")
# print "\nTraining accuracy: ", dlib.test_simple_object_detector('/home/jyotiska/Dropbox/Computer Vision/cupdataset_3.xml',"cupdetector_3.svm")

detector = dlib.simple_object_detector("cupdetector_4.svm")

# win_det = dlib.image_window()
# win_det.set_image(detector)

# win = dlib.image_window()
# test_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test'
# convert_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test_convert'
assorted_dir = 'ItemBucket/'

items = os.listdir(assorted_dir)

def classify(img):
  dets = detector(img)
  background = Image.fromarray(np.array(img))
  for d in dets:
    x = d.left()
    y = d.top()
    width = d.right() - x
    height = d.bottom() - y
    print ">> detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom()

    r = random.randint(0,len(items)-1)
    random_item = Image.open(assorted_dir+"/"+items[r])
Code example #41
File: app.py Project: skyli42/RiceKrispies
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED','cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
ntemplates = [file for file in os.scandir("./masks/nums")]


#for matching output of classifier --> actual suit
suits = ["dclubs", "ddiamonds", "dhearts", "dspades","uclubs","udiamonds", "uhearts", "uspades"]
suits_true = ["clubs", "diamonds", "hearts", "spades"]
tmp = {}
for i, suit in enumerate(suits):
	tmp[suit] = i
	tmp[i] = suit
suits = tmp


#load detectors
card_detector = dlib.simple_object_detector("./detector.svm")
suit_detector = dlib.simple_object_detector("./suit_detector.svm")

#load suit classifier
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=64)]
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[1024, 512, 256] , n_classes = 8,model_dir = "./weights")


bin_n = 16 # Number of bins (for HOG)
def hog(img):
	"""
	Computes the HOG [histogram of oriented gradients] for an image

	Parameters
	----------
	img: np.ndarray
Code example #42
File: detection.py Project: imagedl/TrafficSign
import dlib
import glob
import os
from skimage import io

info_det = dlib.simple_object_detector("info.svm")
giveway_det = dlib.simple_object_detector("giveway.svm")
mandatory_det = dlib.simple_object_detector("mandatory.svm")
priority_det = dlib.simple_object_detector("priority.svm")
prohibitory_det = dlib.simple_object_detector("prohibitory.svm")
stop_det = dlib.simple_object_detector("stop.svm")
warning_det = dlib.simple_object_detector("warning.svm")

win = dlib.image_window()

pictures_folder = ('pics')

for f in glob.glob(os.path.join(pictures_folder, "*.png")):
    print ("Processing file: {}". format(f))
    img = io.imread(f)
    dets_info = info_det(img)
    dets_giveway = giveway_det(img)
    dets_manda = mandatory_det(img)
    dets_priority = priority_det(img)
    dets_proh = prohibitory_det(img)
    dets_stop = stop_det(img)
    dets_warn = warning_det(img)
    print (len(dets_info), len(dets_giveway), len(dets_manda), len(dets_priority), len(dets_proh), len(dets_stop), len(dets_warn))
    for k, d in enumerate(dets_info):
        print (("Detection {}: Left: {} Top: {} Right: {} Bottom: {}").format(
            k, d.left(), d.top(), d.right(), d.bottom()))
Code example #43
File: learning.py Project: imagedl/TrafficSign
import cv2
import dlib
import os
from skimage import filter, data, io
from skimage.viewer import ImageViewer


learning_folder = 'learning'

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = True
options.C = 5
options.num_threads = 1
options.be_verbose = True

training_xml_path = os.path.join(learning_folder,'info.xml')

dlib.train_simple_object_detector(training_xml_path, 'info.svm', options)

print ("")
print ("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "info.svm")
))

detector = dlib.simple_object_detector("info.svm")

win_det = dlib.image_window()
win_det.set_image(detector)

dlib.hit_enter_to_continue()
Code example #44
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))





# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
Code example #45
import Image
import shutil
import dlib
import random
import os
import glob
from skimage import io
# print "\nTest1 accuracy: ", dlib.test_simple_object_detector('/home/jyotiska/Dropbox/Computer Vision/cupdataset_2_test.xml',"cupdetector_2.svm")
# print "\nTraining accuracy: ", dlib.test_simple_object_detector('/home/jyotiska/Dropbox/Computer Vision/cupdataset_3.xml',"cupdetector_3.svm")

detector = dlib.simple_object_detector("bottledetector.svm")

win_det = dlib.image_window()
win_det.set_image(detector)

win = dlib.image_window()
test_dir = '/home/jyotiska/Dropbox/Computer Vision/Bottle_test_convert'
assorted_dir = '/home/jyotiska/Dropbox/Computer Vision/Item bucket'

items = os.listdir(assorted_dir)


convert_i = 0
for f in glob.glob(test_dir+"/*.*"):
    print "processing file:", f
    img = io.imread(f)
    extension = f.split(".")[1]
    convert_file = "convert_bottle_"+str(convert_i)+"."+extension
    shutil.copy(f,convert_file)
    print "convert file:",convert_file
    background = Image.open(convert_file)
Code example #46
File: faster.py Project: imagedl/TrafficSign
import cv2
import dlib
import sys
import getopt


info_det = dlib.simple_object_detector("detectors/info.svm")
giveway_det = dlib.simple_object_detector("detectors/giveway.svm")
mandatory_det = dlib.simple_object_detector("detectors/mandatory.svm")
priority_det = dlib.simple_object_detector("detectors/priority.svm")
prohibitory_det = dlib.simple_object_detector("detectors/prohibitory.svm")
stop_det = dlib.simple_object_detector("detectors/stop.svm")
warning_det = dlib.simple_object_detector("detectors/warning.svm")



VERBOSE = False

def cli_progress(current_val, end_val, bar_length=20):
    percent = float(current_val) / end_val
    hashes = '#' * int(round(percent * bar_length))
    spaces = ' ' * (bar_length - len(hashes))
    sys.stdout.write("\rPercent: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
    sys.stdout.flush()

def getDetectedFrame(img):
    dets_info = info_det(img)
    dets_giveway = giveway_det(img)
    dets_manda = mandatory_det(img)
    dets_priority = priority_det(img)
    dets_proh = prohibitory_det(img)
Code example #47
# Tell the code how many CPU cores your computer has for the fastest training.
options.num_threads = 8
options.be_verbose = True

training_xml_path = "signs.xml"
## testing_xml_path = os.path.join(faces_folder, "testing.xml")
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, TRAINING, options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print(the precision, recall, and then)
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(dlib.test_simple_object_detector(training_xml_path, TRAINING)))

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector(TRAINING)

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)
Code example #48
File: test_detector.py Project: ChienHsiung/python
# python test_detector.py --detector output/stop_sign_detector.svm --testing stop_sign_testing

# import the necessary packages
from imutils import paths
import argparse
import dlib
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--detector", required=True, help="Path to trained object detector")
ap.add_argument("-t", "--testing", required=True, help="Path to directory of testing images")
args = vars(ap.parse_args())

# load the detector
detector = dlib.simple_object_detector(args["detector"])

# loop over the testing images
for testingPath in paths.list_images(args["testing"]):
	# load the image and make predictions
	image = cv2.imread(testingPath)
	boxes = detector(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

	# loop over the bounding boxes and draw them
	for b in boxes:
		(x1, y1, x2, y2) = (b.left(), b.top(), b.right(), b.bottom())
		cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
		print (x1, y1)
	# show the image
	cv2.imshow("Image", image)
	cv2.waitKey(0)
Code example #49
File: pic_eval.py Project: marron-akanishi/yayoi
import sys
import json
import os
import random
import numpy as np
import cv2
import dlib
import tensorflow as tf
import study
import study_52

# face detection model file
face_detector = dlib.simple_object_detector("./detector_face.svm")

# face size
FACE_SIZE = 64
# input sizes for the CNN
IMAGE_SIZE = 28
IMAGE_SIZE_52 = 48
# area enlargement factor
ZOOM = 5

# main character-classification routine
def chara_detect(img, ckpt_path, names, isTheater):
    if isTheater:
        img = cv2.resize(img, (IMAGE_SIZE_52, IMAGE_SIZE_52))
    else:
        img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
    # array to hold the data
    image = []
    # flatten the image data, then convert to floats in the 0-1 range