Example #1
class ImageDetector(object):
    def __init__(self):
        self.detector = Detector(YOLO_DATA_PATH, YOLO_CFG_PATH,
                                 YOLO_WEIGHTS_PATH)

    # @pysnooper.snoop()
    def classify_image(self, image_path):
        print("Classfy : ", image_path)
        res = self.detector.detect(image_path)
        print(res)

        img = Image.open(image_path)
        dr = ImageDraw.Draw(img)
        for data in res:
            class_name = data['class']
            x, y, w, h = data['left'], data['top'], data['right'] - data[
                'left'], data['bottom'] - data['top']
            # Draw the bounding box
            dr.rectangle((x, y, x + w, y + h), outline=(46, 254, 46), width=3)

            # Label the box with the class name
            # Set the font and size
            myfont = ImageFont.truetype("static/glyphicons-halflings-regular.ttf", 100)

            dr.text((data['left'], data['top']),
                    class_name,
                    font=myfont,
                    fill='red')
        out_image_path = (image_path[:image_path.rfind('.')] + '_detect'
                          + image_path[image_path.rfind('.'):])
        img.save(out_image_path)
        return out_image_path
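A minimal usage sketch for ImageDetector, assuming YOLO_DATA_PATH, YOLO_CFG_PATH, YOLO_WEIGHTS_PATH and the PIL/darknetpy imports are defined at module level as in the original project; the input image path is a placeholder:

detector = ImageDetector()
out_path = detector.classify_image('samples/dog.jpg')  # hypothetical input image
print('Annotated image saved to', out_path)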
Example #2
File: yolov3.py Project: Woffee/DSL-FR
class Yolo():
    def __init__(self):
        abs_path = '/Users/woffee/www/language_design/yolo/'
        self.detector = Detector(abs_path + 'darknet/cfg/coco.data',
                                 abs_path + 'darknet/cfg/yolov3.cfg',
                                 abs_path + 'darknet/yolov3.weights')

    def detect(self, img_path):
        results = self.detector.detect(img_path)
        return results
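A minimal usage sketch for the Yolo wrapper above; the sample image path is an assumption:

yolo = Yolo()
for box in yolo.detect('/Users/woffee/www/language_design/yolo/darknet/data/dog.jpg'):  # assumed sample image
    print(box['class'], box['prob'])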
Example #3
class AutoLabeler:
    def __init__(self):
        self.current_directory = os.getcwd()
        self.class_list = np.loadtxt("class_list.txt", dtype=str)
        yolo_cfg_path_absolute = self.current_directory + YOLOCFGPATH
        self.detector = Detector(
            yolo_cfg_path_absolute + 'cfg/obj.data',
            yolo_cfg_path_absolute + 'cfg/yolov3-tiny.cfg',
            yolo_cfg_path_absolute + 'yolov3-tiny_final.weights')
        if not os.path.isdir(LABEL_PATH):
            os.mkdir(LABEL_PATH)

    def label_images(self):
        files_to_label = glob.glob(IMAGES_TO_LABEL_GLOB)
        number_of_files = len(files_to_label)
        print(f"{number_of_files} to label in total")
        start_time = time.time()
        for index, file_path in enumerate(files_to_label):
            image = Image.open(file_path)
            label_matrix = []
            print(f"Labeling {index + 1}/{number_of_files}")
            results = self.detector.detect(file_path)
            is_first = True
            for result in results:
                if result["prob"] < 0.6:
                    continue
                c = [
                    index for index, label in enumerate(self.class_list)
                    if label == result["class"]
                ][0]
                width = result["right"] - result["left"]
                height = result["bottom"] - result["top"]
                x_coord = width / 2 + result["left"]
                y_coord = height / 2 + result["top"]
                width_relative = width / image.width
                height_relative = height / image.height
                x_coord_relative = x_coord / image.width
                y_coord_relative = y_coord / image.height
                label_matrix.append([
                    c, x_coord_relative, y_coord_relative, width_relative,
                    height_relative
                ])

            label_matrix_np = np.array(label_matrix)
            base_label_file_name = os.path.basename(
                file_path)[:os.path.basename(file_path).rfind('.')] + ".txt"
            label_file_name = f"{LABEL_PATH}/{base_label_file_name}"
            np.savetxt(label_file_name, label_matrix_np)
            time_elapsed = time.time() - start_time
            estimated_time_remaining = ((time_elapsed / (index + 1)) *
                                        (len(files_to_label) -
                                         (index + 1))) / 60
            print(
                f"Saving file as {label_file_name} -- Est {estimated_time_remaining:.2f} mins remaining"
            )
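A minimal driver sketch for AutoLabeler, assuming class_list.txt, the YOLOCFGPATH, LABEL_PATH and IMAGES_TO_LABEL_GLOB constants, and the yolov3-tiny data/cfg/weights files all exist where the class expects them:

labeler = AutoLabeler()
labeler.label_images()  # writes one YOLO-format .txt label file per image into LABEL_PATH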
Example #4
class YOLOThreeObjectDetector(ObjectDetector):
    DARKNET_CFG_COCO_DATA = "./yolo_darknet_cfg/coco.data"
    DARKNET_CFG_YOLO_CFG = "./yolo_darknet_cfg/yolov3.cfg"
    YOLO_WEIGHTS = "./yolov3.weights"

    def __init__(self):
        self.detector = Detector(self.DARKNET_CFG_COCO_DATA,
                                 self.DARKNET_CFG_YOLO_CFG, self.YOLO_WEIGHTS)

    def detect_humans(self, img, img_path, logger):
        return self.detector.detect(img_path)

    def get_human_boxes(self, outputs, logger):
        boxes_list = [[b['left'], b['top'], b['right'], b['bottom']]
                      for b in outputs
                      if b['class'] == 'person' and b['prob'] >= 0.95]
        logger.log['detected_humans_per_frame'].append(len(boxes_list))
        logger.log['boxes_per_frame'].append(boxes_list)
        return boxes_list
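A minimal sketch of driving YOLOThreeObjectDetector; SimpleLogger is a hypothetical stand-in for whatever logger the project passes in (only a .log dict of lists is required), and the frame path is a placeholder:

class SimpleLogger:
    def __init__(self):
        self.log = {'detected_humans_per_frame': [], 'boxes_per_frame': []}

logger = SimpleLogger()
detector = YOLOThreeObjectDetector()
outputs = detector.detect_humans(None, 'frame_0001.jpg', logger)  # the img argument is unused here
human_boxes = detector.get_human_boxes(outputs, logger)
print(human_boxes)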
Example #5
class Vision:
    def __init__(self):
        self.rs_pipeline = rs.pipeline()
        self.current_directory = os.getcwd()
        yolo_cfg_path_absolute = self.current_directory + YOLOCFGPATH
        self.image_path = self.current_directory + "/" + IMAGE_NAME
        self.detector = Detector(
            yolo_cfg_path_absolute + 'cfg/obj.data',
            yolo_cfg_path_absolute + 'cfg/yolov3-tiny.cfg',
            yolo_cfg_path_absolute + 'yolov3-tiny_final.weights')
        self.counter = 0
        self.first_run = True
        self.results = None
        self.orientationCNN = OrientationDetector(ORIENTATION_MODEL_PATH)

    def __del__(self):
        # Stop streaming
        self.rs_pipeline.stop()

    def capture_image(self):
        if self.first_run:
            cfg = rs.config()
            # cfg.enable_stream(realsense.stream.depth, 1280, 720, realsense.format.z16, 30)
            cfg.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 30)

            profile = self.rs_pipeline.start(cfg)
            sensors = profile.get_device().query_sensors()
            rgb_camera = sensors[1]
            rgb_camera.set_option(rs.option.white_balance, 4600)
            rgb_camera.set_option(rs.option.exposure, 80)
            #rgb_camera.set_option(rs.option.saturation, 65)
            #rgb_camera.set_option(rs.option.contrast, 50)


            frames = None
            # wait for autoexposure to catch up
            for i in range(90):
                frames = self.rs_pipeline.wait_for_frames()
            self.first_run = False

        frames = self.rs_pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()

        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())
        color_image_ready_to_save = pimg.fromarray(color_image, 'RGB')
        color_image_ready_to_save.save(self.image_path)
        return color_image

    def find_parts(self, class_id, fuse_index=-1):
        class_id1, class_id2 = class_id
        part = (-1, -1, -1, -1, -1)
        # result is an array of dictionaries
        found_class_index = 0
        for i in range(len(self.results)):
            d = self.results[i]
            if (d['class'] == class_id1 or d['class'] == class_id2) and d['prob'] > 0.6:
                if fuse_index > -1 and fuse_index != found_class_index:
                    found_class_index += 1
                    continue
                part_class = d['class']
                prob = d['prob']
                width = d['right'] - d['left']
                height = d['bottom'] - d['top']
                x_coord = width / 2 + d['left']
                y_coord = height / 2 + d['top']
                if height > width:
                    orientation = OrientationEnum.VERTICAL.value
                    grip_width = width * 0.58
                elif width > height:
                    orientation = OrientationEnum.HORIZONTAL.value
                    grip_width = height * 0.58
                else:
                    orientation = OrientationEnum.HORIZONTAL.value
                    grip_width = height * 0.58
                    print("[W] Could not determine orientation, defaulting to horizontal")
                new_part_id = convert_to_part_id(part_class)
                part = (new_part_id, x_coord, y_coord, orientation, grip_width)
                break
        print(part)
        return part

    def detect_object(self):
        self.results = self.detector.detect(self.image_path)
        self.draw_boxes(self.results)

    def draw_boxes(self, results):
        source_img = pimg.open(self.image_path).convert("RGBA")
        for i in range(len(results)):
            d = results[i]
            if d['prob'] > 0.6:
                classify = d['class']
                prob = d['prob']
                width = d['right'] - d['left']
                height = d['bottom'] - d['top']
                x_coord = width / 2 + d['left']
                y_coord = height / 2 + d['top']
                draw = ImageDraw.Draw(source_img)
                draw.rectangle(((d['left'], d['top']), (d['right'], d['bottom'])), fill=None, outline=(200, 0, 150), width=6)
                draw.text((x_coord, y_coord), d['class'])
        source_img.save('boundingboxes.png')

    def is_facing_right(self, np_image):
        result = self.orientationCNN.is_facing_right(np_image)
        print("[INFO] Part is facing right. {}".format(result))
        return result

    def get_image_path(self):
        return self.image_path
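A minimal driver sketch for the Vision class, assuming a connected RealSense camera and the YOLOCFGPATH, IMAGE_NAME and ORIENTATION_MODEL_PATH constants from the original project; the class-name pair passed to find_parts is hypothetical:

vision = Vision()
vision.capture_image()
vision.detect_object()
part_id, x, y, orientation, grip_width = vision.find_parts(('part_a', 'part_a_flipped'))  # hypothetical class names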
Example #6
File: example.py Project: snobu/darknetpy
from darknetpy.detector import Detector

detector = Detector('/Users/daniel/Workspace/darknet',
                    '/Users/daniel/Workspace/darknet/cfg/coco.data',
                    '/Users/daniel/Workspace/darknet/cfg/yolo.cfg',
                    '/Users/daniel/Workspace/darknet/yolo.weights')

results = detector.detect('/Users/daniel/Workspace/darknet/data/dog.jpg')

print(results)
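Judging from how the other examples on this page index the output, each entry in results appears to be a dict with at least 'class', 'prob', and pixel 'left'/'top'/'right'/'bottom' keys; a small sketch that filters low-confidence detections:

for r in results:
    if r['prob'] >= 0.5:  # example threshold, not from the original
        w = r['right'] - r['left']
        h = r['bottom'] - r['top']
        print(f"{r['class']}: {r['prob']:.2f} ({w}x{h} px)")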
Example #7
import json
import os

# index (path to the index.txt progress file), img_path and file_list are assumed
# to be defined earlier in the script.
jsonfile = open('/FilepathHere/blob.json', 'a+')
filenum = 0
if os.path.isfile(index):
    index_file = open(index, 'r')
    if index_file:
        filenum = index_file.read()
        index_file.close()
#darknetpy
detector = Detector('/FilepathHere/darknet',
                    '/FilepathHere/darknet/cfg/coco.data',
                    '/FilepathHere/darknet/cfg/yolov2.cfg',
                    '/FilepathHere/darknet/yolov2.weights')

#Dict to hold results, keyed by image index
results = {}

#JSON record written once per image
output = {}
output['ImageID'] = ''
output['YOLOOutputString'] = results
for i in range(int(filenum), len(file_list)):
    results[i] = detector.detect(img_path + file_list[i])
    output['ImageID'] = file_list[i]
    output['YOLOOutputString'] = results[i]
    json_blob = json.dumps(output)
    print(file_list[i] + ' ###COMPLETE###' + '\n')
    print(i, file=open('/FilepathHere/index.txt', 'w'))
    jsonfile.write(json_blob + '\n')
print(results)
#print (json.loads(stdout_json)['ImageID'])
Example #8
import cv2
import time

prototxt = "/media/victor/57a90e07-058d-429d-a357-e755d0820324/Projects/Tiny-yolo/yolo_tiny_deploy.prototxt"
model = "/media/victor/57a90e07-058d-429d-a357-e755d0820324/Projects/Tiny-yolo/yolo_tiny.caffemodel"
darknet = "/media/victor/57a90e07-058d-429d-a357-e755d0820324/Projects/darknet"
image = "/home/victor/Projects/Thesis/Testimages/test1.jpg"
conf = 0.1

from darknetpy.detector import Detector

detector = Detector(darknet, '%s/cfg/coco.data' % darknet,
                    '%s/cfg/tiny-yolo.cfg' % darknet,
                    '%s/tiny-yolo.weights' % darknet)

start = time.time()
results = detector.detect(image)
end = time.time()
print("[INFO] it took %s seconds." % (end - start))

print(results)

# image = cv2.imread(image)
# (h, w) = image.shape[:2]
# # https://www.pyimagesearch.com/2017/11/06/deep-learning-opencvs-blobfromimage-works/
# blob = cv2.dnn.blobFromImage(cv2.resize(image, (448, 448)), 1, (448, 448), (104, 117, 123))
#
# # cv2.imshow("Output", image)
# # cv2.waitKey(0)
#
#
# # load our serialized model from disk
Example #9

# need to pip3 install darknetpy
# run on python3


from darknetpy.detector import Detector

detector = Detector('dev/darknet',
                    'ds/swx_longgun.data',
                    'ds/swx-yolo-voc.2.0.cfg',
                    'swx-yolo-voc_18000.weights')


results = detector.detect('data/gun.jpg')

print(results)