Example #1
def __init__(self,
             wanted_labels=None,
             model_file=None,
             label_file=None,
             num_threads=None,
             edgetpu=False,
             libedgetpu=None,
             score_threshold=0.5):
    if model_file is None:
        model_file = 'detectors/mobilenet/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'
    if label_file is None:
        label_file = 'detectors/mobilenet/labels.txt'
    # Honor the constructor arguments instead of hard-coding the values.
    self.use_edgetpu = edgetpu
    self.num_threads = num_threads if num_threads is not None else 1
    self.engine = DetectionEngine(model_file)
    self.labels = dataset_utils.ReadLabelFile(label_file)
    if wanted_labels is None:
        wanted_labels = ['person']
    self.wanted_labels = wanted_labels
    self.score_threshold = score_threshold
    # Newer versions of the edgetpu library expose detect_with_image;
    # fall back to the old DetectWithImage name on older releases.
    detect_op = getattr(self.engine, 'detect_with_image', None)
    if callable(detect_op):
        self.detector = detect_op
    else:
        self.detector = self.engine.DetectWithImage
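A minimal usage sketch for the constructor above, assuming it belongs to a wrapper class named Detector (a hypothetical name) and that self.detector follows the edgetpu detection calling convention (threshold and top_k keyword arguments, results carrying label_id and score attributes):

from PIL import Image

detector = Detector(wanted_labels=['person'], score_threshold=0.6)
img = Image.open('frame.jpg')  # hypothetical input image
for obj in detector.detector(img, threshold=detector.score_threshold, top_k=10):
    label = detector.labels[obj.label_id]
    if label in detector.wanted_labels:
        print(label, obj.score)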
Example #2

import argparse
import io
import time

import numpy as np
import picamera
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of Tflite model.',
                        required=True)
    parser.add_argument('--label',
                        help='File path of label file.',
                        required=True)
    args = parser.parse_args()

    labels = dataset_utils.ReadLabelFile(args.label)
    engine = ClassificationEngine(args.model)

    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.framerate = 30
        _, height, width, _ = engine.get_input_tensor_shape()
        camera.start_preview()
        try:
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream,
                                               format='rgb',
                                               use_video_port=True,
                                               resize=(width, height)):
                stream.truncate()
                stream.seek(0)
                input_tensor = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                start = time.time()
                results = engine.ClassifyWithInputTensor(input_tensor, top_k=1)
                elapsed = time.time() - start
                if results:
                    camera.annotate_text = '%s %.2f\n%.2fms' % (
                        labels[results[0][0]], results[0][1], elapsed * 1000.0)
        finally:
            camera.stop_preview()
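The loop above relies on an implicit contract: a raw 'rgb' capture resized to (width, height) yields exactly width * height * 3 bytes, which is what ClassifyWithInputTensor consumes. A small helper (not part of the original example) that makes that check explicit:

import numpy as np

def to_input_tensor(raw_bytes, width, height):
    # Flatten the raw RGB capture into the uint8 tensor the engine expects.
    tensor = np.frombuffer(raw_bytes, dtype=np.uint8)
    expected = width * height * 3  # three bytes per pixel (R, G, B)
    if tensor.size != expected:
        raise ValueError('frame has %d bytes, expected %d' % (tensor.size, expected))
    return tensor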
Example #3
import argparse

from PIL import Image
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  parser.add_argument('--label', help='File path of label file.', required=True)
  parser.add_argument(
      '--image', help='File path of the image to be recognized.', required=True)
  args = parser.parse_args()

  # Prepare labels.
  labels = dataset_utils.ReadLabelFile(args.label)
  # Initialize engine.
  engine = ClassificationEngine(args.model)
  # Run inference.
  img = Image.open(args.image)
  for result in engine.ClassifyWithImage(img, top_k=3):
    print('---------------------------')
    print(labels[result[0]])
    print('Score : ', result[1])
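ClassifyWithImage returns (label_id, score) pairs, which is why the loop indexes result[0] and result[1]. A small convenience helper (hypothetical, not part of the example) that turns the raw pairs into a readable mapping:

def results_to_dict(results, labels):
    # Map each (label_id, score) pair to its human-readable label name.
    return {labels[label_id]: float(score) for label_id, score in results}

For instance, results_to_dict(engine.ClassifyWithImage(img, top_k=3), labels) would yield something like {'tabby cat': 0.91, ...}.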
Example #4

def __init__(self):
    MODEL_FILE_NAME = "mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
    LABEL_FILE_NAME = "coco_labels.txt"

    MODEL_URL = "https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
    LABEL_URL = "https://dl.google.com/coral/canned_models/coco_labels.txt"

    self.download_file(MODEL_URL, MODEL_FILE_NAME)
    self.download_file(LABEL_URL, LABEL_FILE_NAME)

    self.last_5_scores = collections.deque(np.zeros(5), maxlen=5)
    self.engine = DetectionEngine(MODEL_FILE_NAME)
    self.labels = dataset_utils.ReadLabelFile(LABEL_FILE_NAME)

    self.TRAFFIC_LIGHT_CLASS = 9
    self.LAST_5_SCORE_THRESHOLD = 0.4
    self.MIN_SCORE = 0.2
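The constructor calls a download_file helper that the snippet does not show. A minimal sketch of what such a helper might look like, assuming it simply fetches each URL once and skips files that already exist locally:

import os
import urllib.request

def download_file(self, url, filename):
    # Fetch url into filename, skipping the download if the file exists.
    if not os.path.exists(filename):
        urllib.request.urlretrieve(url, filename)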
Example #5

import argparse
import os
import time

from PIL import Image
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils


def main():
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        help='File path of Tflite model.',
        default='/home/pi/Desktop/AUTO_ML/models_edge_ICN6216886327266610278_2019-08-26_07-02-41-723_tflite_model.tflite',
        required=False)
    parser.add_argument(
        '--label',
        help='File path of label file.',
        default='/home/pi/Desktop/AUTO_ML/models_edge_ICN6216886327266610278_2019-08-26_07-02-41-723_tflite_dict.txt',
        required=False)
    #parser.add_argument(
    #    '--image', help='File path of the image to be tested.', required=False)
    args = parser.parse_args()
    # Prepare labels.
    labels = dataset_utils.ReadLabelFile(args.label)
    # Initialize engine
    engine = ClassificationEngine(args.model)
    print("load model costs %s sec" % (time.time() - start_time))

    dir_path = '/home/pi/Desktop/test-dataset/'
    os.chdir(dir_path)
    subdir_list = next(os.walk('.'))[1]
    print(subdir_list)

    acc = []
    for subdir in subdir_list:
        files = []
        count = 0
        sub_p = os.path.join(dir_path, subdir)
        for r, d, f in os.walk(sub_p):
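            # (Snippet truncated here. A hypothetical continuation, assuming
            # each sub-folder is named after its ground-truth label and acc
            # collects per-class top-1 accuracy.)
            files.extend(os.path.join(r, name) for name in f)
        for path in files:
            result = engine.ClassifyWithImage(Image.open(path), top_k=1)
            if result and labels[result[0][0]] == subdir:
                count += 1
        acc.append(count / max(len(files), 1))
    print('per-class accuracy:', acc)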
Example #6
import argparse
import platform
import subprocess

from PIL import Image, ImageDraw
from edgetpu.detection.engine import DetectionEngine
from edgetpu.utils import dataset_utils


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        help=('Path of the detection model. It must be an SSD model with a '
              'postprocessing operator.'),
        required=True)
    parser.add_argument('--label', help='Path of the labels file.')
    parser.add_argument('--input',
                        help='File path of the input image.',
                        required=True)
    parser.add_argument('--output', help='File path of the output image.')
    parser.add_argument(
        '--keep_aspect_ratio',
        dest='keep_aspect_ratio',
        action='store_true',
        help=('Keep the image aspect ratio when down-sampling by padding the '
              'bottom or right edge with black (zero) pixels. By default the '
              'image is resized and reshaped without cropping. This option '
              'should match what was applied to the input images during model '
              'training; otherwise accuracy may suffer and the detected '
              'bounding boxes may be stretched.'))
    parser.set_defaults(keep_aspect_ratio=False)
    args = parser.parse_args()

    if not args.output:
        output_name = 'object_detection_result.jpg'
    else:
        output_name = args.output

    # Initialize engine.
    engine = DetectionEngine(args.model)
    labels = dataset_utils.ReadLabelFile(args.label) if args.label else None

    # Open image.
    img = Image.open(args.input)
    draw = ImageDraw.Draw(img)

    # Run inference.
    ans = engine.DetectWithImage(img,
                                 threshold=0.05,
                                 keep_aspect_ratio=args.keep_aspect_ratio,
                                 relative_coord=False,
                                 top_k=10)

    # Display result.
    if ans:
        for obj in ans:
            print('-----------------------------------------')
            if labels:
                print(labels[obj.label_id])
            print('score = ', obj.score)
            box = obj.bounding_box.flatten().tolist()
            print('box = ', box)
            # Draw a rectangle.
            draw.rectangle(box, outline='red')
        img.save(output_name)
        if platform.machine() == 'x86_64':
            # For gLinux, simply show the image.
            img.show()
        elif platform.machine() == 'armv7l':
            # For Raspberry Pi, you need to install 'feh' to display image.
            subprocess.Popen(['feh', output_name])
        else:
            print('Please check ', output_name)
    else:
        print('No object detected!')
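As the comment in Example #1 notes, newer releases of the edgetpu library renamed DetectWithImage to detect_with_image. A small shim (a sketch, assuming the two methods differ only in name) keeps scripts like this one working on either version:

def detect_any_version(engine, img, **kwargs):
    # Prefer the new detect_with_image name; fall back to the old one.
    detect = getattr(engine, 'detect_with_image', None)
    if not callable(detect):
        detect = engine.DetectWithImage
    return detect(img, **kwargs)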
Example #7
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
from PIL import Image
import json
import picamera
import RPi.GPIO as GPIO
import datetime
import requests
import os

comebtn = 27
leavebtn = 22
url = "https://2a8043f9.ngrok.io/tag"
headers = {'content-type': 'application/json'}
filepath = '/home/pi/edgetpu/retrain-imprinting2/'

labels = dataset_utils.ReadLabelFile(
    os.path.join(filepath, 'Attendance_model2.txt'))
engine = ClassificationEngine(
    os.path.join(filepath, 'Attendance_model2.tflite'))

camera = picamera.PiCamera()
print('camera made')

GPIO.setmode(GPIO.BCM)
GPIO.setup(comebtn, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(leavebtn, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

print('Start camera')
print('Press red btn or blue btn')


def comein(channel):
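    # (The snippet is truncated here. A hypothetical reconstruction of the
    # callback body, using only names already defined above: capture a
    # frame, classify it, and POST the best match as a 'come' event.)
    image_path = '/tmp/attendance.jpg'  # hypothetical capture location
    camera.capture(image_path)
    results = engine.ClassifyWithImage(Image.open(image_path), top_k=1)
    if results:
        payload = {
            'name': labels[results[0][0]],
            'status': 'come',
            'time': datetime.datetime.now().isoformat(),
        }
        requests.post(url, data=json.dumps(payload), headers=headers)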
Example #8

import argparse

import cv2
import numpy
from PIL import Image, ImageDraw
from edgetpu.detection.engine import DetectionEngine
from edgetpu.utils import dataset_utils


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model',
      help=('Path of the detection model. It must be an SSD model with a '
            'postprocessing operator.'),
      required=True)
  parser.add_argument('--label', help='Path of the labels file.')
  parser.add_argument('--output', help='File path of the output image.')
  parser.add_argument(
      '--keep_aspect_ratio',
      dest='keep_aspect_ratio',
      action='store_true',
      help=('Keep the image aspect ratio when down-sampling by padding the '
            'bottom or right edge with black (zero) pixels. By default the '
            'image is resized and reshaped without cropping. This option '
            'should match what was applied to the input images during model '
            'training; otherwise accuracy may suffer and the detected '
            'bounding boxes may be stretched.'))
  parser.set_defaults(keep_aspect_ratio=False)
  args = parser.parse_args()

  if not args.output:
    output_name = 'object_detection_result.jpg'
  else:
    output_name = args.output

  # Initialize engine.
  engine = DetectionEngine(args.model)
  labels = dataset_utils.ReadLabelFile(args.label) if args.label else None

  # Open the default camera for live capture.
  cap = cv2.VideoCapture(0)

  while True:
    # Grab one frame from the camera.
    ret, frame = cap.read()
    if not ret:
      break

    # Convert the BGR OpenCV frame to an RGB PIL image.
    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)

    # Run inference.
    ans = engine.DetectWithImage(
        img,
        threshold=0.05,
        keep_aspect_ratio=args.keep_aspect_ratio,
        relative_coord=False,
        top_k=10)

    # Display result.
    if ans:
      for obj in ans:
        print('-----------------------------------------')
        if labels:
          print(labels[obj.label_id])
        print('score = ', obj.score)
        box = obj.bounding_box.flatten().tolist()
        print('box = ', box)
        # Draw a rectangle.
        draw.rectangle(box, outline='red')

    # Convert back to BGR for OpenCV display.
    img_ = cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)

    cv2.imshow('frame', img_)

    # Exit the loop when the 'q' key is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break

  # Release the camera.
  cap.release()

  # Close all OpenCV windows.
  cv2.destroyAllWindows()
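The loop above prints labels and scores to the console but only draws plain rectangles. A small extension sketch (not part of the original) that also renders the class name and score on the frame itself:

def draw_detection(draw, obj, labels=None):
    # Draw the bounding box and, when labels are available, annotate it
    # with the class name and score.
    box = obj.bounding_box.flatten().tolist()
    draw.rectangle(box, outline='red')
    if labels:
        text = '%s %.2f' % (labels[obj.label_id], obj.score)
        draw.text((box[0], box[1]), text, fill='red')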