Example #1
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-m",
                        "--model",
                        required=True,
                        help="File path of .tflite file.")
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="Image to be classified.")
    parser.add_argument("-l", "--labels", help="File path of labels file.")
    parser.add_argument("-k",
                        "--top_k",
                        type=int,
                        default=1,
                        help="Max number of classification results")
    parser.add_argument(
        "-t",
        "--threshold",
        type=float,
        default=0.0,
        help="Classification score threshold",
    )
    parser.add_argument("-c",
                        "--count",
                        type=int,
                        default=5,
                        help="Number of times to run inference")
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    image = Image.open(args.input).convert("RGB").resize(size, Image.ANTIALIAS)
    classify.set_input(interpreter, image)

    print("----INFERENCE TIME----")
    print(
        "Note: The first inference on Edge TPU is slow because it includes",
        "loading the model into Edge TPU memory.",
    )
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print("%.1fms" % (inference_time * 1000))

    print("-------RESULTS--------")
    for klass in classes:
        print("%s: %.5f" % (labels.get(klass.id, klass.id), klass.score))
Example #2
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m',
                        '--model',
                        required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-i',
                        '--input',
                        required=True,
                        help='Image to be classified.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-k',
                        '--top_k',
                        type=int,
                        default=1,
                        help='Max number of classification results')
    parser.add_argument('-t',
                        '--threshold',
                        type=float,
                        default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c',
                        '--count',
                        type=int,
                        default=5,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    image = Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print('%.1fms' % (inference_time * 1000))

    print('-------RESULTS--------')
    for klass in classes:
        print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
Example #3
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m',
                        '--model',
                        required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-i',
                        '--input',
                        required=True,
                        help='Image to be classified.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-c',
                        '--count',
                        type=int,
                        default=5,
                        help='Number of times to run inference')
    parser.add_argument('-e',
                        '--edge',
                        action='store_true',
                        help='Run inference on the Edge TPU')
    parser.add_argument('-o',
                        '--output',
                        type=str,
                        default=None,
                        help='Saves the inferred segmentation to the given folder')
    args = parser.parse_args()

    interpreter = make_interpreter(args.model, args.edge)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    print(f"Size: {size}")
    image = Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        seg = classify.get_output(interpreter)

        print('%.1fms' % (inference_time * 1000))

    if args.output is not None:
        vis_segmentation(image, seg, out=args.output)
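Example #3 forwards the --edge flag into make_interpreter, so that helper presumably takes a second argument. A hedged sketch of such a two-argument variant (the real project's version may differ):

def make_interpreter(model_file, use_edgetpu):
    # Load the Edge TPU delegate only when --edge is set; otherwise run the model on the CPU.
    delegates = [tflite.load_delegate('libedgetpu.so.1')] if use_edgetpu else []
    return tflite.Interpreter(model_path=model_file,
                              experimental_delegates=delegates)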
Example #4
def main():
    labels = load_labels(PATH_LABELS) if PATH_LABELS else {}
    interpreter = make_interpreter(PATH_MODEL)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    # we need to convert to gray scale if source image is colored
    # image = Image.open(args.input).convert('L').resize(size, Image.ANTIALIAS)
    image = Image.open(PATH_IMAGE).resize(size, Image.ANTIALIAS)

    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(REPEAT):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, TOP_K, THRESHOLD)
        print('%.1fms' % (inference_time * 1000))
        for klass in classes:
            print('%d %s: %.5f' % (klass.id, labels[klass.id], klass.score))
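Example #4 drives the same flow from module-level constants instead of command-line flags. Hypothetical values, only to show the shapes those constants need:

# Hypothetical configuration for Example #4; the real paths live elsewhere in that project.
PATH_MODEL = 'models/classifier_edgetpu.tflite'
PATH_LABELS = 'models/labels.txt'
PATH_IMAGE = 'images/sample.png'
REPEAT = 5       # number of timed inference runs
TOP_K = 1        # keep only the best class
THRESHOLD = 0.0  # report every score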
Example #5
def main():
    mic.c_mic()
    wavetxt.wave_to_txt()
    writeimage.write()

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m',
                        '--model',
                        default="./models/CNN_Model_6_v1_edgetpu.tflite",
                        help='File path of .tflite file.')
    parser.add_argument('-i',
                        '--input',
                        default="./images/test1/0.png",
                        help='Image to be classified.')
    parser.add_argument('-l',
                        '--labels',
                        default="./models/classification.txt",
                        help='File path of labels file.')
    parser.add_argument('-k',
                        '--top_k',
                        type=int,
                        default=1,
                        help='Max number of classification results')
    parser.add_argument('-t',
                        '--threshold',
                        type=float,
                        default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c',
                        '--count',
                        type=int,
                        default=1,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    image = Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print('%.1fms' % (inference_time * 1000))

    print('-------RESULTS--------')
    filename = "./Data/result/anomaly_detection_result.txt"
    with open(filename, "w") as fid:
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
            # np.savetxt(fid, labels.get(klass.id, klass.id))
            # d = ('%s: %s\n'% (str(round(time.time())), labels.get(klass.id, klass.id)))
            d = ('%s' % (labels.get(klass.id, klass.id)))
            fid.write(d)
        # fid.writelines(str(round(time.time()))+"\t"+labels.get(klass.id, klass.id)+"\n")
    send_data(d)
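Example #5 ends by handing the label to send_data, whose definition is not shown. A purely illustrative sketch, assuming the result is pushed to a monitoring host over TCP (the real transport is project-specific):

import socket

def send_data(result):
    # Hypothetical endpoint; the real host, port, and protocol are defined by the project.
    host, port = '192.168.0.10', 9000
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(result.encode('utf-8'))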
Example #6
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m',
                        '--model',
                        required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-k',
                        '--top_k',
                        type=int,
                        default=1,
                        help='Max number of classification results')
    parser.add_argument('-t',
                        '--threshold',
                        type=float,
                        default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c',
                        '--count',
                        type=int,
                        default=5,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)

    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera(resolution=(640, 480), framerate=30)

    # allow the camera to warmup
    time.sleep(0.1)
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    while True:
        rawCapture = PiRGBArray(camera)
        # grab an image from the camera
        camera.capture(rawCapture, format="bgr")
        image = rawCapture.array
        cv2.imshow("Image", image)
        # display the image on screen and wait for a keypress
        size = classify.input_size(interpreter)
        image = cv2.resize(image, size)
        classify.set_input(interpreter, image)

        print('----INFERENCE TIME----')
        for _ in range(args.count):
            start = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - start
            classes = classify.get_output(interpreter, args.top_k,
                                          args.threshold)
            print('%.1fms' % (inference_time * 1000))

        print('-------RESULTS--------')
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
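Example #6 feeds the PiCamera frame, captured as BGR for OpenCV, straight into the classifier. If the model was trained on RGB images, a conversion step along these lines may be needed before set_input (a sketch, not part of the original):

# Hypothetical tweak for the loop above: convert OpenCV's BGR frame to RGB before classification.
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
rgb = cv2.resize(rgb, size)
classify.set_input(interpreter, rgb)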
Example #7
def main():
  
  # establish connection with Google API
  
  SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

  creds = None
  # The file token.pickle stores the user's access and refresh tokens, and is
  # created automatically when the authorization flow completes for the first
  # time.
  if os.path.exists('token.pickle'):
    with open('token.pickle', 'rb') as token:
        creds = pickle.load(token)

  # If there are no (valid) credentials available, let the user log in.
  if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

            
  service = build('sheets', 'v4', credentials=creds)

  # set up the servo
  
  time_since_lastfed = 1000
  camera = PiCamera()
  while True:
    # obtain what bird was selected by user
    selected_bird = read_selected_bird(service)
    
    #camera.start_preview()
    sleep(.5)
    camera.capture('images/current_photo.jpg')
    #camera.stop_preview()
    
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-m', '--model', required=True, help='File path of .tflite file.')
    parser.add_argument(
        '-i', '--input', required=False, help='Image to be classified.')
    parser.add_argument(
        '-l', '--labels', help='File path of labels file.')
    parser.add_argument(
        '-k', '--top_k', type=int, default=1,
        help='Max number of classification results')
    parser.add_argument(
        '-t', '--threshold', type=float, default=0.0,
        help='Classification score threshold')
    parser.add_argument(
        '-c', '--count', type=int, default=5,
        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    image = Image.open('images/current_photo.jpg').convert('RGB').resize(size, Image.ANTIALIAS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
      start = time.perf_counter()
      interpreter.invoke()
      inference_time = time.perf_counter() - start
      classes = classify.get_output(interpreter, args.top_k, args.threshold)
      #print('%.1fms' % (inference_time * 1000))

    print('-------RESULTS--------')
    for klass in classes:
      label = labels.get(klass.id, klass.id)
      ## Only if a bird was identified
      print(label)
      
      print(selected_bird)
      if label != '' and label != 'background':
        name = label.split('(')[1].split(')')[0]
        print(name, ' , confidence: ', klass.score)
      #print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
        
        if name == selected_bird and klass.score>.60:
          if time_since_lastfed > 12:
            servoPIN = 17
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(servoPIN, GPIO.OUT)

            p = GPIO.PWM(servoPIN, 50) # GPIO 17 for PWM with 50Hz
            p.start(4.7) # Initialization
  
            p.ChangeDutyCycle(6)
            time.sleep(0.2)

            p.ChangeDutyCycle(4.7)
            time.sleep(0.5)

            p.stop()
            GPIO.cleanup()
            

            dateTimeObj = datetime.now()
            date = dateTimeObj.strftime("%b %d, %Y")
            hour = dateTimeObj.strftime("%I %p")
            dateandtime = dateTimeObj.strftime("%b %d, %Y, %I:%M %p")
            
            upload_observation(date, hour, dateandtime, name,'no', '', '', service)

            time_since_lastfed = 0
            
        elif klass.score>.60:
             
            dateTimeObj = datetime.now()
            date = dateTimeObj.strftime("%b %d, %Y")
            hour = dateTimeObj.strftime("%I %p")
            dateandtime = dateTimeObj.strftime("%b %d, %Y, %I:%M %p")
            
            upload_observation(date, hour, dateandtime, name,'yes', '', '', service)
             
    time_since_lastfed += 1
    print(time_since_lastfed)
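Example #7 relies on two project-specific helpers, read_selected_bird and upload_observation, that talk to Google Sheets through the service object built above. A minimal sketch of how such helpers could look, assuming a hypothetical SPREADSHEET_ID and sheet layout:

SPREADSHEET_ID = 'your-spreadsheet-id'  # hypothetical; the real ID lives in the project

def read_selected_bird(service):
  # Read the bird chosen by the user from a single cell (cell location is an assumption).
  result = service.spreadsheets().values().get(
      spreadsheetId=SPREADSHEET_ID, range='Settings!B1').execute()
  values = result.get('values', [])
  return values[0][0] if values else ''

def upload_observation(date, hour, dateandtime, name, flag, col1, col2, service):
  # Append one observation row to a log sheet; column order and the meaning of the
  # extra fields are assumptions based on the call sites above.
  body = {'values': [[date, hour, dateandtime, name, flag, col1, col2]]}
  service.spreadsheets().values().append(
      spreadsheetId=SPREADSHEET_ID, range='Observations!A1',
      valueInputOption='USER_ENTERED', body=body).execute()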
Example #8
def main():
    subprocess.run('/usr/bin/snapshot', shell=False)#calls "snapshot.py"           
    image_file = os.listdir(rootdir) 
    
    for root, subdirs, files in os.walk(rootdir):

        labels = getLabel(root, files)

        interpreter = getInterpreter(root, files)
                
        if interpreter is not None:
            size = classify.input_size(interpreter)
            
            #image_path = getImage(root, files)
            image_path = getImage(dir_path, image_file)
            
            image = Image.open(image_path).convert('RGB').resize(size, Image.ANTIALIAS)
    
            classify.set_input(interpreter, image)
    
            print('*The first inference on Edge TPU is slow because it includes',
                  'loading the model into Edge TPU memory*')
            for _ in range(count):
                start = time.perf_counter()
                interpreter.invoke()
                inference_time = time.perf_counter() - start
                classes = classify.get_output(interpreter, top_k, threshold)
                #print('%.1f' % (inference_time * 1000)) 
                dummy.append(f'Time(ms):{(inference_time*1000):.4}')
                print('Time(ms):', '%.1f' % (inference_time * 1000))
            print("\n")   
                
            for klass in classes:
                #print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
                dummy.append(f'Inference:{(labels.get(klass.id, klass.id))}')
                print('Inference:', '%s' % (labels.get(klass.id, klass.id)))
                dummy.append(f'Score:{(klass.score):.5}')
                print('Score:', '%.5f' % (klass.score))
                print("\n")
    
    
    maX_group = max_group() 
      
    temperature = check_temperature_status()
    maX_group.append(f'TPU_temp(°C):{int(temperature)/1000}')
    #print("maX_group:", maX_group)
    print('#####################################')
    print("\n")
    
   
    

    es=initialize_elasticsearch() 
    initialize_mapping(es)   


    actions = [
        {
            '_index': INDEX_NAME,
            '_type': DOC_TYPE,
            "@timestamp": str(datetime.datetime.utcnow().strftime("%Y-%m-%d"'T'"%H:%M:%S")),
            "Labels": maX_group[0].split(":")[1],
            "Model": maX_group[1].split(":")[1],
            "Image": maX_group[2].split(":")[1],
            "Time(ms)": maX_group[4].split(":")[1],
            "Inference": maX_group[5].split(":")[1],
            "Score": maX_group[6].split(":")[1],
            "TPU_temp(°C)": maX_group[7].split(":")[1]
        
        }]

    try:
        res=helpers.bulk(client=es, index = INDEX_NAME, actions = actions) 
        print ("\nhelpers.bulk() RESPONSE:", res)
        print ("RESPONSE TYPE:", type(res))
        
    except Exception as err: 
        print("\nhelpers.bulk() ERROR:", err)
    
    print("\n")
    print("\n")
    
    os.remove(image_path)
    print("Photo has been deleted")
Example #9
def main():

    labels = load_labels(etiquetas_calibre) if etiquetas_calibre else {}
    interpreter = make_interpreter(modelo_calibre)
    interpreter.allocate_tensors()
    while cv2.waitKey(1) & 0xFF != ord('q'):

        print()
        print('____________________________________')
        print()

        # Open the URL images with stream=True so the raw stream content is returned.
        try:
            r = requests.get(image_url, stream=True)
            r2 = requests.get(image_url2, stream=True)
        except requests.exceptions.RequestException:
            print("Error downloading images")
            continue

        # Check if the image was retrieved successfully
        if (r.status_code == 200) and (r2.status_code == 200):
            # Set decode_content value to True, otherwise the downloaded image file's size will be zero.
            r.raw.decode_content = True
            r2.raw.decode_content = True

            # Open a local file with wb ( write binary ) permission.
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)

            print('Image successfully Downloaded1 : ', filename)
            print()

            # Open a local file with wb ( write binary ) permission.
            with open(filename2, 'wb') as f2:
                shutil.copyfileobj(r2.raw, f2)

            print('Image successfully Downloaded2 : ', filename2)
            print()

        else:
            print('Images Couldn\'t be retrieved')

        im = Image.open(filename)
        im = im.crop((50, 100, 500, 300))
        im.save(filename)

        im2 = Image.open(filename2)
        im2 = im2.crop((150, 100, 500, 300))
        im2.save(filename2)

        # Opening the first downloaded image
        image = Image.open(filename)

        # Converting the image to greyscale, as edge detection
        # requires input image to be of mode = Greyscale (L)
        image = image.convert("L")

        # Detecting Edges on the Image using the argument ImageFilter.FIND_EDGES
        image = image.filter(ImageFilter.FIND_EDGES)

        # Saving the edge-detected image back over the original file
        image.save(filename)

        # Opening the second downloaded image
        image2 = Image.open(filename2)

        # Converting the image to greyscale, as edge detection
        # requires input image to be of mode = Greyscale (L)
        image2 = image2.convert("L")

        # Detecting Edges on the Image using the argument ImageFilter.FIND_EDGES
        image2 = image2.filter(ImageFilter.FIND_EDGES)

        # Saving the second edge-detected image back over the original file
        image2.save(filename2)

        # classification engine
        #XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

        size = classify.input_size(interpreter)
        image = Image.open(img_input).convert('RGB').resize(
            size, Image.ANTIALIAS)
        classify.set_input(interpreter, image)
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("")
        print('----INFERENCE TIME----')
        print(
            'Note: The first inference on Edge TPU is slow because it includes',
            'loading the model into Edge TPU memory.')
        for _ in range(5):
            start = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - start
            classes = classify.get_output(interpreter, 1, 0)
            print('%.1fms' % (inference_time * 1000))

        print('-------RESULTS--------')
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))

        # second image

        size = classify.input_size(interpreter)
        image2 = Image.open(img_input2).convert('RGB').resize(
            size, Image.ANTIALIAS)
        classify.set_input(interpreter, image2)
        print("")
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("")
        print('----INFERENCE TIME 2----')
        print(
            'Note: The first inference on Edge TPU is slow because it includes',
            'loading the model into Edge TPU memory.')
        for _ in range(5):
            start = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - start
            classes = classify.get_output(interpreter, 1, 0)
            print('%.1fms' % (inference_time * 1000))

        print('-------RESULTS2--------')
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))

        #XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

        # Window name in which image is displayed
        image_final = cv2.imread(img_input2)
        window_name = ''

        # Using cv2.imshow() method
        # Displaying the image
        cv2.imshow(window_name, image_final)

        #closing all open windows

    cv2.destroyAllWindows()
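Example #9 reads its camera URLs, file names, model, and labels from module-level globals that are not shown. Hypothetical placeholders, only to make the names above concrete:

# Hypothetical globals for Example #9; the real URLs, paths, and model files are project-specific.
image_url = 'http://192.168.0.20/cam1.jpg'
image_url2 = 'http://192.168.0.20/cam2.jpg'
filename = 'cam1.jpg'
filename2 = 'cam2.jpg'
img_input = filename
img_input2 = filename2
modelo_calibre = 'models/calibre_edgetpu.tflite'
etiquetas_calibre = 'models/calibre_labels.txt'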