# Example #1 ("Beispiel #1", score 0) — marker converted to a comment so the file parses.
def main():
    """Classify one image with a .tflite model and print timing and results.

    Command-line driven: loads the model, runs inference ``--count`` times on
    the (resized) input image, then prints the top-k labels and scores.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-m", "--model", required=True,
                        help="File path of .tflite file.")
    parser.add_argument("-i", "--input", required=True,
                        help="Image to be classified.")
    parser.add_argument("-l", "--labels", help="File path of labels file.")
    parser.add_argument("-k", "--top_k", type=int, default=1,
                        help="Max number of classification results")
    parser.add_argument("-t", "--threshold", type=float, default=0.0,
                        help="Classification score threshold")
    parser.add_argument("-c", "--count", type=int, default=5,
                        help="Number of times to run inference")
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter and has existed since Pillow 2.7.
    image = Image.open(args.input).convert("RGB").resize(size, Image.LANCZOS)
    classify.set_input(interpreter, image)

    print("----INFERENCE TIME----")
    print(
        "Note: The first inference on Edge TPU is slow because it includes",
        "loading the model into Edge TPU memory.",
    )
    classes = []  # BUGFIX: guard against --count 0, which left `classes` unbound
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print("%.1fms" % (inference_time * 1000))

    print("-------RESULTS--------")
    for klass in classes:
        print("%s: %.5f" % (labels.get(klass.id, klass.id), klass.score))
def main():
    """Classify one image with a .tflite model and print timing and results.

    Duplicate of the first example, kept with its original single-quote style;
    same Pillow-10 compatibility fix applied.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--model', required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-i', '--input', required=True,
                        help='Image to be classified.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-k', '--top_k', type=int, default=1,
                        help='Max number of classification results')
    parser.add_argument('-t', '--threshold', type=float, default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c', '--count', type=int, default=5,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter.
    image = Image.open(args.input).convert('RGB').resize(size, Image.LANCZOS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    classes = []  # BUGFIX: guard against --count 0, which left `classes` unbound
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print('%.1fms' % (inference_time * 1000))

    print('-------RESULTS--------')
    for klass in classes:
        print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
# Example #3 ("Beispiel #3", score 0) — marker converted to a comment so the file parses.
def main():
    """Run a segmentation .tflite model on one image.

    Times ``--count`` invocations and, when ``--output`` is given, saves a
    visualization of the final segmentation via ``vis_segmentation``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--model', required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-i', '--input', required=True,
                        help='Image to be classified.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-c', '--count', type=int, default=5,
                        help='Number of times to run inference')
    parser.add_argument('-e', '--edge', action="store_true",
                        help='set for edge tpu')
    parser.add_argument('-o', '--output', type=str, default=None,
                        help="Saves the inferred to the given folder")
    args = parser.parse_args()

    interpreter = make_interpreter(args.model, args.edge)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    print(f"Size: {size}")
    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter.
    image = Image.open(args.input).convert('RGB').resize(size, Image.LANCZOS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        # seg holds the last run's output; requires --count >= 1
        seg = classify.get_output(interpreter)

        print('%.1fms' % (inference_time * 1000))

    if args.output is not None:
        vis_segmentation(image, seg, out=args.output)
def main():
    """Classify the first row of a CSV file with a .tflite model.

    Reads the CSV named by ``--input``, feeds row 0 to the interpreter, runs
    inference ``--count`` times, and prints the score of each returned class.
    """
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('-m', '--model', required=True,
                            help='File path of .tflite file.')
    arg_parser.add_argument('-i', '--input', required=True,
                            help='Image to be classified.')
    arg_parser.add_argument('-k', '--top_k', type=int, default=1,
                            help='Max number of classification results')
    arg_parser.add_argument('-t', '--threshold', type=float, default=0.0,
                            help='Classification score threshold')
    arg_parser.add_argument('-c', '--count', type=int, default=5,
                            help='Number of times to run inference')
    args = arg_parser.parse_args()

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    # CSV rows -> float ndarray; only the first row is classified.
    test_data = np.asarray(read_csv(args.input)).astype(float)
    print(test_data.shape)
    classify.set_input(interpreter, test_data[0])

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
        t0 = time.perf_counter()
        interpreter.invoke()
        elapsed = time.perf_counter() - t0
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print('%.1fms' % (elapsed * 1000))

    print('-------RESULTS--------')
    for klass in classes:
        print('%s: %.5f' % ("score", klass.score))
# Example #5 ("Beispiel #5", score 0) — marker converted to a comment so the file parses.
    def eval_folder(dir, class_toggle=True):
        """Classify every .jpg/.png in *dir* and save problem images.

        Images whose top class matches the expected one ("click" when
        class_toggle is True, otherwise "no click") but score below
        args.threshold are copied to ./below_thresh; misclassified images
        are copied to ./wrong.
        """
        assert os.path.exists(dir)

        wrong_path = "./wrong"
        if not os.path.exists(wrong_path):
            os.makedirs(wrong_path)

        below_thresh_path = "./below_thresh"
        if not os.path.exists(below_thresh_path):
            os.makedirs(below_thresh_path)

        # keep only the jpg/png images
        gh3_pics = [f for f in os.listdir(dir)
                    if f.endswith((".jpg", ".png"))]

        expected = 0 if class_toggle else 1  # 0 == click, 1 == no click

        for path in gh3_pics:
            pic_path = os.path.join(dir, path)
            # BUGFIX: cv2.imread's second argument is an IMREAD_* flag, not a
            # color-conversion code — the original passed cv2.COLOR_BGR2RGB
            # (value 4, i.e. IMREAD_ANYCOLOR) by accident. Read normally and
            # convert BGR->RGB explicitly for the classifier.
            bgr = cv2.imread(pic_path)
            pic = np.array(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB),
                           dtype=np.uint8)
            classify.set_input(interpreter, pic)
            interpreter.invoke()
            classes = classify.get_output(interpreter, 1, 0)
            if classes[0][0] == expected:
                if classes[0][1] < args.threshold:
                    print("below threshold of " + str(args.threshold) + ": " +
                          pic_path)
                    # write the BGR buffer so imwrite keeps correct colors
                    cv2.imwrite(os.path.join(below_thresh_path, path), bgr)
            else:
                print("wrong classification: " + pic_path)
                cv2.imwrite(os.path.join(wrong_path, path), bgr)
# Example #6 ("Beispiel #6", score 0) — marker converted to a comment so the file parses.
def main():
    """Classify PATH_IMAGE with the model at PATH_MODEL, REPEAT times.

    Uses the module-level constants PATH_LABELS, PATH_MODEL, PATH_IMAGE,
    REPEAT, TOP_K and THRESHOLD instead of command-line arguments.
    """
    labels = load_labels(PATH_LABELS) if PATH_LABELS else {}
    interpreter = make_interpreter(PATH_MODEL)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    # we need to convert to gray scale if source image is colored
    # image = Image.open(args.input).convert('L').resize(size, Image.LANCZOS)
    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter.
    image = Image.open(PATH_IMAGE).resize(size, Image.LANCZOS)

    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(REPEAT):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, TOP_K, THRESHOLD)
        print('%.1fms' % (inference_time * 1000))
        for klass in classes:
            # NOTE(review): labels[klass.id] raises KeyError for ids missing
            # from the labels file; sibling examples use labels.get — confirm
            # whether a hard failure is intended here.
            print('%d %s: %.5f' % (klass.id, labels[klass.id], klass.score))
# Example #7 ("Beispiel #7", score 0) — marker converted to a comment so the file parses.
    def live_play():
        """Grab the note highway from the screen and play notes in real time.

        Runs for args.duration seconds: each frame is split into 5 lane crops
        of width note_width, each crop is classified, and detected notes are
        queued on note_q for the notes_worker thread. Prints effective FPS on
        exit and releases any held keys.
        """
        count = 0  # frames processed, used for the FPS report at the end
        single_note = np.zeros((80, 80, 3), dtype=np.float32)
        sct = mss.mss()  # init screen grab object
        threading.Thread(target=notes_worker, daemon=True).start()
        start_time = last_strum = time.time()

        print("SCRIPT STARTED")

        while time.time() - start_time < args.duration:
            all_notes = np.asarray(
                sct.grab(roi))[:, :, :-1]  # RGBA, so omit alpha
            # cv2.imwrite("test.jpg", all_notes); break
            current_notes = []
            start_i = -1 * note_width
            stop_i = 0
            count += 1

            # classify each of the 5 lanes independently
            for i in range(5):
                start_i += note_width
                stop_i += note_width
                single_note[0:80,
                            0:note_width, :] = all_notes[0:80,
                                                         start_i:stop_i, :]
                classify.set_input(interpreter, single_note)
                interpreter.invoke()
                classes = classify.get_output(interpreter, 1, args.threshold)
                # if the highest probable class is "click" and over a threshold confidence:
                if len(classes) > 0 and classes[0][0] == 0:
                    current_notes.append(NOTES[i])

            # NOTE(review): last_strum is never updated after initialization,
            # so this gate is effectively open 0.1s after start — confirm
            # whether updating it per strum was intended.
            can_strum_again = time.time() - last_strum > 0.1
            if len(current_notes) and can_strum_again:
                note_q.put(current_notes)

        print("SCRIPT END! FPS: " + str(count / args.duration))
        release_keys()
# Example #8 ("Beispiel #8", score 0) — marker converted to a comment so the file parses.
def main():
    """Record audio, render it to an image, classify it, and ship the result.

    Pipeline: capture mic audio -> convert wave to text -> render image ->
    classify with an Edge TPU model -> write the winning label to the result
    file and pass it to send_data().
    """
    mic.c_mic()
    wavetxt.wave_to_txt()
    writeimage.write()

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--model',
                        default="./models/CNN_Model_6_v1_edgetpu.tflite",
                        help='File path of .tflite file.')
    parser.add_argument('-i', '--input',
                        default="./images/test1/0.png",
                        help='Image to be classified.')
    parser.add_argument('-l', '--labels',
                        default="./models/classification.txt",
                        help='File path of labels file.')
    parser.add_argument('-k', '--top_k', type=int, default=1,
                        help='Max number of classification results')
    parser.add_argument('-t', '--threshold', type=float, default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c', '--count', type=int, default=1,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)
    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter.
    image = Image.open(args.input).convert('RGB').resize(size, Image.LANCZOS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    classes = []  # BUGFIX: guard against --count 0, which left `classes` unbound
    for _ in range(args.count):
        start = time.perf_counter()
        interpreter.invoke()
        inference_time = time.perf_counter() - start
        classes = classify.get_output(interpreter, args.top_k, args.threshold)
        print('%.1fms' % (inference_time * 1000))

    print('-------RESULTS--------')
    filename = "./Data/result/anomaly_detection_result.txt"
    # BUGFIX: `d` was unbound (NameError at send_data) whenever no class
    # passed the threshold; default to the empty string.
    d = ''
    with open(filename, "w") as fid:
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))
            d = ('%s' % (labels.get(klass.id, klass.id)))
            fid.write(d)
    send_data(d)
# Example #9 ("Beispiel #9", score 0) — marker converted to a comment so the file parses.
def infer_worker(interpreter, threshold, video):
  """Consume note-highway frames from roi_q, classify the five lanes, and
  enqueue fret/strum events on note_q.

  interpreter: allocated tflite interpreter for the per-lane classifier.
  threshold: minimum score forwarded to classify.get_output.
  video: when True, queue items are (frame, timestamp) pairs and annotated
      frames are forwarded to roi_v_q for rendering.

  An int received from roi_q is a sentinel carrying the expected total frame
  count; the loop exits once that many frames have been processed.
  """
  single_note = np.zeros((46, 46, 3), dtype=np.float32)  # reusable lane crop
  last_strum = count = 0
  final_count = math.inf
  last_infer_all_neg = True  # True when the previous frame had no detections
  last_notes = []
  last_notes_non_empty = []
  note_delay = 0.26  # seconds between detection and the scheduled key press

  while count != final_count:
    hammer_on = False
    roi_ = roi_q.get()
    # not an image but a expected frame count
    if type(roi_) is int:
      print("final frame count: " + str(roi_))
      final_count = roi_
      roi_q.task_done()
      continue
    count += 1 # must be after final frame count bit
    if video:
      roi_, timestamp = roi_
    current_notes = []
    start_i = -1 * note_width
    stop_i = 0
    wait_a_frame = False

    # classify each of the 5 lanes (note_width px wide, top 29 rows)
    for i in range(5):
      start_i += note_width
      stop_i += note_width
      single_note[0:29, 0:note_width, :] = roi_[0:29, start_i:stop_i, :]
      classify.set_input(interpreter, single_note)
      interpreter.invoke()
      classes = classify.get_output(interpreter, 1, threshold)
      # if the lowest probable class is "noclick" and over a threshold confidence:
      if len(classes) > 0 and classes[0][0] != 2:
        current_notes.append(NOTES[i])
        if video:
          roi_ = cv2.putText(img=np.copy(roi_), text='%.2f' % classes[0][1], org=(16+46*(i), 20), fontScale=0.3,
                             fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(255,255,255), thickness=1)
        # skip the other notes since we most benefit from using the second detection
        if last_infer_all_neg:
          last_infer_all_neg = False
          wait_a_frame = True
        # class 1 appears to mark a hammer-on (note played without a strum)
        if classes[0][0] == 1:
          hammer_on = True
          break

    if video:
      # really in the way, should add space @ the bottom for this info, if needed
      # roi_ = cv2.putText(img=np.copy(roi_), text='%.2f' % timestamp, org=(5,10), fontScale=0.3,
      #                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(0,0,255), thickness=1)
      roi_v_q.put(roi_)

    if wait_a_frame: continue

    can_strum_again = time.perf_counter() - last_strum > 0.05 #0.125

    # merge this frame's notes with the previous frame's, preserving order
    tmp_last_notes = list(current_notes)
    current_notes += last_notes
    # remove potential dupes
    current_notes = list(dict.fromkeys(current_notes))
    last_notes = tmp_last_notes

    if len(current_notes):
      last_infer_all_neg = False
      if can_strum_again:
        last_strum = time.perf_counter()
        if not hammer_on:
          current_notes.append(k.STRUM)
        if video:
          # in video mode, only schedule key events while frames keep coming
          if final_count is math.inf:
            note_q.put([last_notes_non_empty, current_notes])
            threading.Timer(interval=note_delay, function=notes_worker, args=[]).start()
        else:
          note_q.put([last_notes_non_empty, current_notes])
          threading.Timer(interval=note_delay, function=notes_worker, args=[]).start()
        last_notes_non_empty = list(current_notes)
    else:
      last_infer_all_neg = True

    roi_q.task_done()
  print("infer_worker finished at " + str(time.time()))
def main():
    """Continuously capture PiCamera frames and classify each one.

    Shows each raw frame, runs inference ``--count`` times per frame, prints
    timings and the top-k labels, and exits when 'q' is pressed.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--model', required=True,
                        help='File path of .tflite file.')
    parser.add_argument('-l', '--labels', help='File path of labels file.')
    parser.add_argument('-k', '--top_k', type=int, default=1,
                        help='Max number of classification results')
    parser.add_argument('-t', '--threshold', type=float, default=0.0,
                        help='Classification score threshold')
    parser.add_argument('-c', '--count', type=int, default=5,
                        help='Number of times to run inference')
    args = parser.parse_args()

    labels = load_labels(args.labels) if args.labels else {}

    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    size = classify.input_size(interpreter)

    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera(resolution=(640, 480), framerate=30)

    # allow the camera to warmup
    time.sleep(0.1)
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    while True:
        raw_capture = PiRGBArray(camera)
        # grab an image from the camera and show it on screen
        camera.capture(raw_capture, format="bgr")
        frame = raw_capture.array
        cv2.imshow("Image", frame)
        # resize to the model's expected input size and feed the interpreter
        size = classify.input_size(interpreter)
        frame = cv2.resize(frame, size)
        classify.set_input(interpreter, frame)

        print('----INFERENCE TIME----')
        for _ in range(args.count):
            t0 = time.perf_counter()
            interpreter.invoke()
            elapsed = time.perf_counter() - t0
            classes = classify.get_output(interpreter, args.top_k,
                                          args.threshold)
            print('%.1fms' % (elapsed * 1000))

        print('-------RESULTS--------')
        for result in classes:
            print('%s: %.5f' % (labels.get(result.id, result.id),
                                result.score))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# Example #11 ("Beispiel #11", score 0) — marker converted to a comment so the file parses.
def _dispense_feed():
  """Swing the servo on GPIO 17 once to release bird feed."""
  servoPIN = 17
  GPIO.setmode(GPIO.BCM)
  GPIO.setup(servoPIN, GPIO.OUT)

  p = GPIO.PWM(servoPIN, 50) # GPIO 17 for PWM with 50Hz
  p.start(4.7) # Initialization
  p.ChangeDutyCycle(6)
  time.sleep(0.2)
  p.ChangeDutyCycle(4.7)
  time.sleep(0.5)
  p.stop()
  GPIO.cleanup()


def _timestamp_parts():
  """Return (date, hour, dateandtime) strings for the current moment."""
  dateTimeObj = datetime.now()
  return (dateTimeObj.strftime("%b %d, %Y"),
          dateTimeObj.strftime("%I %p"),
          dateTimeObj.strftime("%b %d, %Y, %I:%M %p"))


def main():
  """Bird-feeder loop: photograph, classify, and feed the selected bird.

  Authorizes against the Google Sheets API, then repeatedly captures a
  PiCamera photo, classifies it, and — when the detected bird matches the
  user-selected one with score > 0.60 and the feeder cooldown has elapsed —
  dispenses feed and logs the observation to the spreadsheet.
  """
  # establish connection with Google API
  SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

  creds = None
  # The file token.pickle stores the user's access and refresh tokens, and is
  # created automatically when the authorization flow completes for the first
  # time.
  if os.path.exists('token.pickle'):
    with open('token.pickle', 'rb') as token:
        creds = pickle.load(token)
  # If there are no (valid) credentials available, let the user log in.
  if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

  service = build('sheets', 'v4', credentials=creds)
  # BUGFIX: the original repeated the entire credential block a second time,
  # merely re-reading the token just written and rebuilding an identical
  # service object; the duplicate was removed.

  # BUGFIX: argument parsing, label loading and interpreter construction were
  # inside the capture loop, re-parsing argv and reloading the model on every
  # iteration; they are invariant, so they are hoisted out.
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '-m', '--model', required=True, help='File path of .tflite file.')
  parser.add_argument(
      '-i', '--input', required=False, help='Image to be classified.')
  parser.add_argument(
      '-l', '--labels', help='File path of labels file.')
  parser.add_argument(
      '-k', '--top_k', type=int, default=1,
      help='Max number of classification results')
  parser.add_argument(
      '-t', '--threshold', type=float, default=0.0,
      help='Classification score threshold')
  parser.add_argument(
      '-c', '--count', type=int, default=5,
      help='Number of times to run inference')
  args = parser.parse_args()

  labels = load_labels(args.labels) if args.labels else {}

  interpreter = make_interpreter(args.model)
  interpreter.allocate_tensors()
  size = classify.input_size(interpreter)

  # set up the feeder state
  time_since_lastfed = 1000
  camera = PiCamera()
  while True:
    # obtain what bird was selected by user
    selected_bird = read_selected_bird(service)

    sleep(.5)
    camera.capture('images/current_photo.jpg')

    # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter.
    image = Image.open('images/current_photo.jpg').convert('RGB').resize(
        size, Image.LANCZOS)
    classify.set_input(interpreter, image)

    print('----INFERENCE TIME----')
    print('Note: The first inference on Edge TPU is slow because it includes',
          'loading the model into Edge TPU memory.')
    for _ in range(args.count):
      start = time.perf_counter()
      interpreter.invoke()
      inference_time = time.perf_counter() - start
      classes = classify.get_output(interpreter, args.top_k, args.threshold)

    print('-------RESULTS--------')
    for klass in classes:
      label = labels.get(klass.id, klass.id)
      # Only if a bird was identified
      print(label)
      print(selected_bird)
      if label != '' and label != 'background':
        # label format is assumed to be "... (common name)" — TODO confirm
        name = label.split('(')[1].split(')')[0]
        print(name, ' , confidence: ', klass.score)

        if name == selected_bird and klass.score > .60:
          if time_since_lastfed > 12:
            _dispense_feed()
            date, hour, dateandtime = _timestamp_parts()
            upload_observation(date, hour, dateandtime, name, 'no', '', '',
                               service)
            time_since_lastfed = 0
        elif klass.score > .60:
          date, hour, dateandtime = _timestamp_parts()
          upload_observation(date, hour, dateandtime, name, 'yes', '', '',
                            service)

    time_since_lastfed += 1
    print(time_since_lastfed)
# Example #12 ("Beispiel #12", score 0) — marker converted to a comment so the file parses.
def main():
    """Take a snapshot, classify it, and index the result into Elasticsearch.

    Walks rootdir looking for a labels file and a .tflite model, classifies
    the snapshot image, collects the results via the module-level `dummy` /
    `max_group` machinery, bulk-indexes one document, and deletes the photo.
    """
    subprocess.run('/usr/bin/snapshot', shell=False)  # calls "snapshot.py"
    image_file = os.listdir(rootdir)

    # BUGFIX: image_path was unbound at os.remove() below whenever no model
    # directory was found during the walk; initialize and guard it.
    image_path = None
    for root, subdirs, files in os.walk(rootdir):

        labels = getLabel(root, files)

        interpreter = getInterpreter(root, files)

        if interpreter is not None:
            size = classify.input_size(interpreter)

            image_path = getImage(dir_path, image_file)

            # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS
            # is the same resampling filter.
            image = Image.open(image_path).convert('RGB').resize(
                size, Image.LANCZOS)

            classify.set_input(interpreter, image)

            print('*The first inference on Edge TPU is slow because it includes',
                  'loading the model into Edge TPU memory*')
            for _ in range(count):
                start = time.perf_counter()
                interpreter.invoke()
                inference_time = time.perf_counter() - start
                classes = classify.get_output(interpreter, top_k, threshold)
                dummy.append(f'Time(ms):{(inference_time*1000):.4}')
                print('Time(ms):', '%.1f' % (inference_time * 1000))
            print("\n")

            for klass in classes:
                dummy.append(f'Inference:{(labels.get(klass.id, klass.id))}')
                print('Inference:', '%s' % (labels.get(klass.id, klass.id)))
                dummy.append(f'Score:{(klass.score):.5}')
                print('Score:', '%.5f' % (klass.score))
                print("\n")

    maX_group = max_group()

    temperature = check_temperature_status()
    maX_group.append(f'TPU_temp(°C):{int(temperature)/1000}')
    print('#####################################')
    print("\n")

    es = initialize_elasticsearch()
    initialize_mapping(es)

    # One document per run; field values are parsed out of the "key:value"
    # strings accumulated in maX_group.
    actions = [
        {
            '_index': INDEX_NAME,
            '_type': DOC_TYPE,
            "@timestamp": str(datetime.datetime.utcnow().strftime("%Y-%m-%d"'T'"%H:%M:%S")),
            "Labels": maX_group[0].split(":")[1],
            "Model": maX_group[1].split(":")[1],
            "Image": maX_group[2].split(":")[1],
            "Time(ms)": maX_group[4].split(":")[1],
            "Inference": maX_group[5].split(":")[1],
            "Score": maX_group[6].split(":")[1],
            "TPU_temp(°C)": maX_group[7].split(":")[1]
        }]

    try:
        res = helpers.bulk(client=es, index=INDEX_NAME, actions=actions)
        print("\nhelpers.bulk() RESPONSE:", res)
        print("RESPONSE TYPE:", type(res))
    except Exception as err:
        print("\nhelpers.bulk() ERROR:", err)

    print("\n")
    print("\n")

    if image_path is not None:
        os.remove(image_path)
        print("Photo has been deleted")
# Example #13 ("Beispiel #13", score 0) — marker converted to a comment so the file parses.
def main():
    """Poll two camera URLs, edge-filter both images, classify and display.

    Loops until 'q' is pressed: downloads two images, crops them, applies
    greyscale edge detection, classifies each with the calibre model, prints
    timings/results, and shows the second image in an OpenCV window.
    """
    labels = load_labels(etiquetas_calibre) if etiquetas_calibre else {}
    interpreter = make_interpreter(modelo_calibre)
    interpreter.allocate_tensors()
    while cv2.waitKey(1) & 0xFF != ord('q'):

        print()
        print('____________________________________')
        print()

        # Open the url images; stream=True returns the stream content.
        try:
            r = requests.get(image_url, stream=True)
            r2 = requests.get(image_url2, stream=True)
        except requests.RequestException:
            # BUGFIX: the original bare except swallowed the error and then
            # used the undefined/stale responses below; skip this iteration.
            print("Error al descagar imagenes")
            continue

        # Check if the images were retrieved successfully
        if (r.status_code == 200) and (r2.status_code == 200):
            # Set decode_content value to True, otherwise the downloaded
            # image file's size will be zero.
            r.raw.decode_content = True
            r2.raw.decode_content = True

            # Open a local file with wb ( write binary ) permission.
            with open(filename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)

            print('Image sucessfully Downloaded1 : ', filename)
            print()

            with open(filename2, 'wb') as f2:
                shutil.copyfileobj(r2.raw, f2)

            print('Image sucessfully Downloaded2 : ', filename2)
            print()

        else:
            print('Images Couldn\'t be retreived')

        # crop both images to their regions of interest
        im = Image.open(filename)
        im = im.crop((50, 100, 500, 300))
        im.save(filename)

        im2 = Image.open(filename2)
        im2 = im2.crop((150, 100, 500, 300))
        im2.save(filename2)

        # Edge detection requires input image mode = Greyscale (L).
        image = Image.open(filename).convert("L").filter(
            ImageFilter.FIND_EDGES)
        image.save(filename)

        image2 = Image.open(filename2).convert("L").filter(
            ImageFilter.FIND_EDGES)
        image2.save(filename2)

        # classification engine
        # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

        size = classify.input_size(interpreter)
        # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
        # the same resampling filter.
        image = Image.open(img_input).convert('RGB').resize(
            size, Image.LANCZOS)
        classify.set_input(interpreter, image)
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("")
        print('----INFERENCE TIME----')
        print(
            'Note: The first inference on Edge TPU is slow because it includes',
            'loading the model into Edge TPU memory.')
        for _ in range(5):
            start = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - start
            classes = classify.get_output(interpreter, 1, 0)
            print('%.1fms' % (inference_time * 1000))

        print('-------RESULTS--------')
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))

        # second image
        size = classify.input_size(interpreter)
        image2 = Image.open(img_input2).convert('RGB').resize(
            size, Image.LANCZOS)
        classify.set_input(interpreter, image2)
        print("")
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("")
        print('----INFERENCE TIME 2----')
        print(
            'Note: The first inference on Edge TPU is slow because it includes',
            'loading the model into Edge TPU memory.')
        for _ in range(5):
            start = time.perf_counter()
            interpreter.invoke()
            inference_time = time.perf_counter() - start
            classes = classify.get_output(interpreter, 1, 0)
            print('%.1fms' % (inference_time * 1000))

        print('-------RESULTS2--------')
        for klass in classes:
            print('%s: %.5f' % (labels.get(klass.id, klass.id), klass.score))

        # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

        # Display the second classified image in an (untitled) window.
        image_final = cv2.imread(img_input2)
        window_name = ''
        cv2.imshow(window_name, image_final)

    # closing all open windows
    cv2.destroyAllWindows()
# Example #14 ("Beispiel #14", score 0) — marker converted to a comment so the file parses.
# NOTE(review): the following infer_worker example is truncated mid-statement in this dump.
def infer_worker(interpreter, threshold, video):
    """Consume ROI frames from ``roi_q``, classify each of the 5 note lanes,
    and push detected chords onto ``note_q``.

    Queue protocol: the producer puts frames on ``roi_q`` (when *video* is
    truthy, items are ``(frame, timestamp)`` tuples) and finally puts a plain
    ``int`` — the expected total frame count.  The loop exits once that many
    frames have been processed.

    Args:
        interpreter: TFLite/Edge TPU interpreter loaded with the per-note
            classification model (input is an 80x80 RGB crop).
        threshold: minimum score passed to ``classify.get_output``.
        video: when truthy, annotated frames are also forwarded to
            ``roi_v_q`` for display/recording.
    """
    # Reusable input buffer for a single 80x80 note crop.
    single_note = np.zeros((80, 80, 3), dtype=np.float32)
    last_strum = 0  # perf_counter timestamp of the most recent strum
    count = 0  # real frames processed so far
    final_count = math.inf  # unknown until the producer sends it

    while count != final_count:
        item = roi_q.get()
        # An int is not a frame: it is the producer's expected frame count.
        if isinstance(item, int):
            print("final frame count: " + str(item))
            final_count = item
            roi_q.task_done()
            continue
        count += 1  # must be incremented only for real frames
        if video:
            item, timestamp = item
        roi_ = item
        current_notes = []

        for i in range(5):
            # Lane i occupies columns [i * note_width, (i + 1) * note_width).
            start_i = i * note_width
            stop_i = start_i + note_width
            single_note[0:80, 0:note_width, :] = roi_[0:80, start_i:stop_i, :]
            classify.set_input(interpreter, single_note)
            interpreter.invoke()
            classes = classify.get_output(interpreter, 1, threshold)
            # Class id 0 is the "click" class: the lane is being played.
            if len(classes) > 0 and classes[0][0] == 0:
                current_notes.append(NOTES[i])
                if video:
                    # Overlay the confidence score on the detected lane.
                    roi_ = cv2.putText(img=np.copy(roi_),
                                       text='%.2f' % classes[0][1],
                                       org=(16 + 80 * i, 51),
                                       fontScale=0.3,
                                       fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                       color=(255, 255, 255),
                                       thickness=1)

        if video:
            # Stamp the frame timestamp and forward for display/recording.
            roi_ = cv2.putText(img=np.copy(roi_),
                               text='%.2f' % timestamp,
                               org=(5, 10),
                               fontScale=0.3,
                               fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                               color=(0, 0, 255),
                               thickness=1)
            roi_v_q.put(roi_)

        # Debounce: allow at most one strum every 125 ms.
        can_strum_again = time.perf_counter() - last_strum > 0.125

        if current_notes and can_strum_again:
            last_strum = time.perf_counter()
            if video:
                # In video mode, stop emitting strums once the final frame
                # count is known (playback is winding down).
                if math.isinf(final_count):
                    note_q.put(current_notes)
            else:
                note_q.put(current_notes)

        roi_q.task_done()
    print("infer_worker finished at " + str(time.time()))