Пример #1
0
 def set_favorite_item_in_ui():
     """Fetch the favorite selected in the UI and display it as the current item.

     Reads the name picked in the favorites widget, pulls the matching row
     from the favorites queryset, wraps it in a parsepy.item, pushes it into
     the item view, and announces it through the speaker.
     """
     print("Selected Favorite Item")
     # Narrow the favorites queryset to the row whose name matches the
     # selection currently held by the favorites widget.
     matches = favorites_list_queryset.filter(
         name=favorites_item_name.get())
     row = matches[0]
     chosen = parsepy.item(
         name=row.name,
         upc=row.upc,
         imageURL=row.imageURL,
         category=row.category)
     print(chosen, chosen.name)
     set_item_in_ui(chosen, 'upc')
     # Skip speaking trivially short (e.g. placeholder single-char) names.
     if len(chosen.name) > 1:
         speaker.say(chosen.name)
Пример #2
0
def search(upc):
    """Look up *upc* via the BarcodeSpider API and return a parsepy.item.

    On success the returned item carries the product title, the UPC, and an
    image URL. On any failure — non-200 status, unparseable JSON, or a
    payload missing the expected fields — the item's name is set to the
    sentinel "@@ UPC not found" so callers can detect the miss.

    Parameters:
        upc: the UPC barcode string to look up.

    Returns:
        parsepy.item: populated on success, sentinel-named on failure.
    """
    new_item = parsepy.item()
    # Call the API and put results into a new Item object
    params = {'token': BARCODESPIDER_TOKEN, 'upc': upc}
    r = requests.get(BARCODESPIDER_URL, params=params)
    if r.status_code == 200:
        try:
            item_attributes = r.json()['item_attributes']
            new_item.name = item_attributes['title']  # the product title from the lookup
            new_item.upc = upc
            new_item.imageURL = item_attributes['image']
        except (ValueError, KeyError):
            # 200 response but the body was not valid JSON or lacked the
            # expected keys — treat it the same as a failed lookup instead
            # of crashing the caller.
            new_item.name = "@@ UPC not found"
    else:
        new_item.name = "@@ UPC not found"

    return new_item
Пример #3
0
    def __init__(self, speaker):
        """Initialize wake-word detection and speech-recognition resources.

        Creates a pvrhino speech-to-intent handle from a local model file,
        opens a matching PyAudio input stream, configures a
        speech_recognition Recognizer, then starts the background
        wake-word listener thread (started last, after all state exists).

        Args:
            speaker: audio-output helper; stored on the instance.
        """
        self.speaker = speaker  # used to create the beep() sound

        # Rhino context compiled for this app's commands; sensitivity 0.25
        # trades fewer false accepts for more misses (engine-specific knob).
        self.handle = pvrhino.create(context_path='./models/Irma_Rules_2.rhn',
                                     sensitivity=0.25)
        print("sample_rate", self.handle.sample_rate, "frame_len:",
              self.handle.frame_length)
        self.pa = pyaudio.PyAudio()
        # Mono 16-bit input stream whose rate and buffer size are taken
        # directly from the Rhino handle so frames can be fed to it as-is.
        self.audio_stream = self.pa.open(
            rate=self.handle.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=self.handle.frame_length)
        self.recognizer = sr.Recognizer()  # obtain audio from the microphone
        print('NONSPEAKING', self.recognizer.non_speaking_duration)
        print('PAUSE THRESHOLD', self.recognizer.pause_threshold)
        self.recognizer.pause_threshold = 0.5  # default 0.8
        self.recognizer.operation_timeout = 2
        self.recognizer.energy_threshold = 3000

        # NOTE(review): adjust_for_ambient_noise recalibrates
        # energy_threshold, so the 3000 set above is likely overwritten
        # here — confirm this ordering is intentional.
        with sr.Microphone() as source:
            self.recognizer.adjust_for_ambient_noise(source)

        # Thread and flags
        self.ON = True                 # master on/off switch for the listener loop
        self.running = True
        self.wakeword_flag = False     # set when the wake word is detected
        # Placeholder item filled in by voice commands; single-space fields
        # appear to act as "empty" sentinels — confirm against consumers.
        self.voice_item = parsepy.item()
        self.voice_item.upc = ' '
        self.voice_item.imageURL = ' '
        self.voice_item.name = ' '
        self.command = 'None'
        # Started last so the thread never observes partially-built state.
        self.wakeword_thread = threading.Thread(target=self.wakeword_run,
                                                name="wakeword_thread")
        self.wakeword_thread.start()
        print('WakeWord Initialized')
Пример #4
0
 def raise_new_item_window():
     """Reset the shared current item and bring the new-item window to front."""
     global current_item
     # Start from a blank item so stale fields never leak into the form.
     current_item = parsepy.item()
     # Un-minimize / raise the (module-level) new-item tkinter window.
     new_item_window.deiconify()
Пример #5
0
from io import BytesIO

from tkinter import *

import requests
from PIL import ImageTk, Image

from parse_rest.connection import register
from parse_rest.user import User

import parsepy
import voice_command

# Connect to the Parse backend and sign in. The credentials (APP_ID,
# API_KEY, USER, PASSWORD) are not defined in this view — presumably they
# come from elsewhere in the module or its imports; verify before running.
register(APP_ID, API_KEY, master_key=None)
user = User.login(USER, PASSWORD)
current_item = parsepy.item()  # item currently shown/edited in the UI
last_name = " "  # last shown name; single space appears to mean "none yet"


def start(barcode, wake, speaker, detected_object):
    """Build and run the IRMA tkinter UI (definition continues past this view).

    Args:
        barcode, wake, speaker, detected_object: collaborating subsystem
            objects; their interfaces are not visible from this excerpt.
    """
    # Root: Define the tkinter root.
    root = Tk()
    root.title('IRMA Refrigerator Management System')
    root.geometry("900x800")
    # root.attributes("-fullscreen", True)
    # root.columnconfigure(0, weight=1)
    # root.rowconfigure(0, weight=1)

    # Frame: Define a frame that covers the entire root window.
    root_frame = Frame(root, padx=3, pady=3)
    root_frame.grid(column=0, row=0, sticky=(N, W, E, S))
    def detection_loop(self):
        """Continuously run TFLite object detection on camera frames.

        Runs while self.ON is set: grabs a frame, runs the interpreter,
        draws boxes/labels for accepted detections, and publishes the
        highest-scoring object via self.new_item / self.obj_flag.
        (Definition may continue beyond this excerpt.)
        """
        print("detection loop started")
        while self.ON:
            # Read frame from camera connection
            frame = self.camera.read_frame()

            in_height, in_width, in_channels = frame.shape
            # Model expects RGB at (self.width, self.height); camera gives BGR.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb, (self.width, self.height))
            input_data = np.expand_dims(frame_resized, axis=0)
            # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
            if self.floating_model:
                input_data = (np.float32(input_data) - 127.5) / 127.5
                print('Floating model')
            # Perform the actual detection by running the model with the image as input
            self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
            self.interpreter.invoke()
            # Retrieve detection results
            boxes = self.interpreter.get_tensor(self.output_details[0]['index'])[0]  # Bounding box coordinates of detected objects
            classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0]  # Class index of detected objects
            scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0]  # Confidence of detected objects

            # Loop over all detections and draw detection box if confidence is above minimum threshold
            # ilist whitelists class ids 51-60 — presumably the food classes
            # of the label map; confirm against self.labels.
            ilist = [51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
            object_name = ' '
            top_score = 0
            top_name = ' '
            # NOTE(review): last_name is reset to ' ' on every loop pass, so
            # the "top_name != last_name" check below almost always fires —
            # likely it was meant to persist across frames (e.g. self.last_name).
            last_name = ' '
            for i in range(len(scores)):
                if ((int(classes[i]) in ilist) and (scores[i] > self.min_conf_threshold) and (scores[i] <= 1.0)):
                    # Get bounding box coordinates and draw box
                    # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
                    ymin = int(max(0, boxes[i][0]) * in_height)
                    xmin = int(max(0, boxes[i][1]) * in_width)
                    ymax = int(min(1, boxes[i][2]) * in_height)
                    xmax = int(min(1, boxes[i][3]) * in_width)
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 1)

                    # Draw label into **input frame**
                    object_name = self.labels[int(classes[i])]  # Look up object name from "labels" array using class index
                    item_txt = '%s: %d%%' % (object_name, int(scores[i] * 100))  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(item_txt, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)  # Get font size
                    label_ymin = max(ymin, labelSize[1] + 10)  # Make sure not to draw label too close to top of window
                    cv2.rectangle(frame,
                                  (xmin, label_ymin - labelSize[1] - 10),
                                  (xmin + labelSize[0], label_ymin + baseLine - 10),
                                  (255, 255, 255),
                                  cv2.FILLED)  # Draw white box to put label text in
                    cv2.putText(frame, item_txt,
                                (xmin, label_ymin - 7),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 0, 0), 2)  # Draw label text

                    # Draw circle in center
                    xcenter = xmin + (int(round((xmax - xmin) / 2)))
                    ycenter = ymin + (int(round((ymax - ymin) / 2)))
                    cv2.circle(frame, (xcenter, ycenter), 5, (0, 0, 255), thickness=-1)
                    # Track the single highest-confidence accepted detection.
                    if scores[i] > top_score:
                        top_score = scores[i]
                        top_name = self.labels[int(classes[i])]
                    # Print info
                    print('Object ', str(classes[i]), ': ', object_name, 'score:', scores[i])

            # SELECT WHICH ITEM GETS SAVED
            if top_name != last_name:
                last_name = top_name
                # Publish the winner as a parsepy.item; single-space fields
                # appear to be "empty" placeholders.
                object_item = parsepy.item()
                object_item.name = top_name
                object_item.upc = ' '
                object_item.imageURL = ' '
                print("writing new object", object_item.name)
                self.new_item = object_item
                self.obj_flag = True  # consumer presumably clears this flag — confirm

            # All the results have been drawn on the frame, so it's time to display it.
            if SHOW_DETECTION_VIDEO and in_height > 0:
                out_scale_fct = 1
                frame = cv2.resize(frame, (int(in_width * out_scale_fct), int(in_height * out_scale_fct)))
                frame = cv2.normalize(frame, frame, 0, 255, cv2.NORM_MINMAX)
                cv2.imshow('Objects', frame)
                cv2.moveWindow('Objects', 10, 10)
                cv2.waitKey(200)