Example #1
    def __label_analysis__(self, datalist):
        try:
            # Collect every label annotation returned by the Vision API
            for responseItem in datalist['responses']:
                for label_item in responseItem['labelAnnotations']:
                    label_details = Label()
                    label_details.description = label_item['description']
                    label_details.score = label_item['score']
                    self.vision_api_matches_list.append(label_details)

            print("API Response")
            for label in self.vision_api_matches_list:
                print(label.description + " = " + str(label.score))

            # Look for the highest-priority item of interest among the labels
            picture_match = False
            current_match = ItemOfInterest()
            current_match.priority = 100

            for visual_label in self.vision_api_matches_list:
                for item_of_interest in self.items_of_interest_list:
                    if item_of_interest.matches_label(visual_label.description,
                                                      visual_label.score,
                                                      current_match.priority):
                        current_match = item_of_interest
                        picture_match = True

            if picture_match:
                current_match.trigger_response()
            else:
                print("No match")
                # snap_shot.deletePicture()
        except (KeyError, TypeError):
            sound_display = SoundControl()
            sound_display.error_blip()
            print("Issue processing data returned from Google Vision API")
    def __get_picture_from_camera__(self):
        # initialize the camera if needed, then capture a frame and save it to disk
        try:
            sound_display = SoundControl()
            if self.cam == 0:
                self.__init_camera__()

            s, img = self.cam.read()

            sound_display.justSound("sounds/camera.wav")
            self.__set_picture_name__()
            if s:  # frame captured without any errors
                imwrite(self.picture_name, img)  # save image

                if self.display_captured_pic:
                    sound_display.sound_and_image_display(
                        "", img, "Captured Picture")

            else:
                sound_display.error_blip()
                self.cam.release()
                return False

            #self.cam.release()
            #cv2.destroyAllWindows()

            return True

        except Exception:
            print("No camera opened or frame capture failed")
            return False
    def picture_countdown(self):
        sound_display = SoundControl()
        if self.camera_countdown_on:
            short_pip = "sounds/short_tone.wav"
            # Note: the long pip currently reuses the short tone sample
            long_pip = "sounds/short_tone.wav"
            sound_display.justSound(short_pip)
            time.sleep(0.5)
            sound_display.justSound(short_pip)
            time.sleep(0.5)
            sound_display.justSound(short_pip)
            time.sleep(0.5)
            sound_display.justSound(long_pip)
        return self.__get_picture_from_camera__()
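
The label analysis above relies on two small helpers, Label and ItemOfInterest, that are defined elsewhere in the project. A minimal sketch of how they could look, assuming matches_label checks a keyword, a confidence threshold, and a priority ranking where lower numbers win (every attribute beyond description, score, and priority is an assumption, not the original implementation):

class Label:
    # Plain holder for one Vision API label annotation
    def __init__(self):
        self.description = ""
        self.score = 0.0


class ItemOfInterest:
    # Hypothetical item to watch for; keyword and threshold fields are assumed
    def __init__(self, keyword="", score_threshold=0.5, priority=100):
        self.keyword = keyword
        self.score_threshold = score_threshold
        self.priority = priority
        self.triggered = False

    def matches_label(self, description, score, current_priority):
        # Match when the label text contains the keyword, the confidence is
        # high enough, and this item outranks the best match found so far
        return (self.keyword.lower() in description.lower()
                and score >= self.score_threshold
                and self.priority < current_priority)

    def set_triggered(self):
        self.triggered = True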
Example #4
    def send_picture_to_api(self):
        try:
            # Post the prepared JSON request body straight from the file
            with open(self.output_filename) as data:
                url_request = "https://vision.googleapis.com/v1/images:annotate?key=" + self.project_api_key
                print(url_request)
                response = requests.post(url=url_request,
                                         data=data,
                                         headers={'Content-Type': 'application/json'})
            self.datalist = response.json()
            return True
        except (OSError, requests.RequestException, ValueError):
            sound_display = SoundControl()
            sound_display.error_blip()
            print("Error with response from Google Vision API")
            return False
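
The file posted here has to follow the Vision API images:annotate request format: one or more requests, each carrying a base64-encoded image and a list of feature specifications. A minimal sketch of how the earlier prepare_picture step might write that file (the function and parameter names are assumptions; only the JSON structure comes from the Vision API documentation):

import base64
import json


def build_annotate_request(picture_file, features, output_filename):
    # features is expected to look like [{"type": "LABEL_DETECTION", "maxResults": 10}]
    with open(picture_file, "rb") as image:
        content = base64.b64encode(image.read()).decode("utf-8")
    body = {"requests": [{"image": {"content": content},
                          "features": features}]}
    with open(output_filename, "w") as out:
        json.dump(body, out)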
Example #5
    def analyse_picture(self, picture_file, request):
        self.vision_api_matches_list.clear()
        self.__set_context__(request)

        if self.google_request.prepare_picture(picture_file, self.features):
            if self.google_request.send_picture_to_api():
                try:
                    datalist = self.google_request.get_json_datalist()

                    # Route the response to the analysis that matches the context
                    if self.request_mode == Context.FACIAL:
                        self.__facial_analaysis__(datalist)
                    else:
                        self.__label_analysis__(datalist)

                except Exception:
                    sound_display = SoundControl()
                    sound_display.error_blip()
                    print("Issue with Google Vision API data")
Example #6
    def trigger_response(self):
        self.set_triggered()
        sound_display = SoundControl()
        sound_display.sound_and_picture_display(self.audio_response,
                                                self.visual_response,
                                                "API Matched Item")
Example #7
#!/usr/bin/env python3.7

from flask import Flask, render_template, redirect, url_for
import datetime
import json
import threading
from subprocess import call
from SoundControl import SoundControl
from settings import volumeIncrement, apiEndpoint, apiEndpointForWeb

app = Flask(__name__)

lullablock = SoundControl()
print("lullablock status: ", lullablock)


def timerInMinutes():
    return round(lullablock.getTimer() / 60, 1)


@app.route('/')
def index():
    now = datetime.datetime.now()
    timeString = now.strftime("%Y-%m-%d %H:%M")

    templateData = {
        'volume': lullablock.getVolume(),
        'timer_in_minutes': timerInMinutes(),
        'time': timeString,
        'tracks': lullablock.getTracks(),
        'track': lullablock.getTrackTitle(),