Ejemplo n.º 1
0
1. (Re)Start Docker
2. Start Object Detection pepper_tensorflow/pepper_tensorflow/object_detection.py
3. Start This script!

"""


class BroaderMindApp(
        AbstractApplication,
        StatisticsComponent,  # Performance statistics in the terminal
        DisplayComponent,  # Browser view of what the robot (or computer) sees
        SceneComponent,  # Scene (required by DisplayComponent)
        ContextComponent,  # Context (required by DisplayComponent)
        ObjectDetectionComponent,  # Object detection (required by DisplayComponent)
        FaceRecognitionComponent,  # Face recognition (required by DisplayComponent)
        SpeechRecognitionComponent,  # Speech recognition (dependency)
        TextToSpeechComponent):  # Text to speech (dependency)
    """Chat application that greets on every chat turn while displaying sensor output."""

    def __init__(self, backend):
        super(BroaderMindApp, self).__init__(backend)

        # Open a conversation with a generically named human straight away
        self.context.start_chat("Human")

    def on_chat_turn(self, utterance):
        """React to each utterance in the ongoing chat with a fixed greeting."""
        # TODO: Change animation to preferred animation
        self.say('hello! ... ...', animation=animations.AFFIRMATIVE)


if __name__ == '__main__':
    # Launch the application on the backend from the global configuration
    backend = config.get_backend()
    BroaderMindApp(backend).run()
Ejemplo n.º 2
0
from pepper.framework import *
from pepper.knowledge.wikipedia import Wikipedia
from pepper import config


class WikipediaApp(AbstractApplication,
                   StatisticsComponent,
                   StreamingSpeechRecognitionComponent,
                   TextToSpeechComponent):
    """Answer spoken questions by querying Wikipedia and speaking the first sentence."""

    def on_transcript(self, hypotheses, audio):
        """Take the best transcription hypothesis and answer it via Wikipedia."""
        question = hypotheses[0].transcript
        print(question)
        self.respond_wikipedia(question)

    def respond_wikipedia(self, question):
        """Query Wikipedia for *question*; speak the first sentence of any answer.

        Returns the (truncated) answer, or the falsy query result when none was found.
        """
        answer = Wikipedia().query(question)
        if not answer:
            return answer
        # Keep only the first sentence so the spoken reply stays short
        answer = answer.split('. ')[0]
        self.say(answer)
        return answer


if __name__ == "__main__":
    # Launch with the globally configured backend
    app = WikipediaApp(config.get_backend())
    app.run()
Ejemplo n.º 3
0
from pepper.framework import *
from pepper import config


class TrackApp(AbstractApplication, TrackComponent, DisplayComponent,
               ObjectDetectionComponent, FaceDetectionComponent):
    """Track and display detected objects and faces; all behaviour comes from components."""

    def __init__(self, backend):
        # No extra setup is needed beyond component initialisation
        super(TrackApp, self).__init__(backend)


if __name__ == '__main__':
    # Start the tracking application on the configured backend
    backend = config.get_backend()
    TrackApp(backend).run()
Ejemplo n.º 4
0
        # Query Wikipedia for Answer to Question
        result = Wikipedia.query(question)

        if result:

            answer, url = result

            # Limit Answer to a single sentence
            answer = answer.split('.')[0]

            # Tell Answer to Human
            self.say(answer)

        else:

            # Tell Human you don't know
            self.say("I don't know!")


if __name__ == "__main__":
    # Build the application on the backend from the global configuration file, then run it
    WikipediaApplication(config.get_backend()).run()
Ejemplo n.º 5
0
from pepper.framework import *
from pepper import config


class ObjPosApp(AbstractApplication,
                DisplayComponent, SceneComponent,
                ExploreComponent, ContextComponent,
                ObjectDetectionComponent, SpeechRecognitionComponent, FaceRecognitionComponent, TextToSpeechComponent):
    """Explore the scene and display detected object positions; behaviour comes from components."""

    def __init__(self, backend):
        # All setup is handled by the component chain
        super(ObjPosApp, self).__init__(backend)


if __name__ == '__main__':
    # Run on the backend selected by the global configuration
    backend = config.get_backend()
    ObjPosApp(backend).run()
Ejemplo n.º 6
0
            # Check whether speaking is appropriate
            if self.is_object_recognition_appropriate(obj.name):

                # Then tell human what you saw
                self.say("I see a {}".format(obj.name))

    def is_object_recognition_appropriate(self, name):
        """Return True if announcing *name* is appropriate, refreshing its last-seen time.

        Appropriateness arises when the object hasn't been seen before,
        or enough time (OBJECT_TIMEOUT) has passed since the last sighting.
        """
        last_seen = self.object_time.get(name)

        # Recently announced: stay quiet
        if last_seen is not None and time() - last_seen <= self.OBJECT_TIMEOUT:
            return False

        # Record the sighting (right now) and allow the announcement
        self.object_time[name] = time()
        return True


if __name__ == "__main__":
    # Launch ObjectApplication on the backend configured in the global configuration file
    application = ObjectApplication(config.get_backend())
    application.run()
Ejemplo n.º 7
0
from pepper.framework import *
from pepper import config


class DisplayApp(AbstractApplication, StatisticsComponent, DisplayComponent,
                 ObjectDetectionComponent, FaceDetectionComponent,
                 SpeechRecognitionComponent):
    """Display-only application: all behaviour is inherited from the mixed-in components."""


if __name__ == '__main__':
    # Run the display application on the configured backend
    backend = config.get_backend()
    DisplayApp(backend).run()
Ejemplo n.º 8
0
from pepper.framework import *
from pepper import config


class VerboseApp(AbstractApplication, StatisticsComponent,
                 SpeechRecognitionComponent, ObjectDetectionComponent,
                 FaceDetectionComponent):
    """Log every perception event: camera frames, objects, faces and transcripts."""

    def on_image(self, image):
        """Log the shape of each camera frame."""
        self.log.info("on_image: {}".format(image.shape))

    def on_object(self, image, objects):
        """Log detected objects together with the frame shape."""
        self.log.info("on_object: {} {}".format(image.shape, objects))

    def on_face_known(self, faces):
        """Log recognised (known) faces."""
        self.log.info("on_face: {}".format(faces))

    def on_face(self, faces):
        """Log any detected faces."""
        self.log.info("on_person: {}".format(faces))

    def on_face_new(self, faces):
        """Log faces that were not recognised."""
        self.log.info("on_new_person: {}".format(faces))

    def on_transcript(self, hypotheses, audio):
        """Log speech hypotheses and the audio shape.

        Fix: the original format string had a single placeholder, so the
        second argument (audio.shape) was silently dropped by str.format.
        """
        self.log.info("on_transcript: {} {}".format(hypotheses, audio.shape))


if __name__ == '__main__':
    # Run the verbose logger on the configured backend
    backend = config.get_backend()
    VerboseApp(backend).run()
Ejemplo n.º 9
0
                    "bye bye", "bye", "goodbye", "see you"
            ]:
                # Respond Goodbye, <speaker>!
                self.say("Goodbye, {}!".format(self._speaker))

                # Sleep 5 seconds (else human would be instantly greeted again)
                sleep(5)

                # Switch Back to Idle Intention
                IdleIntention(self.application)

                return

        else:

            # If Human doesn't end the conversation,
            # act as if you understand what he/she is saying :)
            self.say("How interesting!")


if __name__ == '__main__':
    # Build the application, attach the starting (Idle) intention, then run
    application = MyApplication(config.get_backend())
    IdleIntention(application)
    application.run()
Ejemplo n.º 10
0
    def _is_name_statement(self, hypotheses):
        """Return True if any transcription hypothesis contains one of the name cues."""
        return any(
            cue in hypothesis.transcript.lower()
            for hypothesis in hypotheses
            for cue in self.CUES
        )

    def _save(self):
        """Persist the collected face feature vectors under the current name."""
        name = self._current_name
        features = np.concatenate(self.face_vectors).reshape(-1, OpenFace.FEATURE_DIM)

        # "NEW" is a reserved placeholder: never overwrite NEW.bin on disk
        if name == "NEW":
            return

        self.face_classifier.add(name, features)
        features.tofile(os.path.join(config.PEOPLE_NEW_ROOT, "{}.bin".format(name)))


if __name__ == '__main__':

    # Restart forever: whenever a run returns, boot a fresh application
    while True:
        application = ResponderApp(config.get_backend())
        intention = DefaultIntention(application)
        application.run()
Ejemplo n.º 11
0
                return True
        return False

    @staticmethod
    def is_negation(statement):
        """Return True if any word of *statement* is a known negation word."""
        return any(word in NEGATION for word in statement.split())

    @staticmethod
    def goodbye(statement):
        """Return True if *statement* equals a goodbye phrase (lowercased, punctuation stripped)."""
        return any(
            statement == re.sub('[?!.;,]', '', phrase.lower())
            for phrase in GOODBYE
        )

    def end_conversation(self):
        """Say a random farewell and hand control back to the idle intention."""
        farewell = choice(["See you later", "ByeBye", "Till another time", "It was good having talked to you", "Goodbye"])
        self.say("{}!".format(farewell))
        IdleIntention(self.application)

    def save_person(self):
        """Write the accumulated face feature vectors for this person to disk."""
        destination = os.path.join(config.NEW_FACE_DIRECTORY, "{}.bin".format(self._name))
        np.concatenate(self._face).tofile(destination)


if __name__ == '__main__':
    # Build the app, install the idle intention, then run
    application = DefaultApp(config.get_backend())
    IdleIntention(application)
    application.run()
Ejemplo n.º 12
0
        # Just reply with a random statement for now
        self.say(choice(["Interesting", "Right", "I see", "Ok"]))
        processed_utterance = language.analyze(self.chat, self.brain)

        if processed_utterance.type == language.UtteranceType.QUESTION:
            brain_response = self.brain.query_brain(processed_utterance)
            print(language.utils.reply_to_question(brain_response, []))

        elif processed_utterance.type == language.UtteranceType.STATEMENT:
            brain_response = self.brain.update(processed_utterance)
            #print(self.brain.brain_help.phrase_update(response))

        else:
            brain_response = 'unknown type'

        print(brain_response)
        return 0

    def say(self, text, animation=None, block=False):
        """Speak *text*, then mirror it into the active chat as the robot's utterance."""
        # Let the parent component perform the actual Text-to-Speech
        super(ContextApp, self).say(text, animation, block)

        # Record the robot's own words in the chat history, when a chat is running
        if self.has_chat:
            self.chat.add_utterance(text, me=True)


if __name__ == '__main__':
    # Run the application using the settings in pepper.config
    application = ContextApp(config.get_backend())
    application.run()
Ejemplo n.º 13
0
    def on_face_known(self, faces):
        """Greet each recognised person by name, rate-limited per person.

        Called every time a known face is detected.
        """
        for person in faces:
            name = person.name
            if self.is_greeting_appropriate(name):
                self.say("Hello, {}!".format(name))

    def on_face_new(self, faces):
        """Announce how many unknown people are visible, rate-limited via the "new" key.

        Called every time an unknown face is detected.
        """
        if not self.is_greeting_appropriate("new"):
            return
        self.say("I see {} new people!".format(len(faces)))

    def is_greeting_appropriate(self, name):
        """Return True when *name* may be greeted again, refreshing its greeting time."""
        last_greeting = self.nametime.get(name)

        # Greeted too recently: not appropriate
        if last_greeting is not None and (time() - last_greeting) <= self.GREET_TIMEOUT:
            return False

        self.nametime[name] = time()
        return True


if __name__ == "__main__":
    # Start the greeting application on the backend specified in the global configuration file
    GreetingApplication(config.get_backend()).run()
Ejemplo n.º 14
0
            image = objects[0].image
            image.to_file(self.output)

            with open(
                    os.path.join(self.output,
                                 "{}_obj.json".format(image.hash)),
                    "w") as json_file:
                json.dump([obj.dict() for obj in objects], json_file)

            for obj in objects:

                hash = str(obj.id)

                path = os.path.join(self.output, image.hash, hash)
                os.makedirs(path)

                rgb = obj.image.get_image(obj.image_bounds)
                depth = obj.image.get_depth(obj.image_bounds)
                meta = obj.dict()

                Image.fromarray(rgb).save(os.path.join(path, "rgb.png"))
                np.save(os.path.join(path, "depth.npy"), depth)
                with open(os.path.join(path, "meta.json"), "w") as json_file:
                    json.dump(meta, json_file)

            self.log.info(objects)


if __name__ == '__main__':
    # Dump camera/object data using the configured backend
    backend = config.get_backend()
    CameraDumpApp(backend).run()