def __init__(self, creds={}, recipients=[]): """ Default Constructor.""" self.language = 'el' self.askValidation = True self.waitOnAnswer = 5 self.rh = RappRobot() self.ch = RappPlatformAPI() self.emailCreds = {'username': '', 'password': ''} self.__load_email_credentials() if 'username' in creds: self.emailCreds['username'] = creds['username'] if 'password' in creds: self.emailCreds['password'] = creds['password'] print self.emailCreds if self.emailCreds['username'] == '' or self.emailCreds[ 'password'] == '': self.say( u'Δεν βρήκα καταχωρημένα τα στοιχεία λογαριασμού ηλεκτρονικού ταχυδρομίου. Παρακαλώ επικοινώνησε με την τεχνική υπηρεσια.' ) self.rh.humanoid_motion.goToPosture('Sit', 0.5) sys.exit(1) self.emailTitle = "NAO SendMail RApp" self.emailBody = "Hello I am NAO robot :)\n\n" + \ "This is an automated email that the user" + \ " requested to send among with the attachment files!\n\n" + \ "Robots are awesome!! :-)" self.recipients = recipients
def RappGeolocation(self, IP):
    """Resolve the geolocation of an IP address via the RAPP platform.

    :param IP: IP address string to geolocate.
    :returns: The platform response dict.
    :raises: Aborts the HTTP request with status 500 when the platform
        reports an error.
    """
    ch = RappPlatformAPI()
    response = ch.geolocation(IP)
    # BUGFIX: the original used `is not ""`, which compares object
    # identity rather than equality, so error responses could pass
    # through unreported. Use a proper inequality test.
    if response.get('error') != "":
        abort(500, response.get('error'))
    return response
def RappText2Speech(self, text, language):
    """Synthesize `text` in `language` and return the open audio file.

    The platform writes the audio to RAPPtext2speech_full_path; the
    caller receives the open file handle itself.
    """
    api = RappPlatformAPI(address=addr)
    api.textToSpeech(text, language, RAPPtext2speech_full_path)
    # On the RPi variant this used to be open(path).read() so the
    # payload was serializable; here the handle is returned directly.
    audio_file = open(RAPPtext2speech_full_path, 'r')
    return audio_file
def RappObjectRecCaffe(self, imageString, image):
    """Run Caffe object recognition on the uploaded image."""
    # Persist the incoming image (raw string or file) and hand the
    # server-side path to the recognizer.
    saved_path = imageORimageString(imageString, image)
    api = RappPlatformAPI(address=addr)
    return api.objectRecognitionCaffe(saved_path)
def RappLightDetection(self, imageString, image):
    """Detect light hazards (lights left on) on the uploaded image."""
    # Persist the incoming image (raw string or file) and hand the
    # server-side path to the hazard detector.
    saved_path = imageORimageString(imageString, image)
    api = RappPlatformAPI(address=addr)
    return api.hazardDetectionLights(saved_path)
def RappSpeechDetectionGoogle(self, audioString, audioFile, language,
                              audioSource):
    """Run Google speech recognition on the uploaded audio clip."""
    # Persist the incoming audio (raw string or file) first.
    saved_path = audioFileORaudioString(audioString, audioFile)
    api = RappPlatformAPI()
    return api.speechRecognitionGoogle(saved_path, audioSource, language)
def RappQrDetection(self, imageString, image):
    """Detect QR codes on the uploaded image.

    Aborts the HTTP request with status 500 when the platform reports
    an error; otherwise returns the platform response dict.
    """
    saved_path = imageORimageString(imageString, image)
    api = RappPlatformAPI()
    response = api.qrDetection(saved_path)
    if response.get('error') != "":
        abort(500, response.get('error'))
    return response
def __init__(self): """ Default Constructor.""" self.language = 'el' self.askValidation = True self.waitOnAnswer = 5 self.rh = RappRobot() self.ch = RappPlatformAPI() self._loc = self.ch.geolocation(Net.ipaddr_connected()) print '[*] Current Location: {0}'.format(self._loc)
def __init__(self):
    """Set up audio capture, noise calibration and the ROS publisher."""
    self.frames = []
    self.pa = pyaudio.PyAudio()
    self.stream = self.open_mic_stream()
    # Calibrate tap/noise thresholds from ambient sound before we
    # start listening.
    self.tap_threshold, self.noise_threshold = self.calibrate()
    self.hear = False
    self.ch = RappPlatformAPI()
    # Recognition results are published on the topic named by the
    # 'rec_topic' ROS parameter.
    rec_topic = rospy.get_param("rec_topic")
    self.publisher = rospy.Publisher(rec_topic, String, queue_size=10)
def getExercise(test_type="", test_subtype="", test_diff="", test_index=""): ch = RappPlatformAPI() response = ch.cognitiveExerciseSelect(test_type=test_type, test_subtype=test_subtype, test_diff=test_diff, test_index=test_index) if response['error'] != '': print response['error'] sys.exit(0) for key, val in response.iteritems(): print key, val testInstance = response['test_instance'] testType = response['test_type'] testSubType = response['test_subtype'] questions = response['questions'] possibAns = response['possib_ans'] correctAns = response['correct_ans'] language = response['lang'] cogTest = CognitiveTest(testType, testSubType, testInstance, questions, possibAns, correctAns, language) if testType == 'ArithmeticCts': if testSubType == 'BasicArithmeticCts': return BasicArithmericCts(cogTest) elif testSubType == 'TimeDifferenceCts': return TimeDifferenceCts(cogTest) elif testSubType == 'TransactionChangeCts': return TransactionChangeCts(cogTest) else: return None elif testType == 'AwarenessCts': if testSubType == 'TimeDayYearCts': return TimeDayYearCts(cogTest) else: return None elif testType == 'ReasoningCts': if testSubType == 'StoryTellingCts': return StoryTellingCts(cogTest) elif testSubType == 'WordRememberingCts': return WordRememberingCts(cogTest) else: return None else: return None
def setUp(self): self.ch = RappPlatformAPI() # Wait till the service is up using an erroneous call for fast return counter = 0 timeout = 120 while counter < timeout: counter = counter + 1 res = self.ch.cognitiveExerciseSelect(0) if res['error'] != u'Connection Error': break time.sleep(1) print 'Waiting for service... ' + str(timeout - counter) if counter == timeout: self.assertEqual('Connection error', True)
def RappDoorDetection(self, imageString, image):
    """Detect door hazards (open doors) on the uploaded image."""
    # Persist the incoming image (raw string or file) and hand the
    # server-side path to the hazard detector. (Dead, commented-out
    # PIL resizing experiments from the original were removed.)
    saved_path = imageORimageString(imageString, image)
    api = RappPlatformAPI()
    return api.hazardDetectionDoor(saved_path)
def __init__(self, email_username, email_pass, recipients=None):
    """Default constructor.

    :param email_username: Email account user name.
    :param email_pass: Email account password.
    :param recipients: Optional list of recipient email addresses.
    """
    self.language = 'el'
    self.askValidation = True
    self.waitOnAnswer = 5
    self.rh = RappRobot()
    self.ch = RappPlatformAPI()
    self.emailUsername = email_username
    self.emailPass = email_pass
    self.emailTitle = "NAO SendMail RApp"
    self.emailBody = "Hello I am NAO robot :)\n\n" + \
        "This is an automated email that the user" + \
        " requested to send among with the attachment files!\n\n" + \
        "Robots are awesome!! :-)"
    # BUGFIX: the original default `recipients=[]` is a mutable default
    # argument shared across every instance; use a None sentinel.
    self.recipients = [] if recipients is None else recipients
def RappSpeechDetectionSphinx(self, audioString, audioFile, language,
                              audioSource, words=None, sentences=None,
                              grammar=None):
    """Run Sphinx speech recognition on the uploaded audio clip."""
    # Persist the incoming audio (raw string or file) first.
    saved_path = audioFileORaudioString(audioString, audioFile)
    api = RappPlatformAPI(address=addr)
    return api.speechRecognitionSphinx(saved_path,
                                       language=language,
                                       audio_source=audioSource,
                                       words=words,
                                       sentences=sentences,
                                       grammar=grammar)
def __init__(self, cog_test):
    """Constructor.

    :param cog_test: CognitiveTest instance holding the exercise data.
    """
    self.language = cog_test.language
    self.audioSource = Constants.AUDIO_SOURCE
    self.tempDir = Constants.TEMP_DIR
    self.cogTest = cog_test
    self.recordTime = 5
    self.maxReruns = 3
    self.askValidation = True
    self.rh = RappRobot()
    self.ch = RappPlatformAPI()
    # Running tally of the user's answers, used for the final report.
    self.performance = {'correct_answers': 0,
                        'wrong_answers': 0,
                        'final_score': 0}
    self.print_info()
#!/usr/bin/env python import time import sys from rapp_robot_api import RappRobot rh = RappRobot() from RappCloud import RappPlatformAPI ch = RappPlatformAPI() rh.audio.setVolume(50) rh.audio.speak("Hello") while True: rh.audio.speak('Do you want to recognize an object or learn it?') r = rh.audio.speechDetection(['learn', 'recognize', 'exit'], 10.0, 'English') if r['word'] == 'learn': rh.audio.speak("Ok, lets learn the object") rh.audio.speak("Show me the object") time.sleep(3) rh.vision.capturePhoto('/home/nao/temp.jpg', 'front', '640x960') ch.objectDetectionClearModels() ch.objectDetectionLearnObject('/home/nao/temp.jpg', 'test') elif r['word'] == 'recognize': rh.audio.speak("I am going to recognize the object") ch.objectDetectionLoadModels(['test']) rh.vision.capturePhoto('/home/nao/temp.jpg', 'front', '640x960') r = ch.objectDetectionFindObjects('/home/nao/temp.jpg') if len(r['found_names']) != 0: rh.audio.speak("I have seen the object!") else:
def RappWeatherForecast(self, city, weather_reporter, metric):
    """Fetch a weather forecast report through the RAPP platform."""
    api = RappPlatformAPI()
    forecast = api.weatherReportForecast(city, weather_reporter, metric)
    return forecast
def RappWeatherCurrent(self, city, weatherReporter, metric):
    """Fetch the current weather report through the RAPP platform."""
    api = RappPlatformAPI()
    return api.weatherReportCurrent(city, weatherReporter, metric)
def setUp(self):
    """Create a fresh RAPP platform API client before each test."""
    self.ch = RappPlatformAPI()
def setUp(self):
    """Locate the rapp_testing_tools package and create the API client."""
    # Resolve the package directory through rospack so test fixtures
    # can be loaded relative to it.
    pack_finder = rospkg.RosPack()
    self.pkgDir = pack_finder.get_path('rapp_testing_tools')
    self.ch = RappPlatformAPI()