def process_call(line, contents, subroutines):
    """Expand a single ``@call(name, arg, ...)`` directive into plain lines.

    Yields the generated Line objects: one MOV per argument into the shared
    $argN slots, the return-address MOV, the JMP to the subroutine, and the
    return label. Aborts via fatal_error() on an unknown subroutine name or
    a wrong argument count.
    """
    body = contents.replace('@call', '').strip('()')
    pieces = body.split(',')
    name, raw_args = pieces[0], pieces[1:]
    # Arguments may carry a trailing ')' left over from the directive syntax.
    call_args = [piece.strip(' )') for piece in raw_args]
    if name not in subroutines:
        fatal_error('Unknown subroutine: {}'.format(name), AssemblerNameError,
                    line)
    expected = subroutines[name]
    if len(call_args) != expected:
        fatal_error(
            'Wrong number of arguments: Expected {}, got {}'.format(
                expected, len(call_args)),
            AssemblerException, line)
    debug('@call of {}'.format(name))
    # Copy every argument into its $argN slot.
    for index, value in enumerate(call_args):
        yield build_line('MOV $arg{} {}'.format(index, value))
    # Record the return address, jump, and emit the return label.
    ret_id = next(call_counter)
    yield build_line('MOV $jump_back :ret{}'.format(ret_id))
    yield build_line('JMP :{}'.format(name))
    yield build_line('ret{}:'.format(ret_id))
def get_question_context(self, conversation, current_question):
    """Map the stored response onto a question-context object.

    Prefers an intent handler (``conversation.intent_<intent>``); falls back
    to ``conversation.free_answer`` when free text is allowed. Returns the
    context produced by the handler, or False when no context could be built
    (no response, no usable intent, or no applicable handler).
    """
    question_context = None
    if self.response is None:
        # Bug fix: the old code logged this and then dereferenced
        # self.response.intent anyway, raising AttributeError. Bail out.
        debug("Response is not set")
        return False
    if hasattr(self.response, 'intent') and self.response.intent is not None:
        # Moved inside the guard: logging the intent before the hasattr
        # check crashed on responses without an intent attribute.
        debug(self.response.intent)
        conversation_intent_method = getattr(
            conversation, 'intent_' + self.response.intent)
        question_context = conversation_intent_method(
            response=self.response)
    else:
        # Does not have an intent
        debug("Intent is none")
        if self.free_text_answer_allowed is True:
            debug("Allow free text")
            if hasattr(conversation, 'free_answer'):
                question_context = conversation.free_answer(
                    question=current_question, response=self.response)
            else:
                # str() fix: concatenating the conversation object itself
                # to a string raised TypeError.
                debug(str(conversation) + " does not have free_answer method")
    if question_context is None:
        return False
    return question_context
def process_call(line, contents, subroutines):
    """Expand an ``@call(name, arg, ...)`` directive into plain lines.

    :param line: Line object the directive came from (used for error reports)
    :param contents: stripped text of the directive
    :param subroutines: mapping of subroutine name -> expected argument count
    Yields the generated Line objects; aborts via fatal_error() on an unknown
    name or a wrong argument count.
    """
    contents = contents.replace('@call', '')
    contents = contents.strip('()')
    parts = contents.split(',')
    name = parts[0]
    # Arguments may carry a trailing ')' from the directive syntax.
    args = [s.strip(' )') for s in parts[1:]]
    if name not in subroutines:
        fatal_error('Unknown subroutine: {}'.format(name), AssemblerNameError, line)
    if len(args) != subroutines[name]:
        msg = 'Wrong number of arguments: Expected {}, got {}'.format(
            subroutines[name], len(args))
        fatal_error(msg, AssemblerException, line)
    debug('@call of {}'.format(name))
    # Copy each argument into the shared $argN slots.
    for i, arg in enumerate(args):
        yield build_line('MOV $arg{} {}'.format(i, arg))
    # Record the return address, jump to the subroutine, and emit the label
    # the subroutine will jump back to.
    counter = next(call_counter)
    yield build_line('MOV $jump_back :ret{}'.format(counter))
    yield build_line('JMP :{}'.format(name))
    yield build_line('ret{}:'.format(counter))
def callback(self, recognizer, audio):
    """Speech-recognition callback: transcribe *audio* via Wit.ai and, if
    the user addressed the assistant by (a mis-hearing of) its name, greet
    them and start the command centre.

    :param recognizer: speech_recognition Recognizer instance
    :param audio: captured AudioData to transcribe
    """
    from App.Core import user_manager
    user_manager.get_user()
    debug("I heard something")
    # received audio data, now recognize it (recognize_wit despite the
    # Google-flavoured messages below; the sr exception types are shared)
    try:
        command = recognizer.recognize_wit(audio, api_key)
        debug("User said: " + command)
        # Common mis-hearings of the assistant's name; matching is done on
        # the lowercased, space-stripped transcript.
        aliasses = {'poepie', 'poppie', 'poppy', 'moppie', 'pappie'}
        for alias in aliasses:
            if str.lower(_("Hello") + alias) in str.lower(command).replace(
                    " ", ""):
                tts.say(
                    _("Hello %s, what can I do for you?") %
                    user_manager.user.name)
                self.command_centre.start()
    except sr.UnknownValueError:
        # NOTE(review): messages say Google, but recognize_wit is used above.
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print(
            "Could not request results from Google Speech Recognition service; {0}"
            .format(e))
def ask(self):
    """Speak the current question out loud, prefixing a short notice when
    the question is being repeated."""
    if self.is_repeat_question is True:
        tts.say(_("I'll repeat the question"))
        debug("Repeating question")
    question_text = self.question.ask_question()
    debug("Question: " + question_text)
    tts.say(question_text)
def start(self):
    """Application entry point: wake the robot, pick a conversation based on
    whether a user is already known, run it, then hand control to the
    command listener."""
    # Sit up robot whenever available
    psm.go_to_state(StandBy())
    # Try and get the user from the user manager
    user_manager.get_user()
    # When user is available, we'll greet the user, otherwise start introduction
    if user_manager.has_user() is True:
        debug("Has a user")
        conversation_manager.conversation = Greetings.Greetings
    else:
        debug("Does not have a user")
        conversation_manager.conversation = Introduction.Introduction
    # Get the conversation handler
    conversation = conversation_manager.get_conversation()
    # Start the conversation
    conversation_handler.start(conversation)
    # Commit changes to prevent data loss whilst listening to the user in
    # the command centre
    db.commit()
    command_listener = CommandListener()
    command_listener.start()
    # Commit changes to the database
    db.commit()
def preprocessor_constants(lines):
    """
    Replaces constant usage with the defined value.

    Example:
        $const = 5
        MOV [2] $const
    Results in:
        MOV [2] 5

    Assignment lines are consumed (not yielded); '[_]' values are replaced
    by an auto-incremented memory cell via automem().

    :type lines: list[Line]
    """
    # automem() reads this module-level counter; reset it per run.
    global automem_counter
    automem_counter = count()
    constants = {}
    for lineno, line in enumerate(lines):
        tokens = []
        iterator = iter(line.contents.split())
        is_assignment_line = False
        # Process all tokens in this line; neighborhood() yields each token
        # together with a lookahead so '=' can be detected.
        for token, next_token in neighborhood(iterator):
            if token[0] == '$':
                const_name = token[1:]
                if next_token == '=':
                    # Found assignment, store the associated value.
                    # next(iterator) skips past the '=' lookahead token.
                    is_assignment_line = True
                    value = next(iterator)
                    if const_name in constants:
                        warn('Redefined ${}'.format(const_name),
                             RedefinitionWarning, line)
                    if value == '[_]':
                        # Process auto increment memory
                        value = automem(line)
                    constants[const_name] = value
                else:
                    # Found usage of constant, replace with stored value
                    try:
                        tokens.append(constants[const_name])
                    except KeyError:
                        fatal_error('No such constant: ${}'.format(const_name),
                                    NoSuchConstantError, line)
            else:
                # Uninteresting token
                tokens.append(token)
        # Skip assignment lines
        if not is_assignment_line:
            debug('Constants:', constants)
            yield set_contents(line, ' '.join(tokens))
def get_current_weather(self):
    """Fetch and cache the current weather for ``self.location``.

    Refreshes ``self.weather`` from the OWM client when a location is set;
    otherwise logs a hint. Returns the cached weather object, or None when
    no weather has ever been fetched (previously this raised
    AttributeError because ``self.weather`` did not exist yet).
    """
    if self.location is not None:
        observation = self.client.weather_at_place(self.location)
        self.weather = observation.get_weather()
    else:
        debug(
            "Location is not set, set location before calling this method")
    # Guard: self.weather may never have been assigned.
    return getattr(self, 'weather', None)
def find_answer_count_results(path):
    """Answer a trivia question by counting search-result frequencies.

    :param path: either a path to a screenshot (OCR'd for the question and
        three answers) or an already-extracted [question, a1, a2, a3] list.
    Returns whatever print_scores() picked as the winner (False or the
    winning answer).
    """
    settings.urls_searched = 0
    #---------------------OCR if path is image--------------------------
    if not isinstance(path, list):
        question_answers = get_question_answers(path)
    else:
        question_answers = path
    question = question_answers[0].lower()
    answer1 = question_answers[1].lower()
    answer2 = question_answers[2].lower()
    answer3 = question_answers[3].lower()
    #--------------Set up variables when called for the first time for one question---------------
    # answer1 doubles as the "have we seen this question already" marker.
    if settings.last_first_answer != answer1:
        settings.isNegative = False
        settings.isTermino = False
        settings.numbers_tried = 0
        settings.start_time = time.time()
        settings.last_first_answer = answer1
        settings.time_out = 2
        # NOTE(review): assumed the training-data dump belongs inside the
        # first-call setup (one entry per new question) — confirm intent.
        if settings.saveAnsweredQuestions:
            text_file = open("trainer.py", "a")
            text_file.write("find_answer(['" + question + "','" + answer1 +
                            "','" + answer2 + "','" + answer3 + "'])\n")
            text_file.close()
    #---------------------Process question-----------------------------------
    question = processQuestion(question)
    #---------------------Search Google-----------------------------------
    questions_with_answer = [[question, answer1], [question, answer2],
                             [question, answer3]]
    # get frequencies using a worker-process pool
    pool = Pool(processes=settings.number_of_workers)
    summed_frequencies = pool.map(getNumberOfResults, questions_with_answer)
    pool.terminate()
    #--------------------------------Print scores----------------------------
    # success is either False or the winner
    winner = print_scores([question_answers[0], answer1, answer2, answer3],
                          summed_frequencies)
    settings.numbers_tried = settings.numbers_tried + 1
    #-----------------------------Search again or return----------------------------
    # #if not sucessful search again but this time including the answers in the question and taking a bit more time
    # if settings.isNotSure and settings.numbers_tried<2:
    #     time_out = settings.second_time_out
    #     winner = find_answer([question_answers[0] + " " + answer1 + " " + answer2 + " " + answer3,answer1,answer2,answer3])
    # else:
    debug("--- querrying time: %s seconds ---" %
          (time.time() - settings.start_time))
    return winner
async def on_ready():
    """Discord startup hook: log the bot's identity and enforce the shruggie
    nickname on every server the bot is a member of."""
    # TODO: code to set/save the current nickname
    debug('login: {}#{}'.format(bot.user.name, bot.user.discriminator))
    for server in bot.servers:
        # Only rename when the current display name differs.
        if server.get_member(bot.user.id).display_name != SHRUGGIE_NICKNAME:
            await bot.change_nickname(
                server.get_member(bot.user.id),
                SHRUGGIE_NICKNAME,
            )
def free_answer(self, question, response):
    """Dispatch a free-text response to the matching ``set_<property>``
    setter for the question type being answered.

    Returns the setter's result for the first matching property, or None
    when nothing matched (including when the response carries no text).
    """
    for prop_name, question_cls in self.required_properties.items():
        if not isinstance(question, question_cls):
            continue
        if hasattr(response, 'text'):
            setter = getattr(self, 'set_' + prop_name)
            return setter(response.text)
        debug("Skip property")
def say(self, text): from config import USE_TTS_LANGUAGE # Fetch the fill path for the tts_result.mp3 in which the TTS results will be saved file_name = get_path('/resources/audio/tts_result.mp3') if os.path.isfile(file_name) is False: debug(str(file_name) + "File does not exists") tts = gTTS(text=str(text), lang=USE_TTS_LANGUAGE) tts.save(file_name) self.play_mp3_file(file_name)
def process_values(self):
    """Populate attributes from the raw Wit.ai response.

    Copies the transcript into ``self.text``; the 'intent' entity becomes
    ``self.intent`` / ``self.intent_confidence`` (as a percentage), every
    other entity label becomes a same-named attribute. Does nothing when
    no raw response is present.
    """
    if not self.raw_response:
        return
    self.text = self.raw_response['_text']
    debug(self.raw_response)
    for label, value in self.raw_response['entities'].items():
        if label == 'intent':
            self.intent = value[0]['value']
            self.intent_confidence = value[0]['confidence'] * 100
        else:
            setattr(self, label, value[0]['value'])
def listen(self):
    """Record one utterance from the default microphone and save it as
    microphone-results.wav for the speech API to consume."""
    r = sr.Recognizer()
    with sr.Microphone() as source:
        debug("Say something!")
        audio = r.listen(source)
    # Write audio to a file which we will send to the API
    with open(get_path("/resources/audio/microphone-results.wav"),
              "wb") as f:
        f.write(audio.get_wav_data())
def test_merge_directories_no_overlap(udf_name, sd1, sd2, sd3, prefix):
    """Test 4: Assert directories are correctly merged if no overlapping.

    Populates the same UDF path on both clients with disjoint directory
    trees (file creation is randomized), registers the folder on sd1, and
    asserts sd2 ends up with the union of both trees.
    """
    folderdir1 = os.path.join(sd1.homedir, udf_name)
    folderdir2 = os.path.join(sd2.homedir, udf_name)
    expected = []
    os.mkdir(folderdir1)
    # add folders and files to folderdir1
    on_sd1 = []
    for d in ('a', 'b', 'c'):
        dirpath = os.path.join(folderdir1, d)
        os.makedirs(dirpath)
        expected.append(d)
        for f in ('foo1.txt', 'bar1.txt', 'syncdaemon1.log'):
            # flip a coin: each file exists with probability 0.5
            if random.random() < 0.5:
                filepath = os.path.join(dirpath, f)
                open(filepath, 'w').close()
                on_sd1.append(os.path.join(d, f))
    debug(prefix, "created in sd1", on_sd1)
    expected.extend(on_sd1)
    os.mkdir(folderdir2)
    on_sd2 = []
    # add folders and files to folderdir2
    for d in ('z', 'y', 'x'):
        dirpath = os.path.join(folderdir2, d)
        os.makedirs(dirpath)
        expected.append(d)
        for f in ('foo2.txt', 'bar2.txt', 'syncdaemon2.log'):
            # flip a coin
            if random.random() < 0.5:
                filepath = os.path.join(dirpath, f)
                open(filepath, 'w').close()
                on_sd2.append(os.path.join(d, f))
    debug(prefix, "created in sd2", on_sd2)
    expected.extend(on_sd2)
    expected.sort()
    debug(prefix, "Expected", expected)
    # create the folder on sd1 and wait sd2 to finish working
    yield sd1.sdt.create_folder(path=folderdir1)
    yield sd1.sdt.wait_for_nirvana(.5)
    folders = yield sd1.sdt.get_folders()
    debug(prefix, 'get_folders completed!', folders)
    assert len(folders) == 1  # UDF was reported as expected
    yield sd2.sdt.wait_for_nirvana(.5)
    actual = walk_and_list_dir(folderdir2)
    debug(prefix, 'Found in SD2', actual)
    assert expected == actual, 'directory merge successful'
def preprocessor_subroutine(lines):
    """Expand @start()/@end()/@call() subroutine directives.

    When no subroutines are defined, the input is passed through unchanged
    (after checking there are no orphaned @call directives). Otherwise a
    preamble reserving $return, $jump_back and the $argN slots is emitted,
    followed by the translated lines.

    :type lines: list[Line]
    """
    reset_counters()
    subroutines = collect_definitions(lines)
    if not subroutines:
        # Check, if there are calls w/o definition
        for line in lines:
            if '@call' in line.contents:
                fatal_error('@call without subroutine definition',
                            AssemblerException, line)
        yield from lines
        return
    debug('{} subroutines found: {}'.format(len(subroutines), subroutines))
    # Build preamble: one $argN slot for the widest subroutine.
    yield build_line('$return = [_]')
    yield build_line('$jump_back = [_]')
    for i in range(max(subroutines.values())):
        yield build_line('$arg{} = [_]'.format(i))
    # Process start()/end()/call()
    in_subroutine = False
    call_count = 0
    for line in lines:
        #: :type: str
        contents = line.contents.strip()
        if contents.startswith('@call'):
            yield from process_call(line, contents, subroutines)
            call_count += 1
        elif contents.startswith('@start('):
            # Real error reporting instead of a bare `assert False`
            # (asserts vanish under `python -O` and carry no message).
            if in_subroutine:
                fatal_error('Nested @start is not allowed',
                            AssemblerException, line)
            in_subroutine = True
            yield from _subroutine_process_start(line, contents)
        elif contents.startswith('@end()'):
            if not in_subroutine:
                fatal_error('@end() without matching @start',
                            AssemblerException, line)
            debug('@end')
            in_subroutine = False
            yield Line(0, '<subroutine>', '', 'JMP $jump_back')
        else:
            yield line
def is_valid(self, poppy):
    """Return True when every motor state of this pose is valid on *poppy*;
    otherwise log the offending motor plus its expected range and return
    False at the first failure."""
    for state in self.motor_states:
        if state.is_valid(poppy):
            continue
        # Report which motor broke the pose and what range was expected.
        debug(state.motor.name + ' is not in valid state ' + self.name)
        debug('{0} should be between {1} and {2}'.format(
            str(state.motor.get_position(poppy)),
            str(state.valid_min_position),
            str(state.valid_max_position)))
        return False
    return True
def get_intent(self, audio_path):
    """Send the previously recorded wav file to Wit.ai and return the raw
    speech response, or None when reading/sending fails.

    NOTE(review): *audio_path* is currently unused — the wav path is
    hard-coded; confirm whether it should be honoured.
    """
    # Create the API client
    client = Wit(api_key)
    # Send the .wav file we've created earlier to the API
    try:
        with open(get_path('/resources/audio/microphone-results.wav'),
                  'rb') as f:
            resp = client.speech(f, None, {'Content-Type': 'audio/wav'})
        return resp
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; best-effort behaviour otherwise kept.
        debug("Microphone-results failed to open")
        return None
def _subroutine_process_start(line, contents):
    """Translate a ``@start(name)`` directive into the subroutine header:
    the entry label, a reset of $return, and a blank spacer line."""
    fields = contents.split(',')
    verify_start(fields, line)
    sub_name = fields[0].replace('@start', '').strip('( ')
    debug('@start: {}'.format(sub_name))
    yield build_line('{}:'.format(sub_name))
    yield build_line('MOV $return 0')
    yield build_line('')
def start(self):
    """Wait for the next recognized intent and dispatch it to the matching
    command; responses without an intent are logged and ignored."""
    from App.Core import intent_handler
    response = intent_handler.wait_for_answer()
    if hasattr(response, 'intent'):
        # Imports are local so commands are only loaded when needed.
        if response.intent == 'get_day':
            from CommandCentre.commands import GetDay
            GetDay().execute()
        elif response.intent == 'get_weather':
            from CommandCentre.commands import GetWeather
            GetWeather().set_response(response).execute()
    else:
        debug("No intent found or something")
def confirming_answer(self):
    """Ask the user to confirm their last answer.

    Returns True on a confirm intent, False on a deny intent (after
    announcing the question will be repeated), and implicitly None when
    no usable intent came back.
    """
    if hasattr(self.response, 'text'):
        tts.say(
            _("You've answered %s, is that right?") % self.response.text)
    confirmation_intent_response = intent_handler.wait_for_answer()
    if hasattr(confirmation_intent_response, 'intent'):
        if confirmation_intent_response.intent == 'confirmation_confirm':
            debug("Confirmed")
            return True
        elif confirmation_intent_response.intent == 'confirmation_deny':
            debug("Repeat question")
            tts.say(_("I'll repeat the question"))
            return False
def test_unsuscribe_delete_subscribe(udf_name, sd1, sd2, sd3, prefix): """Test 14: unsubscribe and subsc., removing everything in the middle.""" # create udf folder = yield create_udf(udf_name, sd1, prefix) folder_id = folder["volume_id"] udf_path = folder["path"] debug(prefix, 'create_folder completed!', folder) assert folder['subscribed'], 'sd1 must be subscribed' yield sd1.sdt.wait_for_nirvana(.5) # un-subscribe and check yield sd1.sdt.unsubscribe_folder(folder_id) folders = yield sd1.sdt.get_folders() assert len(folders) == 1, "SD1 has udfs != 1 (%d)" % len(folders) assert not folders[0]['subscribed'], 'sd1 must NOT be subscribed' debug(prefix, 'unsubscribed!') # remove everything shutil.rmtree(udf_path) debug(prefix, 'everything removed from disk') yield sd1.sdt.wait_for_nirvana(.5) # subscribe and wait yield sd1.sdt.subscribe_folder(folder_id) folders = yield sd1.sdt.get_folders() assert len(folders) == 1, "SD1 has udfs != 1 (%d)" % len(folders) assert folders[0]['subscribed'], 'sd1 must be subscribed' yield sd1.sdt.wait_for_nirvana(.5) debug(prefix, 'subscribed!') # check stuff in disk for sd1 in_disk = walk_and_list_dir(udf_path) expected = ['a_dir', os.path.join('a_dir', 'a_file.txt')] assert in_disk == expected, \ "Wrong stuff in disk: %s (expected: %s)" % (in_disk, expected)
def test_remove_udf(udf_name, sd1, sd2, sd3, prefix):
    """Test 8: Remove an UDF, assert correct deletion on both clients."""
    yield create_udf(udf_name, sd1, prefix)
    yield sd2.sdt.wait_for_nirvana(.5)
    assert os.path.exists(os.path.join(sd2.homedir, udf_name))
    # Both clients must agree on the UDF contents before removal.
    actual1 = walk_and_list_dir(os.path.join(sd1.homedir, udf_name))
    actual2 = walk_and_list_dir(os.path.join(sd2.homedir, udf_name))
    debug(prefix, "contents for SD1", actual1)
    assert actual1 == actual2
    debug(prefix, "Removing the UDF:", udf_name)
    shutil.rmtree(os.path.join(sd1.homedir, udf_name))
    yield sd1.sdt.wait_for_nirvana(.5)
    yield sd2.sdt.wait_for_nirvana(.5)
    events = [event['event_name'] for event in sd2.events]
    # NOTE(review): the assert message names VM_UDF_DELETED while the check
    # is for VM_VOLUME_DELETED — confirm which event name is intended.
    assert 'VM_VOLUME_DELETED' in events, 'VM_UDF_DELETED in sd2.events'
    debug(prefix, "VM_VOLUME_DELETED found on SD2")
    msg = 'UDF\'s contents must be deleted from file system on SD2.'
    udf_content = os.listdir(os.path.join(sd2.homedir, udf_name))
    debug(prefix, 'contents for SD2', udf_content)
    assert udf_content == [], msg
def start(self):
    """Return the question for the first required property that exists on
    the data source but is still empty; when every property is filled (or
    missing entirely), delegate to self.stop()."""
    for prop_name, question_cls in self.required_properties.items():
        if not hasattr(self.data_source, prop_name):
            # Data source does not expose this property at all; skip it.
            debug("Data source has no first name")
            continue
        current_value = getattr(self.data_source, prop_name)
        debug(current_value)
        if current_value == "":
            # Still unfilled: ask the matching question.
            return question_cls()
    return self.stop()
def _dirty_deps(t, depth, fromdir):
    """Return True when target *t* needs rebuilding.

    A target is dirty when it has no stamp, its mtime differs from the
    stamp, a 'c' (created-check) dependency now exists, or any 'm'
    (modified) dependency is itself dirty.

    :param t: target path, relative to *fromdir*
    :param depth: indentation string used only for debug output
    :param fromdir: base directory for resolving *t* (may be None/'')
    """
    debug('%s?%s\n' % (depth, t))
    stamptime = state.stamped(t, fromdir)
    if stamptime is None:  # `is None` instead of `== None` (identity check)
        debug('%s-- DIRTY (no stamp)\n' % depth)
        return True
    try:
        realtime = os.stat(os.path.join(fromdir or '', t)).st_mtime
    except OSError:
        # Missing file: treat mtime as 0 so it compares unequal below.
        realtime = 0
    if stamptime != realtime:
        debug('%s-- DIRTY (mtime)\n' % depth)
        return True
    for mode, name in state.deps(t, fromdir):
        if mode == 'c':
            if os.path.exists(name):
                debug('%s-- DIRTY (created)\n' % depth)
                return True
        elif mode == 'm':
            # NOTE(review): recurses via module-level `dirty_deps` (no
            # leading underscore), not this function — confirm a public
            # wrapper with that name exists and is the intended target.
            if dirty_deps(name, depth + ' ', fromdir=vars.BASE):
                #debug('%s-- DIRTY (sub)\n' % depth)
                return True
    return False
def get(self, url):
    """Route GET requests for the login pages: '' redirects to /login,
    'login' either skips straight to the zone list (valid cookie) or shows
    the form, 'logout' clears the cookie; anything else gets the form."""
    helpers.debug(__file__, url=url)
    if not url:
        self.redirect('/login')
        return
    if url == 'login' and self.get_secure_cookie('rcloud_login'):
        # Already holding a login cookie: validate it before skipping
        # the login form.
        if self._verify_api_key():
            self.redirect('/zones/list')
        else:
            self.render('login.py.html')
        return
    if url == 'logout':
        self.clear_cookie('rcloud_login')
        self.redirect('/login')
        return
    self.render('login.py.html')
def sendEmail(cls, to, subject, html, reply_to=None):
    """Send an HTML email (with a stripped plain-text alternative) to every
    address in *to*, via SendGrid when an API key is configured and via the
    App Engine mail API otherwise.

    :param to: list of recipient addresses
    :param subject: message subject
    :param html: HTML body; the text body is derived from it
    :param reply_to: optional reply-to address
    """
    body = helpers.strip_html(html)
    if SENDGRID_API_KEY:
        message = sgmail.Mail()
        message.set_from(sgmail.Email(SENDER_EMAIL))
        message.set_subject(subject)
        message.add_content(sgmail.Content('text/html', html))
        message.add_content(sgmail.Content('text/plain', body))
        # All recipients share one personalization (one API call).
        personalization = sgmail.Personalization()
        for to_email in to:
            personalization.add_to(sgmail.Email(to_email))
        message.add_personalization(personalization)
        if reply_to:
            message.set_reply_to(sgmail.Email(reply_to))
        if helpers.debug():
            # Sandbox mode: SendGrid validates but does not deliver.
            mail_settings = sgmail.MailSettings()
            mail_settings.set_sandbox_mode(sgmail.SandBoxMode(True))
            message.set_mail_settings(mail_settings)
        api = sendgrid.SendGridAPIClient(apikey=SENDGRID_API_KEY)
        api.client.mail.send.post(request_body=message.get())
    else:
        # Bug fix: each iteration previously passed `to=to` (the whole
        # list), sending the full recipient list once per recipient and
        # producing duplicate deliveries. Send to one address at a time.
        for to_email in to:
            if reply_to:
                mail.send_mail(sender=SENDER_EMAIL, to=to_email,
                    subject=subject, body=body, html=html,
                    reply_to=reply_to)
            else:
                mail.send_mail(sender=SENDER_EMAIL, to=to_email,
                    subject=subject, body=body, html=html)
def post(self, url):
    """Handle the login form: validate the submitted credentials, persist
    them in a secure cookie, and send the user to the zone list; invalid
    credentials get a 403."""
    helpers.debug(__file__, url=url)
    username = self.get_argument('username')
    key = self.get_argument('key')
    # verify the login information
    if self._verify_api_key(username, key):
        # save it in a cookie on client, then move on
        cookie_value = "%s %s" % (username, key)
        self.set_secure_cookie('rcloud_login', cookie_value)
        self.redirect('/zones/list')
    else:
        self.set_status(403)
        self.write("Bad username or API key.")
def preprocessor_labels(lines):
    """
    Replace labels with the referenced instruction number.

    Example:
        label:
        GOTO :label
    Results in:
        GOTO 0

    Lines consisting only of label definitions produce no output; unknown
    label references abort via fatal_error().

    :type lines: list[Line]
    """
    labels = collect_labels(lines)
    # Update references
    for line in lines:
        rewritten = []
        for token in line.contents.split():
            if token[0] == ':':
                # Label usage: substitute its instruction number.
                target = token[1:]
                try:
                    instruction_no = labels[target]
                except KeyError:
                    fatal_error('No such label: {}'.format(target),
                                NoSuchLabelError, line)
                else:
                    rewritten.append(str(instruction_no))
            elif token[-1] == ':':
                # Label definition: drop it from the output.
                continue
            else:
                rewritten.append(token)
        # Only yield lines that still carry tokens.
        if rewritten:
            debug('Labels:', labels)
            yield set_contents(line, ' '.join(rewritten))
def start_conversation(self):
    """Drive a full conversation: ask each question, listen, map the
    response to the next question via the conversation's context handlers,
    and stop at an EndOfConversation node.

    Returns True when the conversation finished normally; returns None
    early when the starting node is not a Question.
    """
    end_of_conversation = False
    starting_question = self.conversation.start()
    if issubclass(starting_question.__class__, Question):
        self.question = QuestionHandler(starting_question)
    else:
        debug(
            "Starting node is not of type question. Stopping conversation")
        return
    while end_of_conversation is False:
        self.question.ask()
        listener = ConversationListener()
        response = listener.listen()
        response_handler = ResponseHandler(response=response)
        # Free-text answers are only allowed for FreeAnswer questions.
        if issubclass(self.question.question.__class__, FreeAnswer):
            response_handler.free_text_answer_allowed = True
        question_context = response_handler.get_question_context(
            conversation=self.conversation,
            current_question=self.question.question)
        if isinstance(question_context, Success):
            # Answer accepted: acknowledge and advance.
            tts.say(question_context.intermediate_text)
            self.question.set(question=question_context.next_question)
        elif isinstance(question_context, AskForRepeat):
            # Answer not understood: stay on the same question.
            self.question.set(question=question_context.current_question)
            self.question.repeat_question(True)
        elif isinstance(question_context, Confirmation):
            # Double-check the answer with the user before moving on.
            confirmation_result = response_handler.confirming_answer()
            if confirmation_result is True:
                self.question.set(question=question_context.next_question)
                tts.say(question_context.intermediate_text)
            elif confirmation_result is False:
                self.question.set(
                    question=question_context.current_question)
        if isinstance(self.question.question, EndOfConversation):
            debug("End of conversation")
            if self.question.question.text != "":
                tts.say(self.question.question.text)
            end_of_conversation = True
    return True
def run(self):
    """Run the application ...

    Builds the Tornado app with a fixed cookie secret in debug mode (so
    cookies survive restarts) or a random 40-char secret otherwise, then
    starts the IO loop. This call blocks. Note: uses xrange (Python 2).
    """
    cookie_secret = "testingtestingtestingtesting" if self._debug else \
        "".join([chr(random.randint(0,254)) for n in xrange(40)])
    options = {
        'template_path': os.path.join(os.path.dirname(__file__),
                                      'templates'),
        'cookie_secret': cookie_secret,
        'debug': self._debug,
    }
    helpers.debug(__file__, repr(options))
    webapp = tornado.web.Application([
        (r"/(login|logout)?", handlers.LoginHandler),
        (r"/zones(.*)", handlers.ZoneHandler),
        ("/favicon.ico", handlers.NullHandler),
    ], **options)
    webapp.listen(self._port)
    tornado.ioloop.IOLoop.instance().start()
def test_sharing(udf_name, sd1, sd2, sd3, prefix):
    """Test 12: Shares inside UDF.

    Offers two shares (one View, one Modify) from directories inside the
    UDF; only the first is accepted, then both sides' share lists are
    checked for access level and acceptance state.
    """
    folder = yield create_udf(udf_name, sd1, prefix)
    dir1 = os.path.join(folder["path"], "a_dir")
    dir2 = os.path.join(folder["path"], "other_dir")
    os.mkdir(dir2)
    yield sd1.sdt.wait_for_nirvana(1)
    # offer one and accept it
    debug(prefix, "Offering share 1 from SD1")
    share_name_1 = "share_1_" + udf_name
    d_wait_share = sd3.wait_for_event('SV_SHARE_CHANGED')
    sd1.sdt.offer_share(dir1, sd3.username, share_name_1, "View")
    yield d_wait_share
    debug(prefix, "Received share in SD3")
    shares = yield sd3.sdt.get_shares()
    share = [x for x in shares if x['name'] == share_name_1][0]
    share_id = share['volume_id']
    debug(prefix, "Accepting share")
    yield sd3.sdt.accept_share(share_id)
    # offer a second one (never accepted)
    debug(prefix, "Offering share 2 from SD1")
    share_name_2 = "share_2_" + udf_name
    d_wait_share = sd3.wait_for_event('SV_SHARE_CHANGED')
    sd1.sdt.offer_share(dir2, sd3.username, share_name_2, "Modify")
    yield d_wait_share
    debug(prefix, "Received share in SD3")
    # check the shares of sd1
    shared = yield sd1.sdt.list_shared()
    share = [x for x in shared if x['name'] == share_name_1][0]
    assert share['access_level'] == "View", "share 1 in sd1 should be View!"
    assert share['accepted'], "share 1 in sd1 should be accepted"
    share = [x for x in shared if x['name'] == share_name_2][0]
    assert share['access_level'] == "Modify", "share 2 in sd1 should be Modif!"
    assert not share['accepted'], "share 2 in sd1 should NOT be accepted"
    # check the shared of sd3 (messages say sd2 but mean sd3's view)
    shares = yield sd3.sdt.get_shares()
    share = [x for x in shares if x['name'] == share_name_1][0]
    assert share['access_level'] == "View", "share 1 in sd2 should be View!"
    assert share['accepted'], "share 1 in sd2 should be accepted"
    share = [x for x in shares if x['name'] == share_name_2][0]
    assert share['access_level'] == "Modify", "share 2 in sd2 should be Modif!"
    assert not share['accepted'], "share 2 in sd2 should NOT be accepted"
def test_unsuscribe_subscribe(udf_name, sd1, sd2, sd3, prefix):
    """Test 15: unsubscribe and subscribe.

    The UDF must stay in the folder list throughout; only its 'subscribed'
    flag toggles.
    """
    folder = yield create_udf(udf_name, sd1, prefix)
    folder_id = folder["volume_id"]
    debug(prefix, 'create_folder completed!', folder)
    assert folder['subscribed'], 'sd1 must be subscribed'
    # un-subscribe and check
    yield sd1.sdt.unsubscribe_folder(folder_id)
    folders = yield sd1.sdt.get_folders()
    assert len(folders) == 1  # UDF was reported as expected
    assert not folders[0]['subscribed'], 'sd1 must NOT be subscribed'
    # subscribe and check
    yield sd1.sdt.subscribe_folder(folder_id)
    folders = yield sd1.sdt.get_folders()
    assert len(folders) == 1  # UDF was reported as expected
    assert folders[0]['subscribed'], 'sd1 must be subscribed'
def test_get_folders(udf_name, sd1, sd2, sd3, prefix):
    """Test 2: Assert folder list is correct.

    After sd1 creates the UDF, sd2's folder list must contain exactly one
    entry with the right path, subscription state, suggested path and ids.
    """
    udf_values = yield create_udf(udf_name, sd1, prefix)
    yield sd2.sdt.wait_for_nirvana(.5)
    folders = yield sd2.sdt.get_folders()
    debug(prefix, 'get_folders completed!', folders)
    assert len(folders) == 1, 'only 1 folder'
    folder = folders[0]
    assert folder['path'] == os.path.join(sd2.homedir, udf_name), \
        'path correct'
    assert folder['subscribed'], 'udf must be subscribed'
    assert folder['suggested_path'] == os.path.join('~', udf_name), \
        'suggested_path must be correct'
    assert folder['node_id'] == udf_values['node_id'], \
        'node_id mut be correct'
    assert folder['volume_id'] == udf_values['volume_id'], \
        'volume id must be correct'
def test_create_folder(udf_name, sd1, sd2, sd3, prefix):
    """Test 1: Assert correct folder creation.

    Creates the UDF on sd1 and checks that sd2 replicates both the
    directory listing and the content of the last listed file.
    """
    wait_for_udf_created = sd2.wait_for_event('VM_UDF_CREATED')
    yield create_udf(udf_name, sd1, prefix)
    yield wait_for_udf_created
    yield sd2.sdt.wait_for_nirvana(.5)
    folderdir1 = os.path.join(sd1.homedir, udf_name)
    folderdir2 = os.path.join(sd2.homedir, udf_name)
    expected = walk_and_list_dir(folderdir1)
    actual = walk_and_list_dir(folderdir2)
    debug(prefix, 'expected', expected)
    debug(prefix, 'actual', actual)
    assert expected == actual, 'UDF must be replicated correctly'
    # Compare the replicated file's content across BOTH clients. The second
    # open() used to read from folderdir1 again, comparing the file with
    # itself and making this check vacuous.
    with open(os.path.join(folderdir1, expected[-1])) as fd:
        content1 = fd.read()
    with open(os.path.join(folderdir2, actual[-1])) as fd:
        content2 = fd.read()
    assert content1 == content2, 'file content macth'
def create_udf(udf_name, sd, prefix, basedir=None):
    """Create an UDF on SD's home.

    Builds <basedir>/<udf_name>/a_dir/a_file.txt on disk, registers the
    folder with syncdaemon, waits for it to settle, and returns the folder
    dict via defer.returnValue (Twisted inlineCallbacks-style generator).

    :param basedir: base directory; defaults to the client's homedir.
    """
    if basedir is None:
        basedir = sd.homedir
    folderdir = os.path.join(basedir, udf_name)
    os.mkdir(folderdir)
    dirpath = os.path.join(folderdir, 'a_dir')
    os.makedirs(dirpath)
    filepath = os.path.join(dirpath, 'a_file.txt')
    create_file_and_add_content(filepath)
    debug(prefix, 'Attempting to create folder for path %r' % folderdir)
    folder = yield sd.sdt.create_folder(path=folderdir)
    # create_folder returns a one-element sequence; unpack it.
    folder, = folder
    debug(prefix, 'folder created with id %s' % (folder['volume_id'],))
    yield sd.sdt.wait_for_nirvana(.5)
    defer.returnValue(folder)
def test_sharing_udfitself(udf_name, sd1, sd2, sd3, prefix):
    """Test 18: Sharing the UDF itself.

    Offers the UDF root as a Modify share to sd3, accepts it, and checks
    both sides agree on access level and acceptance.
    """
    folder = yield create_udf(udf_name, sd1, prefix)
    # offer one and accept it
    debug(prefix, "Offering share 1 from SD1")
    share_name = "share_" + udf_name
    d_wait_share = sd3.wait_for_event('SV_SHARE_CHANGED')
    sd1.sdt.offer_share(folder["path"], sd3.username, share_name, "Modify")
    yield d_wait_share
    debug(prefix, "Received share in SD3")
    shares = yield sd3.sdt.get_shares()
    share = [x for x in shares if x['name'] == share_name][0]
    share_id = share['volume_id']
    debug(prefix, "Accepting share and wait acceptance to propagate")
    yield sd3.sdt.accept_share(share_id)
    yield sd1.sdt.wait_for_nirvana(.5)
    # check the shares of sd1
    shared = yield sd1.sdt.list_shared()
    share = [x for x in shared if x['name'] == share_name][0]
    assert share['access_level'] == "Modify", "share in sd1 should be Modify!"
    assert share['accepted'], "share in sd1 should be accepted"
    # check the shared of sd3 (messages say sd2 but mean sd3's view)
    shares = yield sd3.sdt.get_shares()
    share = [x for x in shares if x['name'] == share_name][0]
    assert share['access_level'] == "Modify", "share in sd2 should be Modify!"
    assert share['accepted'], "share in sd2 should be accepted"
def test_renaming_the_udf_itself(udf_name, sd1, sd2, sd3, prefix):
    """Test 7: Assert correct unsubcription when the UDF is renamed.

    Renaming the UDF directory on sd1 must unsubscribe it there while sd2
    keeps its copy subscribed.
    """
    folder1 = os.path.join(sd1.homedir, udf_name)
    folder2 = os.path.join(folder1, "udf_parent_dir")
    folder3 = os.path.join(folder2, "udf_dir")
    debug(prefix, 'test_create_folder using UDF at', folder3)
    os.makedirs(folder3)
    folder = yield sd1.sdt.create_folder(path=folder3)
    debug(prefix, 'create_folder completed!', folder)
    # FIXME: this signal is sometimes lost.
    events = [event['event_name'] for event in sd2.events]
    if 'VM_UDF_CREATED' not in events:
        yield sd2.wait_for_event('VM_UDF_CREATED')
    # rename and wait for nirvana
    #
    # FIXME: this will generate this message from pyinotify:
    #     ERROR: The path <folder3> of this watch
    #     <Watch ... > must not be trusted anymore
    # this is because the "general" watch manager doesn't have a watch on the
    # parent of the UDF, so it doesn't know how it was moved; actually we
    # don't care, because the UDF is being un-subscribed...
    #
    debug(prefix, 'rename!')
    os.rename(folder3, folder3 + ".renamed")
    debug(prefix, 'wait for nirvanas')
    yield sd1.sdt.wait_for_nirvana(1)
    yield sd2.sdt.wait_for_nirvana(1)
    # both SDs should have the UDF, the first one should have it unsuscribed
    folders = yield sd1.sdt.get_folders()
    udfs = [f for f in folders if f['path'] == folder3]
    assert len(udfs) == 1, "SD1 has udfs != 1 (%d)" % len(udfs)
    assert not udfs[0]['subscribed'], "The UDF of SD1 is subscribed!"
    folders = yield sd2.sdt.get_folders()
    folder_in_sd2 = os.path.join(sd2.homedir, udf_name, "udf_parent_dir",
                                 "udf_dir")
    udfs = [f for f in folders if f['path'] == folder_in_sd2]
    # Fixed copy-pasted diagnostics: these assertions are about SD2.
    assert len(udfs) == 1, "SD2 has udfs != 1 (%d)" % len(udfs)
    assert udfs[0]['subscribed'], "The UDF of SD2 is not subscribed!"
def test_no_events_from_ancestors_if_unsubsc(udf_name, sd1, sd2, sd3, prefix): """Test 17: Watches are removed in ancestors.""" # structure: # base_dir # \---parent # |--- udf_dir1 # \--- middle # \--- udf_dir2 # # unsubscribing udf2 should remove the watch of "middle", but not from # "homedir", as the later is also an ancestor of other udf base_dir = os.path.join(sd1.homedir, udf_name) parent = os.path.join(base_dir, "parent") udf_dir1 = os.path.join(parent, "udf_dir1") middle = os.path.join(parent, "middle") udf_dir2 = os.path.join(middle, "udf_dir2") os.makedirs(udf_dir1) os.makedirs(udf_dir2) yield sd1.sdt.create_folder(path=udf_dir1) yield sd1.sdt.create_folder(path=udf_dir2) debug(prefix, 'create_folders completed!') yield sd1.sdt.wait_for_nirvana(.5) # rename udf2 and wait for nirvana debug(prefix, 'rename!') os.rename(udf_dir2, udf_dir2 + ".renamed") debug(prefix, 'wait for nirvana') yield sd1.sdt.wait_for_nirvana(1) # check that UDF1 ancestors still have the watches by renaming # 'parent' and verifying that UDF1 is unsubscribed; there's no # way to check that 'middle' lost its watch debug(prefix, 'check!') os.rename(parent, parent + ".renamed") folders = yield sd1.sdt.get_folders() udf = [x for x in folders if x['path'] == udf_dir1][0] assert not udf['subscribed'], "%s of SD1 is subscribed!" % udf
def test_renaming_ancestor(udf_name, sd1, sd2, sd3, prefix):
    """Test 6: Assert correct unsubscription when an ancestor is renamed.

    Renaming the UDF's parent directory on sd1 must unsubscribe the UDF
    there while sd2 keeps its copy subscribed.
    """
    folder1 = os.path.join(sd1.homedir, udf_name)
    folder2 = os.path.join(folder1, "udf_parent_dir")
    folder3 = os.path.join(folder2, "udf_dir")
    debug(prefix, 'test_create_folder using UDF at', folder3)
    os.makedirs(folder3)
    folder = yield sd1.sdt.create_folder(path=folder3)
    debug(prefix, 'create_folder completed!', folder)
    yield sd2.sdt.wait_for_nirvana(.5)
    # FIXME: this signal is sometimes lost.
    events = [event['event_name'] for event in sd2.events]
    if 'VM_UDF_CREATED' not in events:
        yield sd2.wait_for_event('VM_UDF_CREATED')
    # rename and wait for nirvana
    debug(prefix, 'rename!')
    os.rename(folder2, folder2 + ".renamed")
    debug(prefix, 'wait for nirvanas')
    yield sd1.sdt.wait_for_nirvana(1)
    yield sd2.sdt.wait_for_nirvana(1)
    # both SDs should have the UDF, the first one should be unsuscribed
    folders = yield sd1.sdt.get_folders()
    udfs = [f for f in folders if f['path'] == folder3]
    assert len(udfs) == 1, "SD1 has udfs != 1 (%d)" % len(udfs)
    assert not udfs[0]['subscribed'], "The UDF of SD1 is subscribed!"
    folders = yield sd2.sdt.get_folders()
    folder_in_sd2 = os.path.join(sd2.homedir, udf_name, "udf_parent_dir",
                                 "udf_dir")
    udfs = [f for f in folders if f['path'] == folder_in_sd2]
    # Fixed copy-pasted diagnostics: these assertions are about SD2.
    assert len(udfs) == 1, "SD2 has udfs != 1 (%d)" % len(udfs)
    assert udfs[0]['subscribed'], "The UDF of SD2 is not subscribed!"
def handle_exception(self, exception, debug):
    """Top-level request error handler: log, render an error page, and
    email support about the failure.

    Note: the ``debug`` parameter is unused here; debug mode is
    re-checked via ``helpers.debug()`` instead.
    """
    # log the error
    logging.exception(exception)

    # if this is development, then print out a stack trace
    # (admin users also get the stack trace, presumably for production
    # triage — TODO confirm that is intended)
    stacktrace = None
    if helpers.debug() or (self.user and self.user.is_admin):
        import traceback
        stacktrace = traceback.format_exc()

    # if the exception is a HTTPException, use its error code
    # otherwise use a generic 500 error code
    if isinstance(exception, webapp2.HTTPException):
        status_int = exception.code
    else:
        status_int = 500

    self.renderError(status_int, stacktrace=stacktrace)

    # send an email notifying about this error
    self.deferEmail([SUPPORT_EMAIL], "Error Alert", "error_alert.html",
        exception=exception, user=self.user, url=self.request.url,
        method=self.request.method)
def test_unsubscribe_no_side_effects(udf_name, sd1, sd2, sd3, prefix):
    """Test 3: Assert sd1 can unsubscribe from an UDF without side effects."""
    # build the UDF on sd1 and check it starts out subscribed
    udf = yield create_udf(udf_name, sd1, prefix)
    assert udf['subscribed'], 'sd1 subscribed'
    yield sd2.sdt.wait_for_nirvana(.5)

    # is UDF created already?
    sd1_folders = yield sd1.sdt.get_folders()
    debug(prefix, 'folders for SD1', sd1_folders)

    # unsubscribe sd1; the call yields a one-element sequence
    volume_id = udf['volume_id']
    result = yield sd1.sdt.unsubscribe_folder(volume_id)
    udf, = result
    debug(prefix, 'unsubscribe_folder completed!', udf)
    assert not udf['subscribed'], 'sd1 no longer subscribed'

    # sd2 must keep its single, still-subscribed copy of the UDF
    sd2_folders = yield sd2.sdt.get_folders()
    debug(prefix, 'folders for SD2', sd2_folders)
    assert len(sd2_folders) == 1, 'only 1 folder'
    assert sd2_folders[0]['subscribed'], 'sd2 subscribed'
def test_renaming_ancestor_of_two(udf_name, sd1, sd2, sd3, prefix):
    """Test 16: Check behavior when an ancestor of more than one is renamed."""
    # layout: udf_name/udf_parent_dir/{udf_dir1,udf_dir2}; both leaves
    # become UDFs and share 'udf_parent_dir' as common ancestor
    udfdir1 = os.path.join(sd1.homedir, udf_name)
    udfdir2 = os.path.join(udfdir1, "udf_parent_dir")
    os.makedirs(udfdir2)
    udfdir3 = os.path.join(udfdir2, "udf_dir1")
    udfdir4 = os.path.join(udfdir2, "udf_dir2")
    # NOTE(review): the leaf dirs are never created on disk here, unlike
    # sibling tests — presumably create_folder makes them; TODO confirm
    yield sd1.sdt.create_folder(path=udfdir3)
    yield sd1.sdt.create_folder(path=udfdir4)
    debug(prefix, 'create_folders completed!')
    yield sd1.sdt.wait_for_nirvana(.5)

    # FIXME: this signal is sometimes lost.
    events = [event['event_name'] for event in sd2.events]
    if 'VM_UDF_CREATED' not in events:
        yield sd2.wait_for_event('VM_UDF_CREATED')

    # rename and wait for nirvana
    debug(prefix, 'rename!')
    os.rename(udfdir2, udfdir2 + ".renamed")
    debug(prefix, 'wait for nirvanas')
    yield sd1.sdt.wait_for_nirvana(1)
    yield sd2.sdt.wait_for_nirvana(1)

    # both SDs should have UDFs, the first one "unsubscribed"
    folders = yield sd1.sdt.get_folders()
    assert len(folders) == 2, "SD1 has udfs != 2 (%d)" % len(folders)
    for udf in folders:
        assert not udf['subscribed'], "%s of SD1 is subscribed!" % udf

    folders = yield sd2.sdt.get_folders()
    assert len(folders) == 2, "SD2 has udfs != 2 (%d)" % len(folders)
    for udf in folders:
        assert udf['subscribed'], "%s of SD2 is NOT subscribed!" % udf
def test_merge_directories_with_overlap(udf_name, sd1, sd2, sd3, prefix):
    """Test 5: Assert directories are correctly merge with overlapping."""
    # Structure to start
    #
    # client 1:
    #    .../a
    #    .../a/conflict.txt (content random)
    #    .../a/noconflict.txt (same content that 2)
    #    .../a/bar.txt
    #    .../b
    #
    # client 2:
    #    .../a
    #    .../a/conflict.txt (content random)
    #    .../a/noconflict.txt (same content that 1)
    #    .../a/beer.txt
    #    .../c
    #
    # Result after UDF creation and merge:
    #
    #   .../a/bar.txt and .../b are synced to client 2
    #   .../a/beer.txt and .../c are synced to client 1
    #   .../a/conflict.txt stays the same in one client, and in the other it
    #       goes to conflict (depending on which got first to the server)
    #   .../a/noconflict.txt stays ok in both clients
    #
    folderdir1 = os.path.join(sd1.homedir, udf_name)
    folderdir2 = os.path.join(sd2.homedir, udf_name)

    os.mkdir(folderdir1)

    # add folders and files to folderdir1
    dirpath = os.path.join(folderdir1, 'a')
    os.makedirs(dirpath)
    filepath = os.path.join(dirpath, 'conflict.txt')
    create_file_and_add_content(filepath, content='content from SD1')
    filepath = os.path.join(dirpath, 'noconflict.txt')
    with open(filepath, "w") as fh:
        fh.write("same content")
    filepath = os.path.join(dirpath, 'bar.txt')
    create_file_and_add_content(filepath)
    dirpath = os.path.join(folderdir1, 'b')
    os.makedirs(dirpath)

    os.mkdir(folderdir2)

    # add folders and files to folderdir2
    dirpath = os.path.join(folderdir2, 'a')
    os.makedirs(dirpath)
    filepath = os.path.join(dirpath, 'conflict.txt')
    create_file_and_add_content(filepath, content='content from SD2')
    filepath = os.path.join(dirpath, 'noconflict.txt')
    with open(filepath, "w") as fh:
        fh.write("same content")
    filepath = os.path.join(dirpath, 'beer.txt')
    create_file_and_add_content(filepath)
    # bug fix: 'c' belongs to client 2's tree (see diagram above); it was
    # previously created under folderdir1, which left the client2->client1
    # direction of the merge untested (the final merged listing is the
    # same either way, so the assertions below are unchanged)
    dirpath = os.path.join(folderdir2, 'c')
    os.makedirs(dirpath)

    # wait for all changes to settle down
    yield sd2.sdt.wait_for_nirvana(.5)
    yield sd1.sdt.wait_for_nirvana(.5)

    # prepare the info to compare
    expected_no_conflict = ['a', 'b', 'c',
                            os.path.join('a', 'bar.txt'),
                            os.path.join('a', 'beer.txt'),
                            os.path.join('a', 'noconflict.txt'),
                            os.path.join('a', 'conflict.txt')]
    expected_no_conflict.sort()
    debug(prefix, 'expected without conflict', expected_no_conflict)

    expected_with_conflict = copy(expected_no_conflict)
    expected_with_conflict.append(os.path.join('a', 'conflict.txt.u1conflict'))
    expected_with_conflict.sort()
    debug(prefix, 'expected with conflict', expected_with_conflict)

    # create the UDF and wait everything to stop
    yield sd1.sdt.create_folder(path=folderdir1)
    yield sd2.sdt.wait_for_nirvana(.5)
    yield sd1.sdt.wait_for_nirvana(.5)

    actual1 = walk_and_list_dir(folderdir1)
    debug(prefix, 'actual content from SD1', actual1)
    actual2 = walk_and_list_dir(folderdir2)
    debug(prefix, 'actual content from SD2', actual2)

    # we don't know which client will enter in conflict, so we
    # tested both ways.
    if actual1 != expected_no_conflict:
        assert actual1 == expected_with_conflict, \
            'directory merge must be correct for SD1'
        assert actual2 == expected_no_conflict, \
            'directory merge must be correct for SD2'
    else:
        assert actual1 == expected_no_conflict, \
            'directory merge must be correct for SD1'
        assert actual2 == expected_with_conflict, \
            'directory merge must be correct for SD2'
def post(self, url):
    """Handle create/edit form submissions for zones and records.

    ``url`` path shapes (after splitting on '/'):
      ''            -> create a new zone
      '<id>'        -> add a record to zone <id>
      '<id>/<name>' -> edit record <name> of zone <id>
    """
    helpers.debug(__file__, url = url)
    url = url.split('/')
    username, key = self.get_secure_cookie('rcloud_login').split()
    conn = clouddns.connection.Connection(username, key)
    email_address = self.get_cookie('rcloud_soa_email')

    # save the email address to be entered later
    if self.get_argument('email', default=""):
        email_address = self.get_argument('email')
        self.set_cookie('rcloud_soa_email', email_address)

    if len(url) < 2 or not url[1]:
        # add a zone
        d_name = self.get_argument('name')
        d_ttl = self.get_argument('ttl')
        conn.create_domain(d_name, d_ttl, email_address)
    elif len(url) > 2 and url[1].isdigit():
        # edit a record
        domain = conn.get_domain(int(url[1]))
        record = domain.get_record(url[2])
        r = {}
        r['name'] = self.get_argument('name', None)
        r['data'] = self.get_argument('data', None)
        r['ttl'] = self.get_argument('ttl', None)
        r['comment'] = self.get_argument('comment', None)
        if record.type in ('MX', 'SRV'):
            r['priority'] = self.get_argument('priority', None)
        # unset things that don't change; iterate over a snapshot of the
        # keys — deleting from a dict while iterating its live keys view
        # raises RuntimeError on Python 3 (bug fix: was `for field in
        # r.keys()`)
        for field in list(r):
            cur_val = record.__dict__[field]
            if cur_val and r[field] == cur_val:
                del r[field]
        record.update(**r)
        # we don't want to go back to the same edit record page
        del url[2]
    elif url[1].isdigit():
        # add a record (edit a zone not really applicable?)
        domain = conn.get_domain(int(url[1]))
        r = {}
        r['name'] = self.get_argument('name')
        r['type'] = self.get_argument('type')
        r['data'] = self.get_argument('data')
        r['ttl'] = self.get_argument('ttl', None)
        r['comment'] = self.get_argument('comment', None)
        if r['type'] in ('MX', 'SRV'):
            r['priority'] = self.get_argument('priority', None)
        domain.create_record(**r)
    self.redirect('/zones%s' % "/".join(url))
def get(self, url):
    """Dispatch GET requests for the zones UI.

    Routes (after splitting ``url`` on '/'):
      ''            -> redirect to the zone list
      'list'        -> render all zones
      '<id>'        -> render one zone with its records grouped by type
      '<id>/<name>' -> render a single record
    """
    helpers.debug(__file__, url = url)
    parts = url.split('/')
    username, key = self.get_secure_cookie('rcloud_login').split()
    conn = clouddns.connection.Connection(username, key)
    email_address = self.get_cookie('rcloud_soa_email')

    if len(parts) < 2 or not parts[1]:
        # root request
        self.redirect('/zones/list')
    elif parts[1] == 'list':
        # SHOW ZONES
        self.render('zones_list.py.html',
                    domains=conn.list_domains_info(),
                    username=username,
                    email=email_address)
    elif len(parts) > 2 and parts[1].isdigit():
        # EDIT RECORD
        domain = conn.get_domain(int(parts[1]))
        record = domain.get_record(parts[2])
        self.render('record_show.py.html', domain=domain, record=record)
    elif parts[1].isdigit():
        # EDIT ZONE (SHOW RECORDS)
        domain = conn.get_domain(int(parts[1]))
        records = domain.list_records_info()
        # bucket the records by type for the template; unknown types are
        # only listed in the flat 'records' sequence
        records_a_aaaa_cname = []
        records_mx_srv = []
        records_txt = []
        records_ns = []
        buckets = {'A': records_a_aaaa_cname,
                   'AAAA': records_a_aaaa_cname,
                   'CNAME': records_a_aaaa_cname,
                   'MX': records_mx_srv,
                   'SRV': records_mx_srv,
                   'TXT': records_txt,
                   'NS': records_ns}
        for rec in records:
            target = buckets.get(rec['type'])
            if target is not None:
                target.append(rec)
        self.render('zones_show.py.html',
                    domain=domain,
                    records=records,
                    email=email_address,
                    records_a_aaaa_cname=records_a_aaaa_cname,
                    records_mx_srv=records_mx_srv,
                    records_txt=records_txt,
                    records_ns=records_ns)
def check(failure):
    """The deferred must have failed; verify the failure is the expected one."""
    signalled = failure.type == tools.ErrorSignal
    debug(prefix, 'UDF creation failed. Error:', failure.type)
    assert signalled, 'failure must be a tools.ErrorSignal'
def test_disconnect_modify_connect(udf_name, sd1, sd2, sd3, prefix):
    """Test 13: Create UDF, disconnect the SD, do stuff, and then reconnect."""
    folder = yield create_udf(udf_name, sd1, prefix)
    folder_path = folder['path']
    other_dir = os.path.join(folder_path, 'other_dir')
    os.mkdir(other_dir)
    third_dir = os.path.join(folder_path, 'third_dir')
    os.mkdir(third_dir)
    yield sd1.sdt.wait_for_nirvana(.5)

    debug(prefix, 'Disconnecting SD1.')
    yield sd1.sdt.disconnect()  # disconnect SD1

    debug(prefix, 'Doing stuff in the file system of SD1.')
    # do stuff in the file system
    xyz_dir = os.path.join(folder_path, 'x', 'y', 'z')
    os.makedirs(xyz_dir)
    create_file_and_add_content(os.path.join(xyz_dir, 'new.txt'))

    # move a file within the UDF
    # (a_dir/a_file.txt is presumably set up by create_udf — TODO confirm)
    os.rename(os.path.join(folder_path, 'a_dir', 'a_file.txt'),
              os.path.join(xyz_dir, 'renamed_file.txt'))

    # move a folder outside the UDF to the root dir
    os.rename(os.path.join(folder_path, 'other_dir'),
              os.path.join(sd1.rootdir, udf_name + 'renamed_other_dir'))

    # move a folder outside the UDF to the home dir
    renamed_third_dir = os.path.join(sd1.homedir, 'renamed_third_dir')
    os.rename(os.path.join(folder_path, 'third_dir'), renamed_third_dir)

    # snapshot sd1's home before reconnecting; after sync, sd2 must match
    expected = set(walk_and_list_dir(sd1.homedir))
    debug(prefix, "Expected to have", expected)

    debug(prefix, 'Re connecting SD1.')
    yield sd1.sdt.connect()  # re-connect SD1
    yield sd1.sdt.wait_for_nirvana(.5)

    debug(prefix, 'Waiting for nirvana for SD2.')
    yield sd2.sdt.wait_for_nirvana(.5)  # wait for SD2 to get all the changes

    actual = set(walk_and_list_dir(sd2.homedir))
    debug(prefix, "Currently found", actual)
    debug(prefix, 'expected sym diff actual',
          expected.symmetric_difference(actual))
    # renamed_third_dir sits in sd1's home presumably outside any synced
    # volume, so it is the one path expected on sd1 but not on sd2
    assert expected.difference(actual) == set([u'renamed_third_dir']), \
        'SD1 home must have the same as SD2 except for renamed_third_dir.'
    assert actual.difference(expected) == set([]), \
        'SD2 home must have nothing extra than the SD1\'s.'
def test_unsusc_lotofchanges_subsc(udf_name, sd1, sd2, sd3, prefix): """Test 19: Merge should be done correctly.""" # some dirs and files udf_dir = os.path.join(sd1.homedir, udf_name) dir_a = os.path.join(udf_dir, "a") file_1 = os.path.join(dir_a, "file1") file_2 = os.path.join(dir_a, "file2") file_3 = os.path.join(dir_a, "file3") dir_b = os.path.join(udf_dir, "b") dir_c = os.path.join(udf_dir, "c") dir_d = os.path.join(udf_dir, "d") # we create an UDF and put: # - dir_a, with: # - file_1 # - file_2 # - dir_b # - dir_c folder, = yield sd1.sdt.create_folder(path=udf_dir) folder_id = folder["volume_id"] for d in (dir_a, dir_b, dir_c): os.mkdir(d) for f in (file_1, file_2): open(f, "w").close() debug(prefix, 'initial UDF completed!') yield sd1.sdt.wait_for_nirvana(.5) # unsubscribe yield sd1.sdt.unsubscribe_folder(folder_id) debug(prefix, 'unsubscribed!') # some changes: os.rmdir(dir_c) os.mkdir(dir_d) os.remove(file_2) open(file_3, "w").close() debug(prefix, 'changes made!') yield sd1.sdt.wait_for_nirvana(1) # subscribe again yield sd1.sdt.subscribe_folder(folder_id) debug(prefix, 'subscribed!') yield sd1.sdt.wait_for_nirvana(.5) yield sd2.sdt.wait_for_nirvana(.5) debug(prefix, 'changes propagated') # what the merge should do: # - dir_c is back from the server # - dir_d uploaded # - file_2 is back from the server # - file_3 uploaded to the server # - the rest should remain unchanged # to check, we verify everything in both clients expected = ['a', 'a/file1', 'a/file2', 'a/file3', 'b', 'c', 'd'] for which, sd in enumerate((sd1, sd2)): debug(prefix, 'check SD', sd) udf_dir = os.path.join(sd.homedir, udf_name) in_disk = walk_and_list_dir(udf_dir) assert in_disk == expected, "sd %s has bad stuff in "\ "disk: %s" % (which, in_disk)