def _free_experiment(self, lab_coordaddress, reservation_id, lab_session_id, experiment_instance_id):
    try:
        initial_time = datetime.datetime.now()
        try:
            labserver = self.locator[lab_coordaddress]
            experiment_response = labserver.free_experiment(SessionId.SessionId(lab_session_id))
        except Exception as e:
            if DEBUG:
                traceback.print_exc()
            log.log(ReservationConfirmer, log.level.Error, "Exception freeing experiment: %s" % e)
            log.log_exc(ReservationConfirmer, log.level.Warning)
            self.coordinator.mark_experiment_as_broken(experiment_instance_id, [str(e)])
        else:
            # Everything went fine
            end_time = datetime.datetime.now()
            self.coordinator.confirm_resource_disposal(lab_coordaddress.address, reservation_id,
                                                       lab_session_id, experiment_instance_id,
                                                       experiment_response, initial_time, end_time)
    except:
        if DEBUG:
            traceback.print_exc()
        log.log(ReservationConfirmer, log.level.Critical, "Unexpected exception freeing experiment")
        log.log_exc(ReservationConfirmer, log.level.Critical)
def do_send_command_to_device(self, command):
    if command == "POLL":
        if self._gpib_launcher.poll():
            log.log(UdGpibExperiment, log.level.Info, "Executed, saving results")
            self._remove_file()
            return "OK"
        else:
            return "WAIT"
    elif command == "RESULT code":
        return self._gpib_launcher.get_result_code()
    elif command == "RESULT stdout":
        return self._remove_non_ascii_characters(self._gpib_launcher.get_result_stdout())
    elif command == "RESULT stderr":
        return self._remove_non_ascii_characters(self._gpib_launcher.get_result_stderr())
    elif command == "RESULT file":
        try:
            return "OK%s" % self._remove_non_ascii_characters(self._read_output_file())
        except Exception:
            return "ERFile <%s> not found" % self._cfg_manager.get_value('gpib_public_output_file_filename')
    else:
        raise GpibErrors.UnknownUdGpibCommandError("Unknown received command: %s" % command)
def _iterate(self, element):
    elements = [element]
    while True:
        try:
            new_element = self.queue.get_nowait()
            elements.append(new_element)
        except Queue.Empty:
            break

    if not self.stopped:
        execute = True
        with self.period_lock:
            if time.time() - self._latest_update <= self.next_period_between_updates:
                execute = False
            else:
                self._latest_update = time.time()
                self._update_period_between_updates()

        if execute:
            try:
                self.scheduler.update()
            except:
                log.log(SchedulerTransactionsSynchronizer, log.level.Critical, "Exception updating scheduler")
                log.log_exc(SchedulerTransactionsSynchronizer, log.level.Critical)

    self._notify_elements(elements)
def authenticate(self, login, password):
    if not LDAP_AVAILABLE:
        msg = "The optional library 'ldap' is not available. The users trying to be authenticated with LDAP will not be able to do so. %s tried to do it." % login
        print(msg, file=sys.stderr)
        log.log(self, log.level.Error, msg)
        return False

    password = str(password)
    ldap_module = _ldap_provider.get_module()
    try:
        ldapobj = ldap_module.initialize(self.ldap_uri)
    except Exception as e:
        raise LoginErrors.LdapInitializingError("Exception initializing the LDAP module: %s" % e)

    dn = "%s@%s" % (login, self.domain)
    pw = password
    try:
        ldapobj.simple_bind_s(dn, pw)
    except ldap.INVALID_CREDENTIALS:
        return False
    except Exception as e:
        raise LoginErrors.LdapBindingError("Exception binding to the server: %s" % e)
    else:
        ldapobj.unbind_s()
        return True
def _free_experiment(self, lab_session_id):
    if not self._session_manager.has_session(lab_session_id):
        return

    session = self._session_manager.get_session_locking(lab_session_id)
    finished = True
    experiment_response = None
    try:
        # Remove the async requests whose results we have not retrieved.
        # It seems that they might still be running when free gets called.
        # TODO: Consider possible issues.
        session_id = session['session_id']
        if session_id in self._async_requests:
            del self._async_requests[session_id]

        experiment_instance_id = session['experiment_instance_id']
        try:
            experiment_response = self._free_experiment_from_assigned_experiments(experiment_instance_id, lab_session_id)
        except Exception as e:
            log.log(LaboratoryServer, log.level.Error, "Exception freeing experiment: %s" % e)
            log.log_exc(LaboratoryServer, log.level.Error)
            experiment_response = ''

        if experiment_response is not None and experiment_response.lower() != 'ok' and experiment_response != '':
            try:
                response = json.loads(experiment_response)
                finished = response.get(Coordinator.FINISH_FINISHED_MESSAGE)
            except:
                traceback.print_exc()
    finally:
        if finished:
            self._session_manager.delete_session_unlocking(lab_session_id)
        else:
            self._session_manager.modify_session_unlocking(lab_session_id, session)
    return experiment_response
def _test_server(self, server, address):
    """ _test_server(self, server, address) -> bool

    It returns True (if we could perform a call to "test_me"), or False (if we couldn't).
    """
    # Check if the server is up and running
    try:
        random_msg = str(random.random())
        result_msg = server.test_me(random_msg)
        if random_msg != result_msg:
            # This was not a valid server, try another
            log.log(ServerLocator, log.level.Warning,
                    "Test message received from server %s different from the message sent (%s vs %s). Trying another server" % (
                        address.address, random_msg, result_msg))
            return False
    except Exception as e:
        # There was an exception: this is not a valid server, try another
        log.log(ServerLocator, log.level.Warning,
                "Testing server %s raised exception %s. Trying another server" % (address.address, e))
        log.log_exc(ServerLocator, log.level.Info)
        return False
    else:
        return True
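# Illustrative counterpart of the check above: _test_server only requires that the
# remote server echo the message passed to "test_me" unchanged. A minimal sketch of
# such a method follows; the class name is a hypothetical placeholder, not part of
# the codebase:
class _EchoServerSketch(object):
    def test_me(self, message):
        # Return the message verbatim so callers can verify the server is alive
        # and speaking the expected protocol.
        return message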
def confirm_resource_disposal(self, lab_coordaddress, reservation_id, lab_session_id, experiment_instance_id, experiment_response, initial_time, end_time):
    experiment_finished = True
    information_to_store = None
    time_remaining = 0.5  # Every half a second by default

    if experiment_response is None or experiment_response == 'ok' or experiment_response == '':
        pass  # Default value
    else:
        try:
            response = json.loads(experiment_response)
            experiment_finished = response.get(FINISH_FINISHED_MESSAGE, experiment_finished)
            time_remaining = response.get(FINISH_ASK_AGAIN_MESSAGE, time_remaining)
            information_to_store = response.get(FINISH_DATA_MESSAGE, information_to_store)
        except Exception as e:
            log.log(AbstractCoordinator, log.level.Error,
                    "Could not parse experiment server finishing response: %s; %s" % (e, experiment_response))
            log.log_exc(AbstractCoordinator, log.level.Warning)

    if not experiment_finished:
        time.sleep(time_remaining)
        # We just ignore the data retrieved, if any, and perform the query again
        self.confirmer.enqueue_free_experiment(lab_coordaddress, reservation_id, lab_session_id, experiment_instance_id)
        return
    else:
        # Otherwise we mark it as finished
        self.post_reservation_data_manager.finish(reservation_id, json.dumps(information_to_store))
        try:
            # and we remove the resource
            self._release_resource_instance(experiment_instance_id)
        finally:
            self.finished_store.put(reservation_id, information_to_store, initial_time, end_time)

    # It's done here so it's called often enough
    self.post_reservation_data_manager.clean_expired()
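# Illustrative sketch of the finishing response that confirm_resource_disposal parses.
# Only the three FINISH_* keys and their roles (a "finished" flag, a retry delay in
# seconds, and arbitrary JSON data to store) follow from the code above; the concrete
# values below are assumptions for illustration.
def _example_finishing_response():
    return json.dumps({
        FINISH_FINISHED_MESSAGE: False,                     # not done yet: ask again later
        FINISH_ASK_AGAIN_MESSAGE: 2.5,                      # poll again in 2.5 seconds
        FINISH_DATA_MESSAGE: {'batch_results': 'pending'},  # stored once finished
    })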
def mark_resource_as_broken(self, resource_instance, messages=[]):
    scheduler = self._get_scheduler_per_resource(resource_instance)

    anything_changed = False
    session = self._session_maker()
    try:
        changed = scheduler.removing_current_resource_slot(session, resource_instance)
        anything_changed = anything_changed or changed

        changed = self.resources_manager.mark_resource_as_broken(session, resource_instance)
        anything_changed = anything_changed or changed

        if anything_changed:
            session.commit()
    finally:
        session.close()

    if anything_changed:
        log.log(Coordinator, log.level.Warning,
                "Resource %s marked as broken: %r" % (resource_instance, messages))
        if self.notifications_enabled:
            self._notify_experiment_status('broken', resource_instance, messages)
def _log(self, result_code, output, stderr):
    log.log(XilinxImpact, log.level.Info,
            "Device programming was finished. Result code: %i\n<output>\n%s\n</output><stderr>\n%s\n</stderr>" % (
                result_code, output, stderr))
def wrapped(self, *args, **kargs):
    if not SERIAL_AVAILABLE:
        msg = "The optional library 'pyserial' is not available. The experiments trying to use the serial port will fail."
        print >> sys.stderr, msg
        log.log(self, log.level.Error, msg)
        return
    return func(self, *args, **kargs)
def wrapper(self, *args, **kwargs):
    try:
        return func(self, *args, **kwargs)
    except Exception as e:
        for exc, code, propagate in exceptions_to_check:
            if issubclass(e.__class__, exc):
                if propagate or self._cfg_manager.get_doc_value(configuration_doc.DEBUG_MODE):
                    log.log(self.__class__, log.level.Info,
                            "%s raised on %s: %s: %s" % (exc.__name__, func.__name__, e, e.args))
                    log.log_exc(self.__class__, log.level.Debug)
                    return self._raise_exception(code, e.args[0])
                else:
                    # WebLabInternalServerError
                    log.log(self.__class__, log.level.Warning,
                            "Unexpected %s raised on %s: %s: %s" % (exc.__name__, func.__name__, e, e.args))
                    log.log_exc(self.__class__, log.level.Info)
                    return self._raise_exception(
                        RemoteFacadeManagerCodes.WEBLAB_GENERAL_EXCEPTION_CODE,
                        UNEXPECTED_ERROR_MESSAGE_TEMPLATE % self._cfg_manager.get_value(
                            SERVER_ADMIN_EMAIL, DEFAULT_SERVER_ADMIN_EMAIL))
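# The wrapper above unpacks exceptions_to_check as (exception class, facade error
# code, propagate) triples. A hypothetical table might look like the following;
# every name below is an illustrative assumption, not the real mapping:
class _HypotheticalClientError(Exception):
    pass

_example_exceptions_to_check = (
    # propagate=True: the exception message is forwarded to the client as-is
    (_HypotheticalClientError, 'JSON:Client.Hypothetical', True),
    # propagate=False: hidden behind a generic message unless DEBUG_MODE is set
    (Exception, 'JSON:Server.General', False),
)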
def _should_finish(self, lab_coordaddress, lab_session_id, reservation_id):
    try:
        try:
            labserver = self.locator.get_server_from_coordaddr(lab_coordaddress, ServerType.Laboratory)
            received_experiment_response = labserver.should_experiment_finish(lab_session_id)
            experiment_response = float(received_experiment_response)
        except Exception as e:
            if DEBUG:
                traceback.print_exc()
            log.log(ReservationConfirmer, log.level.Error,
                    "Exception checking if the experiment should finish: %s" % e)
            log.log_exc(ReservationConfirmer, log.level.Warning)
            # Don't try again with this reservation
            self.coordinator.confirm_should_finish(lab_coordaddress.address, lab_session_id, reservation_id, 0)
        else:
            self.coordinator.confirm_should_finish(lab_coordaddress.address, lab_session_id,
                                                   reservation_id, experiment_response)
    except:
        if DEBUG:
            traceback.print_exc()
        log.log(ReservationConfirmer, log.level.Critical, "Unexpected exception checking should_finish")
        log.log_exc(ReservationConfirmer, log.level.Critical)
def _retrieve_networks_from_coordinator(self, original_server_address, server_coord_address):
    try:
        return self._coordinator.get_networks(original_server_address, server_coord_address)
    except ProtocolErrors.ProtocolError as pe:
        # TODO: not unittested
        log.log(ServerLocator, log.level.Error,
                "Problem while asking for networks to the coordinator server. %s" % pe)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Couldn't retrieve networks from coordinator server: " + str(pe), pe)
    except Exception as e:
        # TODO: not unittested
        log.log(ServerLocator, log.level.Error,
                "Unexpected exception while asking for networks to the coordinator server. %s" % e)
        log.log_exc(ServerLocator, log.level.Warning)
        import traceback
        traceback.print_exc()
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Unexpected exception while asking for networks from coordinator server: " + str(e), e)
def do_reserve_experiment(self, experiment_instance_id, client_initial_data, server_initial_data):
    lab_sess_id = self._session_manager.create_session()
    try:
        experiment_coord_address = self._assigned_experiments.reserve_experiment(experiment_instance_id, lab_sess_id)
    except LaboratoryErrors.BusyExperimentError:
        # If it was already busy, free it and reserve it again
        try:
            old_lab_sess_id = self._assigned_experiments.get_lab_session_id(experiment_instance_id)
            self._free_experiment(old_lab_sess_id)
        except Exception as e:
            # If there is an error freeing the experiment, we don't want to propagate it to
            # the User Processing Server: our focus is to reserve the new session.
            log.log(LaboratoryServer, log.level.Warning,
                    "Exception freeing already reserved experiment: %s" % e)
            log.log_exc(LaboratoryServer, log.level.Info)
        try:
            experiment_coord_address = self._assigned_experiments.reserve_experiment(experiment_instance_id, lab_sess_id)
        except LaboratoryErrors.BusyExperimentError:
            # The session might have expired and that's why this experiment is still
            # reserved. Free it directly from assigned_experiments.
            self._free_experiment_from_assigned_experiments(experiment_instance_id, lab_sess_id)
            experiment_coord_address = self._assigned_experiments.reserve_experiment(experiment_instance_id, lab_sess_id)

    self._session_manager.modify_session(lab_sess_id, {
        'experiment_instance_id': experiment_instance_id,
        'experiment_coord_address': experiment_coord_address,
        'session_id': lab_sess_id,
    })

    # Obtain the API of the experiment.
    api = self._find_api(experiment_instance_id, experiment_coord_address)

    experiment_server = self._locator.get_server_from_coordaddr(experiment_coord_address, ServerType.Experiment)

    if api == ExperimentApiLevel.level_1:
        experiment_server.start_experiment()
        experiment_server_response = "ok"
    elif api == ExperimentApiLevel.level_2:
        experiment_server_response = experiment_server.start_experiment(client_initial_data, server_initial_data)
    elif api == ExperimentApiLevel.level_2_concurrent:
        # If the API version is concurrent, we will also send the session id,
        # to be able to identify the user for each request.
        experiment_server_response = experiment_server.start_experiment(lab_sess_id, client_initial_data, server_initial_data)
    else:
        # ERROR: Unrecognized version.
        experiment_server_response = experiment_server.start_experiment(lab_sess_id, client_initial_data, server_initial_data)

    return lab_sess_id, experiment_server_response, experiment_coord_address.address
def do_send_command(self, session, command):
    lab_session_id = session['session_id']
    experiment_instance_id = session['experiment_instance_id']
    api = self._assigned_experiments.get_api(experiment_instance_id)
    experiment_coord_address = session['experiment_coord_address']
    experiment_server = self._locator.get_server_from_coordaddr(experiment_coord_address, ServerType.Experiment)

    try:
        if api.endswith("concurrent"):
            response = experiment_server.send_command_to_device(lab_session_id, command.get_command_string())
        else:
            response = experiment_server.send_command_to_device(command.get_command_string())
    except Exception as e:
        log.log(LaboratoryServer, log.level.Warning, "Exception sending command to experiment: %s" % e)
        log.log_exc(LaboratoryServer, log.level.Info)
        raise LaboratoryErrors.FailedToSendCommandError("Couldn't send command: %s" % str(e))

    return Command.Command(str(response))
def _program_file(self, file_content):
    try:
        fd, file_name = tempfile.mkstemp(prefix='ud_xilinx_experiment_program',
                                         suffix='.' + self._programmer.get_suffix())
        try:
            try:
                # TODO: encode? utf8?
                if isinstance(file_content, unicode):
                    file_content_encoded = file_content.encode('utf8')
                else:
                    file_content_encoded = file_content
                file_content_recovered = ExperimentUtil.deserialize(file_content_encoded)
                os.write(fd, file_content_recovered)
            finally:
                os.close(fd)
            self._programmer.program(file_name)
        finally:
            os.remove(file_name)
    except Exception as e:
        # TODO: test me
        log.log(ElevatorExperiment, log.level.Info,
                "Exception programming the logic into the board: %s" % e.args[0])
        log.log_exc(ElevatorExperiment, log.level.Debug)
        raise ExperimentErrors.SendingFileFailureError("Error sending file to device: %s" % e)
    self._clear()
def _send_async_command_t(self, session, command):
    """
    This method is used for asynchronously calling the experiment server's
    send_command_to_device, and for that purpose runs on its own thread.
    This implies that its response will arrive asynchronously to the client.
    """
    lab_session_id = session['session_id']
    experiment_instance_id = session['experiment_instance_id']
    api = self._assigned_experiments.get_api(experiment_instance_id)
    experiment_coord_address = session['experiment_coord_address']
    experiment_server = self._locator[experiment_coord_address]

    try:
        if api.endswith("concurrent"):
            response = experiment_server.send_command_to_device(lab_session_id, command.get_command_string())
        else:
            response = experiment_server.send_command_to_device(command.get_command_string())
    except Exception as e:
        log.log(LaboratoryServer, log.level.Warning, "Exception sending async command to experiment: %s" % e)
        log.log_exc(LaboratoryServer, log.level.Info)
        raise LaboratoryErrors.FailedToSendCommandError("Couldn't send async command: %s" % str(e))

    return Command.Command(str(response))
def _execute(self, cmd_params, digilent_adept):
    # Kludge!
    full_cmd_line = digilent_adept + cmd_params.split(" ")

    log.log(DigilentAdept, log.level.Warning, "Executing %s" % full_cmd_line)
    try:
        popen = subprocess.Popen(full_cmd_line,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        raise ErrorProgrammingDeviceError("There was an error while executing Digilent Adept: %s" % e)

    # TODO: make use of popen.poll to make this asynchronous
    try:
        stdout, stderr = popen.communicate('N\n')
        result = popen.wait()
    except Exception as e:
        raise ErrorWaitingForProgrammingFinishedError(
            "There was an error while waiting for Digilent Adept to finish: %s" % e)

    return result, stdout, stderr
def get_status(self):
    """
    get_status() -> Reservation

    It returns the state of the reservation (such as "you're waiting in a queue",
    "the experiment is being initialized", "you have the reservation available", etc.)
    """
    try:
        status = self._coordinator.get_reservation_status(self._reservation_id)
    except coord_exc.ExpiredSessionError as e:
        log.log(ReservationProcessor, log.level.Debug, "reason for rejecting:")
        log.log_exc(ReservationProcessor, log.level.Debug)
        human = self._cfg_manager.get_doc_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER_HUMAN)
        core_id = self._cfg_manager.get_doc_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER)
        raise core_exc.NoCurrentReservationError(
            "get_reservation_status at %s (%s) called but coordinator rejected reservation id (%s). Reason: %s"
            % (human, core_id, self._reservation_id, str(e)))
    else:
        if status.status == scheduling_status.WebLabSchedulingStatus.RESERVED_LOCAL:
            self.process_reserved_status(status)

        if status.status == scheduling_status.WebLabSchedulingStatus.RESERVED_REMOTE:
            self.process_reserved_remote_status(status)

        return Reservation.Reservation.translate_reservation(status)
def _get_server_from_coordinator(self, session_id):
    try:
        return self._coordinator.get_server(session_id)
    except CoordinatorServerErrors.NoServerFoundError as nsfe:
        raise nsfe
    except ProtocolErrors.ProtocolError as pe:
        log.log(ServerLocator, log.level.Error,
                "Problem while asking for other server to the coordinator server. %s" % pe)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Couldn't ask for other server to coordinator server: " + str(pe), pe)
    except Exception as e:
        log.log(ServerLocator, log.level.Error,
                "Unexpected exception while asking for other server to the coordinator server. %s" % e)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Unexpected exception while asking for other server to the coordinator server: " + str(e), e)
def log_message(self, format, *args):
    # args: ('POST /weblab/xmlrpc/ HTTP/1.1', '200', '-')
    log.log(XmlRpcRequestHandler, log.level.Info,
            "Request from %s: %s" % (get_context().get_ip_address(), format % args))
def _program_file(self, file_content):
    try:
        fd, file_name = tempfile.mkstemp(prefix="ud_xilinx_experiment_program",
                                         suffix="." + self._xilinx_impact.get_suffix())
        try:
            try:
                # TODO: encode? utf8?
                if isinstance(file_content, unicode):
                    file_content_encoded = file_content.encode("utf8")
                else:
                    file_content_encoded = file_content
                file_content_recovered = ExperimentUtil.deserialize(file_content_encoded)
                os.write(fd, file_content_recovered)
            finally:
                os.close(fd)
            self._programmer.program(file_name)
        finally:
            os.remove(file_name)
    except Exception as e:
        # TODO: test me
        log.log(UdXilinxExperiment, log.level.Info,
                "Exception sending program to device: %s" % e.args[0])
        log.log_exc(UdXilinxExperiment, log.level.Debug)
        raise ExperimentErrors.SendingFileFailureError("Error sending file to device: %s" % e)
    self._clear()
def log_message(self, format, *args):
    # args: ('POST /weblab/soap/ HTTP/1.1', '200', '-')
    log.log(WebLabRequestHandlerClass, log.level.Info,
            "Request from %s: %s" % (get_context().get_ip_address(), format % args))
def log_message(self, format, *args):
    # args: ('POST /foo/bar/ HTTP/1.1', '200', '-')
    log.log(WebHttpHandler, log.level.Info,
            "Request from %s: %s" % (get_context().get_ip_address(), format % args))
def get_data_type(self):
    def is_builtin():
        if not hasattr(self.element, "__class__"):
            return False
        return self.element.__class__ == __builtin__.__dict__.get(self.element.__class__.__name__)

    if type(self.element) in _basic_normal_types:
        return _Node.BASIC
    elif type(self.element) == tuple:
        return _Node.TUPLE
    elif type(self.element) == list:
        return _Node.LIST
    elif type(self.element) == dict:
        return _Node.DICT
    elif is_builtin() or isinstance(self.element, _DtoBuiltin):
        return _Node.BUILTIN_E
    elif isinstance(self.element, Exception):
        return _Node.EXCEPTION
    elif type(self.element) == _new.instance:
        return _Node.INSTANCE
    elif hasattr(self.element, "__reduce__") or hasattr(self.element, "__reduce_ex__"):
        return _Node.OBJECT
    else:
        _log.log(_Node, _log.level.Warning,
                 "Unrecognized type: %s, %r, %r. Configure it at voodoo.mapper.py"
                 % (type(self.element), self.name, self.parent))
        return _Node.IGNORABLE
def _retrieve_all_servers_from_coordinator(self, original_server_address, server_type, restrictions):
    try:
        return self._coordinator.get_all_servers(original_server_address, server_type, restrictions)
    except ProtocolErrors.ProtocolError as pe:
        # TODO: not unittested
        log.log(ServerLocator, log.level.Error,
                "Problem while asking for all servers to the coordinator server. %s" % pe)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Couldn't retrieve all servers from coordinator server: " + str(pe), pe)
    except Exception as e:
        # TODO: not unittested
        log.log(ServerLocator, log.level.Error,
                "Unexpected exception while asking for all servers to the coordinator server. %s" % e)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Unexpected exception while asking all servers from coordinator server: " + str(e), e)
def run(self):
    while not self.stopping:
        try:
            sleep(1)
            if self.frequency is None:
                continue
            # Here self.frequency is configured, so wait the rest of the required time
            if self.frequency > 1:
                sleep(self.frequency - 1)

            if self.stopping:
                break
            if self.coordinator is None:
                continue
            coordinator = self.coordinator()
            if coordinator is None or coordinator.locator is None:
                continue  # coordinator not configured yet

            self.current_checker = self.Checker(coordinator)
            self.current_checker.check()
        except Exception as e:
            log.log(ResourcesCheckerThread, log.level.Critical, "Exception checking resources: %s" % e)
            log.log_exc(ResourcesCheckerThread, log.level.Error)
def _register_xmlrpc_server(self, who, port):
    _xmlrpc_server_lock.acquire()
    try:
        global _xmlrpc_server
        if _xmlrpc_server is None:
            _xmlrpc_server = {}
        if not _xmlrpc_server.has_key(port):
            _xmlrpc_server[port] = _AvoidTimeoutXMLRPCServer((who, port),
                                                             requestHandler=UtilRequestHandlerClass,
                                                             allow_none=True)
            _xmlrpc_server[port].socket.settimeout(MAX_TIMEOUT)
        self.server = _xmlrpc_server[port]

        for i in all_methods:
            if i in _xmlrpc_server_functions:
                log.log(ServerXMLRPC, log.level.Warning,
                        'Method "%s" already served by server "%s"' % (i, self))
            # Register every function from "all_methods"
            self.server.register_function(
                new.instancemethod(getattr(self.__class__, i), self, self.__class__),
                'Util.%s' % i)
            _xmlrpc_server_functions.append(i)
    finally:
        _xmlrpc_server_lock.release()
def create_external_user(self, external_user, external_id, system, group_names):
    session = self.Session()
    try:
        try:
            auth_type = session.query(model.DbAuthType).filter_by(name=system).one()
            auth = auth_type.auths[0]
        except (NoResultFound, KeyError):
            raise DbErrors.DbUserNotFoundError("System '%s' not found in database" % system)

        groups = []
        for group_name in group_names:
            try:
                group = session.query(model.DbGroup).filter_by(name=group_name).one()
            except NoResultFound:
                raise DbErrors.DbUserNotFoundError("Group '%s' not found in database" % group_name)
            groups.append(group)

        try:
            role = session.query(model.DbRole).filter_by(name=external_user.role.name).one()
            user = model.DbUser(external_user.login, external_user.full_name, external_user.email, role=role)
            user_auth = model.DbUserAuth(user, auth, configuration=external_id)
            for group in groups:
                group.users.append(user)
            session.add(user)
            session.add(user_auth)
            session.commit()
        except Exception as e:
            log.log(DatabaseGateway, log.level.Warning, "Couldn't create user: %s" % e)
            log.log_exc(DatabaseGateway, log.level.Info)
            raise DbErrors.DatabaseError("Couldn't create user! Contact administrator")
    finally:
        session.close()
def authenticate(self, login, password):
    if not LDAP_AVAILABLE:
        msg = "The optional library 'ldap' is not available. The users trying to be authenticated with LDAP will not be able to do so. %s tried to do it." % login
        print(msg, file=sys.stderr)
        log.log(self, log.level.Error, msg)
        return False

    if not password:
        # The Python LDAP module does not provide any error if the password is empty
        return False

    password = str(password)
    ldap_module = _ldap_provider.get_module()
    try:
        ldapobj = ldap_module.initialize(self.ldap_uri)
    except Exception as e:
        raise LoginErrors.LdapInitializingError("Exception initializing the LDAP module: %s" % e)

    dn = "%s@%s" % (login, self.domain)
    pw = password
    try:
        ldapobj.simple_bind_s(dn, pw)
    except ldap.INVALID_CREDENTIALS:
        return False
    except Exception as e:
        raise LoginErrors.LdapBindingError("Exception binding to the server: %s" % e)
    else:
        ldapobj.unbind_s()
        return True
def wrapped(self, *args, **kargs):
    if not SERIAL_AVAILABLE:
        msg = "The optional library 'pyserial' is not available. The experiments trying to use the serial port will fail."
        print(msg, file=sys.stderr)
        log.log(self, log.level.Error, msg)
        return
    return func(self, *args, **kargs)
def __init__(self, generic_scheduler_arguments, experiment_id, schedulers, particular_configuration):
    super(IndependentSchedulerAggregator, self).__init__(generic_scheduler_arguments)

    if len(schedulers) == 0:
        # This case should never happen: if the experiment_id exists,
        # there should be at least one scheduler for it.
        raise NoSchedulerFoundError("No scheduler provider at IndependentSchedulerAggregator")

    remote_schedulers = []
    local_schedulers = []
    for resource_type_name in schedulers:
        scheduler = schedulers[resource_type_name]
        if scheduler.is_remote():
            remote_schedulers.append(resource_type_name)
        else:
            local_schedulers.append(resource_type_name)

    # Local schedulers go first
    self.sorted_schedulers = local_schedulers + remote_schedulers

    log.log(IndependentSchedulerAggregator, log.level.Info,
            "Creating a new IndependentSchedulerAggregator. experiment_id: %s; schedulers: %s"
            % (experiment_id, self.sorted_schedulers), max_size=100000)

    self.experiment_id = experiment_id
    self.schedulers = schedulers
    self.particular_configuration = particular_configuration
def _register_soap_server(self, who, port):
    if SOAPPY_AVAILABLE:
        _soap_server_lock.acquire()
        try:
            global _soap_server
            if _soap_server is None:
                _soap_server = {}
            if not _soap_server.has_key(port):
                _soap_server[port] = _AvoidTimeoutSOAPServer((who, port))
                _soap_server[port].config.dumpFaultInfo = 0
                _soap_server[port].socket.settimeout(MAX_TIMEOUT)
            self.server = _soap_server[port]

            if port not in _soap_server_functions:
                _soap_server_functions[port] = []

            for method_name in all_methods:
                if method_name in _soap_server_functions[port]:
                    log.log(ServerSOAP, log.level.Warning,
                            'Method "%s" already served by server "%s" on port %s' % (method_name, self, port))
                # Register every function from "all_methods"
                self.server.registerFunction(
                    new.instancemethod(getattr(self.__class__, method_name), self, self.__class__))
                _soap_server_functions[port].append(method_name)
        finally:
            _soap_server_lock.release()
    else:
        msg = "The optional library 'SOAPpy' is not available. The communications between different servers will not work through SOAP."
        print >> sys.stderr, msg
        log.log(self, log.level.Error, msg)

        class FakeServer(object):
            def handle_request(self):
                time.sleep(MAX_TIMEOUT)

        self.server = FakeServer()
def _program_file_t(self, file_content):
    """
    Running in its own thread, this method will program the board while
    updating the state of the experiment appropriately.
    """
    try:
        start_time = time.time()  # To track the time it takes
        self._current_state = STATE_PROGRAMMING
        self._program_file(file_content)
        self._current_state = STATE_READY
        elapsed = time.time() - start_time  # Calculate the time the programming process took

        # Remember when real usage starts, so that we can enforce use-time specific limits.
        self._use_time_start = time.time()
        if DEBUG:
            print "[DBG]: STATE became STATE_READY. UseTimeStart = %s." % self._use_time_start

        # If we are in adaptive mode, change the programming time appropriately.
        # TODO: Consider limiting the variation range to dampen anomalies.
        if self._adaptive_time:
            self._programmer_time = elapsed
    except Exception as e:
        # Note: Currently, running the fake xilinx will raise this exception when
        # trying to do a CleanInputs, for which apparently serial is needed.
        self._current_state = STATE_FAILED
        log.log(ElevatorExperiment, log.level.Warning, "Error programming file: " + str(e))
        log.log_exc(ElevatorExperiment, log.level.Warning)
def _skeleton(self, *parameters, **kparameters):
    """
    Dynamically generated method. Protocol: SOAP.
    Method name: METHOD_NAME. Documentation: DOCUMENTATION
    """
    try:
        if SERIALIZE:
            parameters_instance = pickle.loads(parameters[0])
            if SERIALIZE_MAPPING:
                parameters_instance = mapper.load_from_dto(parameters_instance)
            params, kparams = parameters_instance
            result = getattr(self._parent, 'do_' + METHOD_NAME)(*params, **kparams)
            if SERIALIZE_MAPPING:
                result = mapper.dto_generator(result)
            dumped_result = pickle.dumps(result)
            return dumped_result
        else:
            return getattr(self._parent, 'do_' + METHOD_NAME)(*parameters, **kparameters)
    except Exception as e:
        # TODO: watch out, if server gets a Control + C, the exception is going to propagate
        tb = traceback.format_exc()
        if type(e) == types.InstanceType:
            class_name = str(e.__class__)
        else:
            class_name = type(e).__module__ + '.' + type(e).__name__
        log.log(self, log.level.Info, "Exception : " + class_name + "; " + e.args[0] + "; " + tb)
        raise SOAPpy.faultType(faultcode=class_name, faultstring=e.args[0], detail=tb)
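# Sketch of the caller-side convention implied by the SERIALIZE branch above: the
# client pickles an (args, kwargs) pair, sends it as the single SOAP parameter, and
# unpickles the result. "remote_method" is a hypothetical proxy callable; the DTO
# mapping step (SERIALIZE_MAPPING) is omitted here.
def _call_serialized(remote_method, *args, **kwargs):
    payload = pickle.dumps((args, kwargs))
    dumped_result = remote_method(payload)
    return pickle.loads(dumped_result)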
def _find_api(self, experiment_instance_id, experiment_coord_address=None):
    """
    _find_api(experiment_instance_id)

    Tries to retrieve the API version of the specified experiment.

    @param experiment_instance_id Experiment instance identifier for the experiment whose API we want.
    @param experiment_coord_address Experiment coord address. May be None.
    @return API version, as a string. Will return the current API if for any reason
            it is unable to obtain the version.
    """
    # Check whether we know the API version already.
    api = self._assigned_experiments.get_api(experiment_instance_id)

    # If we don't know the API version yet, we will have to ask the experiment server itself
    if api is None:
        reported_api = self._get_experiment_api(experiment_instance_id)
        if reported_api is None:
            log.log(LaboratoryServer, log.level.Warning,
                    "It was not possible to find out the api version of %r. Using current version as default." % experiment_coord_address)
            if DEBUG:
                print "[DBG] Was not possible to find out the api version of %r" % experiment_coord_address
        else:
            # Remember the api version that we retrieved
            self._assigned_experiments.set_api(experiment_instance_id, reported_api)
            api = reported_api

    # If we don't know the api, we will use the current version as default.
    if api is None:
        api = ExperimentApiLevel.current
        self._assigned_experiments.set_api(experiment_instance_id, api)

    return api
def _execute(self, cmd_params, digilent_adept):
    # Kludge!
    full_cmd_line = digilent_adept + cmd_params.split(" ")

    log.log(DigilentAdept, log.level.Warning, "Executing %s" % full_cmd_line)
    try:
        popen = subprocess.Popen(full_cmd_line,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        raise ErrorProgrammingDeviceError("There was an error while executing Digilent Adept: %s" % e)

    # TODO: make use of popen.poll to make this asynchronous
    try:
        # Note: waiting before reading the pipes can block if the child fills a
        # pipe buffer; popen.communicate() (as in the other variant) avoids that.
        result = popen.wait()
    except Exception as e:
        raise ErrorWaitingForProgrammingFinishedError(
            "There was an error while waiting for Digilent Adept to finish: %s" % e)

    try:
        stdout_result = popen.stdout.read()
        stderr_result = popen.stderr.read()
    except Exception as e:
        raise ErrorRetrievingOutputFromProgrammingProgramError(
            "There was an error while retrieving the output of Digilent Adept: %s" % e)

    return result, stdout_result, stderr_result
def run(self):
    while not self.stopping:
        try:
            sleep(1)
            if self.frequency is None:
                continue
            # Here self.frequency is configured, so wait the rest of the required time
            if self.frequency > 1:
                sleep(self.frequency - 1)

            if self.stopping:
                break
            if self.coordinator is None:
                continue
            coordinator = self.coordinator()
            if coordinator is None or coordinator.locator is None:
                continue  # coordinator not configured yet

            checker = self.Checker(coordinator)
            checker.check()
        except Exception as e:
            log.log(ResourcesCheckerThread, log.level.Critical, "Exception checking resources: %s" % e)
            log.log_exc(ResourcesCheckerThread, log.level.Error)
def _execute(self, cmd_params, xcs3prog_full_path):
    """
    Runs the actual process with the given command line arguments and full path.

    :param cmd_params: command line arguments, as a single space-separated string.
    :param xcs3prog_full_path: full path to the xcs3prog binary, as a list of arguments.
    :return: (result code, stdout, stderr) tuple of the finished process.
    """
    full_cmd_line = xcs3prog_full_path + cmd_params.split(" ")
    log.log(Xcs3prog, log.level.Warning, "Executing %s" % full_cmd_line)
    try:
        popen = subprocess.Popen(full_cmd_line,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        raise ErrorProgrammingDeviceError(
            "There was an error while executing xcs3prog FPGA programming tool: %s" % e)

    try:
        stdout, stderr = popen.communicate("N\n")
        result = popen.wait()
    except Exception as e:
        raise ErrorWaitingForProgrammingFinishedError(
            "There was an error while waiting for xcs3prog FPGA programming tool to finish: %s" % e)

    return result, stdout, stderr
def run(self):
    try:
        try:
            self.result = self._func(self._self, *self._args, **self._kargs)
            self.finished_ok = True
        finally:
            if self._resource_manager is not None:
                self._resource_manager.remove_resource(self)
    except Exception as e:
        self.raised_exc = e
        if self.logging:
            log.log(_ThreadedFunc, log.level.Warning,
                    "threaded: exception caught while running %s: %s" % (self._func.__name__, e))
            log.log_exc(_ThreadedFunc, log.level.Warning)
        sio = StringIO.StringIO()
        traceback.print_exc(file=sio)
        self.raised_exc_traceback = sio.getvalue()
def _program_file(self, file_content):
    try:
        fd, file_name = tempfile.mkstemp(prefix='pic18_experiment_program', suffix='.hex')
        try:
            try:
                # TODO: encode? utf8?
                if isinstance(file_content, unicode):
                    file_content_encoded = file_content.encode('utf8')
                else:
                    file_content_encoded = file_content
                file_content_recovered = ExperimentUtil.deserialize(file_content_encoded)
                os.write(fd, file_content_recovered)
            finally:
                os.close(fd)
            print "File ready in %s" % file_name
            self._programmer.program(file_name)
            print "File sent with programmer: ", self._programmer
        finally:
            os.remove(file_name)
    except Exception as e:
        print "Error sending file"
        import traceback
        traceback.print_exc()
        # TODO: test me
        log.log(UdPic18Experiment, log.level.Info,
                "Exception sending program to device: %s" % e.args[0])
        log.log_exc(UdPic18Experiment, log.level.Debug)
        raise ExperimentExceptions.SendingFileFailureException("Error sending file to device: %s" % e)
def _program_file_t(self, file_content):
    """
    Running in its own thread, this method will program the board while
    updating the state of the experiment appropriately.
    """
    try:
        start_time = time.time()  # To track the time it takes
        self._current_state = STATE_PROGRAMMING
        self._program_file(file_content)
        self._current_state = STATE_READY
        elapsed = time.time() - start_time  # Calculate the time the programming process took

        # Remember when real usage starts, so that we can enforce use-time specific limits.
        self._use_time_start = time.time()
        if DEBUG:
            print "[DBG]: STATE became STATE_READY. UseTimeStart = %s." % self._use_time_start

        # If we are in adaptive mode, change the programming time appropriately.
        # TODO: Consider limiting the variation range to dampen anomalies.
        if self._adaptive_time:
            self._programmer_time = elapsed
    except Exception as e:
        # Note: Currently, running the fake xilinx will raise this exception when
        # trying to do a CleanInputs, for which apparently serial is needed.
        self._current_state = STATE_FAILED
        log.log(UdXilinxExperiment, log.level.Warning, "Error programming file: " + str(e))
        log.log_exc(UdXilinxExperiment, log.level.Warning)
def _should_finish(self, lab_coordaddress, lab_session_id, reservation_id):
    try:
        try:
            labserver = self.locator[lab_coordaddress]
            received_experiment_response = labserver.should_experiment_finish(lab_session_id)
            experiment_response = float(received_experiment_response)
        except Exception as e:
            if DEBUG:
                traceback.print_exc()
            log.log(ReservationConfirmer, log.level.Error,
                    "Exception checking if the experiment should finish: %s" % e)
            log.log_exc(ReservationConfirmer, log.level.Warning)
            # Try again in 5 seconds
            self.coordinator.confirm_should_finish(lab_coordaddress.address, lab_session_id, reservation_id, 5)
        else:
            self.coordinator.confirm_should_finish(lab_coordaddress.address, lab_session_id,
                                                   reservation_id, experiment_response)
    except:
        if DEBUG:
            traceback.print_exc()
        log.log(ReservationConfirmer, log.level.Critical, "Unexpected exception checking should_finish")
        log.log_exc(ReservationConfirmer, log.level.Critical)
def wrapper(*args, **kwargs):
    try:
        for _ in xrange(10):
            try:
                return func(*args, **kwargs)
            except OperationalError as oe:
                # XXX MySQL dependent!!!
                if oe.orig.args[0] == 1213:
                    log.log(PriorityQueueScheduler, log.level.Error,
                            "Deadlock found, restarting...%s" % func.__name__)
                    log.log_exc(PriorityQueueScheduler, log.level.Warning)
                    continue
                else:
                    raise
    except:
        if DEBUG:
            print("Error in exc_checker: ", sys.exc_info())
        log.log(PriorityQueueScheduler, log.level.Error,
                "Unexpected exception while running %s" % func.__name__)
        log.log_exc(PriorityQueueScheduler, log.level.Warning)
        raise

wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
def log_message(self, format, *args):
    # args: ('POST /weblab/json/ HTTP/1.1', '200', '-')
    log.log(WrappedWSGIRequestHandler, log.level.Info, "Request: %s" % (format % args))
def _retrieve_session_id_from_coordinator(self, original_server_address, server_type, restrictions):
    try:
        return self._coordinator.new_query(original_server_address, server_type, restrictions)
    except ProtocolErrors.ProtocolError as pe:
        log.log(ServerLocator, log.level.Error,
                "Problem while asking for new session id to the coordinator server. %s" % pe)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Couldn't retrieve new session id from coordinator server: " + str(pe), pe)
    except Exception as e:
        log.log(ServerLocator, log.level.Error,
                "Unexpected exception while asking for new session id to the coordinator server. %s" % e)
        log.log_exc(ServerLocator, log.level.Warning)
        raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
            "Unexpected exception while asking new session id from coordinator server: " + str(e), e)
def wrapper(*args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception as e:
        weblab = _global_context.current_weblab
        config = weblab.ctx.config
        weblab_class = weblab.ctx.server_instance
        for exc, code, propagate in EXCEPTIONS:
            if issubclass(e.__class__, exc):
                if propagate or config.get_doc_value(configuration_doc.DEBUG_MODE):
                    log(weblab_class, level.Info,
                        "%s raised on %s: %s: %s" % (exc.__name__, func.__name__, e, e.args))
                    log_exc(weblab_class, level.Debug)
                    return _raise_exception(code, e.args[0])
                else:
                    # WebLabInternalServerError
                    log(weblab_class, level.Warning,
                        "Unexpected %s raised on %s: %s: %s" % (exc.__name__, func.__name__, e, e.args))
                    log_exc(weblab_class, level.Info)
                    return _raise_exception(
                        ErrorCodes.WEBLAB_GENERAL_EXCEPTION_CODE,
                        UNEXPECTED_ERROR_MESSAGE_TEMPLATE % config.get_value(
                            SERVER_ADMIN_EMAIL, DEFAULT_SERVER_ADMIN_EMAIL))
def finish_reservation(self, reservation_id):
    redis_client = self.redis_maker()
    reservation_str = redis_client.hget(self.external_weblabdeusto_reservations, reservation_id)
    if reservation_str is not None:
        reservation = json.loads(reservation_str)
        remote_reservation_id = reservation['remote_reservation_id']
        serialized_cookies = reservation['cookies']
    else:
        log.log(ExternalWebLabDeustoScheduler, log.level.Info,
                "Not finishing reservation %s since somebody already did it" % reservation_id)
        return

    cookies = pickle.loads(str(serialized_cookies))
    client = self._create_client(cookies)
    client.finished_experiment(SessionId(remote_reservation_id))
    try:
        client.get_reservation_status(SessionId(remote_reservation_id))
    except:
        # TODO: Actually check that the reservation was expired
        pass  # Expired reservation
    else:
        now = self.time_provider.get_datetime()
        self.post_reservation_data_manager.create(reservation_id, now, now + self.expiration_delta, json.dumps("''"))

    result = redis_client.hdel(self.external_weblabdeusto_reservations, reservation_id)
    if not result:
        log.log(ExternalWebLabDeustoScheduler, log.level.Info,
                "Not deleting reservation %s from ExternalWebLabDeustoReservation since somebody already did it" % reservation_id)
        return
def reserve_experiment(self, reservation_id, experiment_id, time, priority, initialization_in_accounting, client_initial_data, request_info):
    server_uuids = list(request_info.get(SERVER_UUIDS, []))
    server_uuids.append((self.core_server_uuid, self.core_server_uuid_human))

    consumer_data = {
        'time_allowed': time,
        'priority': priority,
        'initialization_in_accounting': initialization_in_accounting,
        'external_user': request_info.get('username', ''),
        SERVER_UUIDS: server_uuids,
    }
    for forwarded_key in FORWARDED_KEYS:
        if forwarded_key in request_info:
            consumer_data[forwarded_key] = request_info[forwarded_key]

    # TODO: identifier of the server
    login_client = self._create_login_client()
    session_id = login_client.login(self.username, self.password)

    client = self._create_client(login_client.get_cookies())

    serialized_client_initial_data = json.dumps(client_initial_data)
    serialized_consumer_data = json.dumps(consumer_data)

    # If the administrator has mapped this experiment_id to another one, take that
    # other one. Otherwise, take the one requested.
    requested_experiment_id_str = self.experiments_map.get(experiment_id.to_weblab_str(), experiment_id.to_weblab_str())
    requested_experiment_id = ExperimentId.parse(requested_experiment_id_str)

    external_reservation = client.reserve_experiment(session_id, requested_experiment_id,
                                                     serialized_client_initial_data, serialized_consumer_data)

    if external_reservation.is_null():
        return None

    remote_reservation_id = external_reservation.reservation_id.id
    log.log(ExternalWebLabDeustoScheduler, log.level.Warning,
            "Local reservation_id %s is linked to remote reservation %s" % (reservation_id, remote_reservation_id))

    cookies = client.get_cookies()
    serialized_cookies = pickle.dumps(cookies)

    redis_client = self.redis_maker()
    pipeline = redis_client.pipeline()
    pipeline.hset(self.external_weblabdeusto_reservations, reservation_id, json.dumps({
        'remote_reservation_id': remote_reservation_id,
        'cookies': serialized_cookies,
        'start_time': time_mod.time(),
    }))

    external_weblabdeusto_pending_results = self.EXTERNAL_WEBLABDEUSTO_PENDING_RESULTS % (self.resource_type_name, self.core_server_route)
    pipeline.hset(external_weblabdeusto_pending_results, reservation_id, json.dumps({
        'remote_reservation_id': remote_reservation_id,
        'username': request_info.get('username', ''),
        'serialized_request_info': pickle.dumps(request_info),
        'experiment_id_str': experiment_id.to_weblab_str(),
    }))
    pipeline.execute()

    reservation_status = self._convert_reservation_to_status(external_reservation, reservation_id, remote_reservation_id)
    return reservation_status