def subscribe_and_validate(self, topic, qos, payload, timeout=1):
    """Subscribe to `topic` and assert that a matching message arrives.

    A connection must already have been established using the `Connect`
    keyword. `payload` may be a Python regular expression; if no matching
    message is received within `timeout`, an AssertionError is raised.

    `topic`   topic to subscribe to
    `qos`     quality of service for the subscription
    `payload` payload (message) that is expected to arrive
    `timeout` time to wait for the payload to arrive

    Examples:
    | Subscribe And Validate | test/test | 1 | test message |
    """
    wait_seconds = convert_time(timeout)
    self._verified = False
    logger.info('Subscribing to topic: %s' % topic)
    self._mqttc.subscribe(str(topic), int(qos))
    self._payload = str(payload)
    self._mqttc.on_message = self._on_message
    # Drive the network loop until the callback flags a match or we
    # run out of time.
    deadline = time.time() + wait_seconds
    while not self._verified and time.time() < deadline:
        self._mqttc.loop()
    if not self._verified:
        raise AssertionError("The expected payload didn't arrive in the topic")
def subscribe(self, topic, qos, timeout=1, limit=1):
    """ Subscribe to a topic and return a list of message payloads received
        within the specified time.

        `topic` topic to subscribe to

        `qos` quality of service for the subscription

        `timeout` duration of subscription. Specify 0 to enable background
        looping (async)

        `limit` the max number of payloads that will be returned. Specify 0
        for no limit

        Examples:

        Subscribe and get a list of all messages received within 5 seconds
        | ${messages}= | Subscribe | test/test | qos=1 | timeout=5 | limit=0 |

        Subscribe and get 1st message received within 60 seconds
        | @{messages}= | Subscribe | test/test | qos=1 | timeout=60 | limit=1 |
        | Length should be | ${messages} | 1 |
    """
    seconds = convert_time(timeout)
    # Drop payloads that were already handed out by a previous call.
    # NOTE(review): `self.msg_tmp` is assigned `self._messages` further down
    # WITHOUT copying, so both names alias the same dict (and the same
    # per-topic lists); as written the comprehension can discard messages
    # that were never returned to the caller. Confirm whether a snapshot
    # copy was intended here.
    try:
        self._messages[topic] = [
            i for i in self._messages[topic] if i not in self.msg_tmp[topic]
        ]
    except KeyError:
        # First subscription to this topic: start with an empty list.
        self._messages[topic] = []
    limit = int(limit)
    self._subscribed = False
    logger.info('Subscribing to topic: %s' % topic)
    self._mqttc.on_subscribe = self._on_subscribe
    self._mqttc.subscribe(str(topic), int(qos))
    self._mqttc.on_message = self._on_message_list
    if seconds == 0:
        # timeout=0 -> async mode: run the network loop in a background
        # thread and return whatever has been collected so far; use the
        # `Listen` keyword to fetch messages later.
        logger.info('Starting background loop')
        self._background_mqttc = self._mqttc
        self._background_mqttc.loop_start()
        self.msg_tmp = self._messages
        return self._messages[topic]
    # Synchronous mode: pump the network loop until the timeout elapses
    # or `limit` messages have been gathered.
    timer_start = time.time()
    while time.time() < timer_start + seconds:
        if limit == 0 or len(self._messages[topic]) < limit:
            self._mqttc.loop()
        else:
            # workaround for client to ack the publish. Otherwise,
            # it seems that if client disconnects quickly, broker
            # will not get the ack and publish the message again on
            # next connect.
            time.sleep(1)
            break
    self.msg_tmp = self._messages
    return self._messages[topic]
def subscribe(self, topic, qos, timeout=1, limit=1):
    """Subscribe to a topic and return the payloads received in time.

    `topic` topic to subscribe to

    `qos` quality of service for the subscription

    `timeout` duration of subscription

    `limit` the max number of payloads that will be returned. Specify 0
    for no limit

    Examples:

    Subscribe and get a list of all messages received within 5 seconds
    | ${messages}= | Subscribe | test/test | qos=1 | timeout=5 | limit=0 |

    Subscribe and get 1st message received within 60 seconds
    | @{messages}= | Subscribe | test/test | qos=1 | timeout=60 | limit=1 |
    | Length should be | ${messages} | 1 |
    """
    seconds = convert_time(timeout)
    self._messages = []
    limit = int(limit)
    logger.info('Subscribing to topic: %s' % topic)
    self._mqttc.subscribe(str(topic), int(qos))
    self._mqttc.on_message = self._on_message_list
    deadline = time.time() + seconds
    while time.time() < deadline:
        if limit and len(self._messages) >= limit:
            # Give the client one more second to ack the publish before
            # breaking out; otherwise a quick disconnect can make the
            # broker re-deliver the message on the next connect.
            time.sleep(1)
            break
        self._mqttc.loop()
    return self._messages
def __init__(self, loop_timeout=LOOP_TIMEOUT):
    """Initialize library state; the MQTT client is created on connect.

    `loop_timeout` maximum time to wait in loops, accepted in any Robot
    time format and normalized to seconds.
    """
    self._loop_timeout = convert_time(loop_timeout)
    # Client that owns the background network loop, if async mode is used.
    self._background_mqttc = None
    # Per-topic lists of payloads received so far.
    self._messages = {}
    self._username = None
    self._password = None
    # Payloads that have already been returned to callers.
    self.msg_tmp = {}
def make_a_booking(name_room, day, hour_start, duration):
    """Try to book `name_room` on `day` from `hour_start` for `duration`.

    Returns True and inserts a row into `reservations` when the room is
    free for the requested interval, otherwise returns False.

    `name_room`  name of the room to book
    `day`        human-readable date string (parsed with dateparser)
    `hour_start` human-readable start time string
    `duration`   Robot-style duration string, converted to seconds
    """
    print(name_room, day, hour_start, duration)
    # connect to the localhost database
    cnx = mysql.connector.connect(password='******', user="******", database="alex")
    try:
        # day_only: the parsed date portion only (YYYY-MM-DD)
        day_only = str(dateparser.parse(day).date())
        # Combine date and start time into a pendulum datetime.
        pendulum_combined_day_and_hour_start = pendulum.parse(
            str(day_only) + " " + hour_start, strict=False)
        # Convert the user-supplied duration to seconds, then minutes.
        duration_in_minutes = convert_time(duration) / 60
        # End of the booking = start + duration.
        pendulum_combined_day_and_hour_end = \
            pendulum_combined_day_and_hour_start.add(minutes=duration_in_minutes)
        room_available = is_the_room_available(
            name_room, day_only, pendulum_combined_day_and_hour_start,
            pendulum_combined_day_and_hour_end, cnx)
        if not room_available:
            print(
                'Oh wait ..., I just checked and unfortunately this room is not available :-('
            )
            return False
        print('Hey, I just checked and the room is available :-)')
        cur_insert_entry = cnx.cursor(buffered=True)
        try:
            # Parameterized query: never interpolate user input into SQL.
            cur_insert_entry.execute(
                "INSERT INTO reservations (name_room, hour_start, hour_end)"
                " VALUES (%s, %s, %s);",
                (name_room, str(pendulum_combined_day_and_hour_start),
                 str(pendulum_combined_day_and_hour_end)))
            cnx.commit()
        finally:
            cur_insert_entry.close()
        return True
    finally:
        # Close the connection on every path; the original leaked it when
        # the room was unavailable.
        cnx.close()
def subscribe_and_receive1(self, topic, qos, timeout=1):
    """Subscribe to `topic` and return one received payload, JSON-decoded.

    `topic` topic to subscribe to

    `qos` quality of service for the subscription

    `timeout` duration of subscription

    Note: if nothing arrives within `timeout`, `json.loads("")` raises an
    error, since the buffered message stays empty.

    Examples:

    Subscribe and get one message received within 5 seconds
    | ${messages}= | Subscribe | test/test | qos=1 | timeout=5
    """
    wait_seconds = convert_time(timeout)
    self._message = ""
    logger.info('Subscribing to topic: %s' % topic)
    self._mqttc.subscribe(str(topic), int(qos))
    self._mqttc.on_message = self._on_message_one
    deadline = time.time() + wait_seconds
    while time.time() < deadline:
        self._mqttc.loop()
    return json.loads(self._message)
def subscribe_and_validate(self, topic, qos, payload, timeout=1):
    """Subscribe to a topic and verify the expected payload shows up.

    Requires that a connection has been established using the `Connect`
    keyword. The payload can be specified as a python regular expression.
    If the specified payload is not received within timeout, an
    AssertionError is thrown.

    `topic`   topic to subscribe to

    `qos`     quality of service for the subscription

    `payload` payload (message) that is expected to arrive

    `timeout` time to wait for the payload to arrive

    Examples:
    | Subscribe And Validate | test/test | 1 | test message |
    """
    wait_seconds = convert_time(timeout)
    self._verified = False
    logger.info('Subscribing to topic: %s' % topic)
    self._mqttc.subscribe(str(topic), int(qos))
    self._payload = str(payload)
    self._mqttc.on_message = self._on_message
    start = time.time()
    # Pump the client loop; the on_message callback sets _verified when a
    # matching payload arrives.
    while time.time() - start < wait_seconds:
        if self._verified:
            break
        self._mqttc.loop()
    if not self._verified:
        raise AssertionError(
            "The expected payload didn't arrive in the topic")
def listen(self, topic, timeout=1, limit=1):
    """ Listen to a topic and return a list of message payloads received
        within the specified time.

        Requires an async Subscribe to have been called previously.

        `topic` topic to listen to

        `timeout` duration to listen

        `limit` the max number of payloads that will be returned. Specify 0
        for no limit

        Examples:

        Listen and get a list of all messages received within 5 seconds
        | ${messages}= | Listen | test/test | timeout=5 | limit=0 |

        Listen and get 1st message received within 60 seconds
        | @{messages}= | Listen | test/test | timeout=60 | limit=1 |
        | Length should be | ${messages} | 1 |
    """
    # Robot passes keyword arguments as strings; convert `limit` BEFORE
    # its first use (the original converted it only after the early-return
    # comparison below, which raised TypeError for string limits).
    limit = int(limit)
    # Wait for the background subscription (Subscribe with timeout=0) to
    # be acknowledged before trying to read messages.
    timer_start = time.time()
    while time.time() < timer_start + self._loop_timeout:
        if self._subscribed:
            break
        time.sleep(1)
    if not self._subscribed:
        logger.warn('Cannot listen when not subscribed to a topic')
        return []
    if topic not in self._messages:
        logger.warn('Cannot listen when not subscribed to topic: %s' % topic)
        return []
    # If enough messages have already been gathered, return them
    if limit != 0 and len(self._messages[topic]) >= limit:
        messages = self._messages[topic][:]  # Copy the list's contents
        self._messages[topic] = []
        return messages[-limit:]
    seconds = convert_time(timeout)
    logger.info('Listening on topic: %s' % topic)
    timer_start = time.time()
    while time.time() < timer_start + seconds:
        if limit == 0 or len(self._messages[topic]) < limit:
            # If the loop is running in the background
            # merely sleep here for a second or so and continue
            # otherwise, do the loop ourselves
            if self._background_mqttc:
                time.sleep(1)
            else:
                self._mqttc.loop()
        else:
            # workaround for client to ack the publish. Otherwise,
            # it seems that if client disconnects quickly, broker
            # will not get the ack and publish the message again on
            # next connect.
            time.sleep(1)
            break
    messages = self._messages[topic][:]  # Copy the list's contents
    self._messages[topic] = []
    return messages[-limit:] if limit != 0 else messages
def _convert(self, value, explicit_type=True):
    """Convert `value` to a timedelta using Robot's time-string parsing."""
    as_timedelta = convert_time(value, result_format='timedelta')
    return as_timedelta
def __init__(self, loop_timeout=LOOP_TIMEOUT):
    """Initialize the library with a normalized loop timeout.

    `loop_timeout` is accepted in any Robot time format and converted
    to seconds. Credentials are populated later by other keywords.
    """
    self._loop_timeout = convert_time(loop_timeout)
    # No credentials until explicitly set.
    self._username = None
    self._password = None
def docker_compose_down(self,
                        timeout: str = None,
                        rmi: str = None,
                        volumes: bool = True,
                        remove_orphans: bool = True) -> None:
    """ Stops and removes containers, networks, volumes and images created by 'up'.

    All parameters are forwarded to `docker-compose`.

    As this function is intended to be used in teardown, to help keep the
    test environment clean, the following parameters are added to
    `docker-compose down` by default:
    - --remove-orphans
    - --volumes

    `timeout` Specify shutdown timeout in seconds (default: 10).

    `rmi` Remove images. Specify 'all' to remove all images used by any
    service or 'local' to remove only images that don't have a custom tag
    set by the 'image' field.

    `volumes` Remove named volumes declared in the 'volumes' section of the
    Compose file and anonymous volumes attached to containers.

    `remove_orphans` Remove containers for services not defined in the
    Compose file.

    = Examples =

    Stop And Remove All Containers And Volumes
    | Docker Compose Down |
    """
    # Fixed annotation: `[str]` was a list literal, not a type.
    cmd: List[str] = self._prepare_base_cmd()
    cmd.append('down')
    # --timeout is only available from docker-compose 1.18.0 onwards.
    if self._docker_compose_version >= packaging.version.parse('1.18.0'):
        if timeout is None:
            timeout = '10 seconds'
        cmd.append('--timeout')
        # convert_time accepts Robot time strings; docker wants integer seconds.
        cmd.append(str(int(convert_time(timeout))))
    elif timeout is not None:
        logger.warn(
            'Docker Compose Down: --timeout option'
            ' is not supported for docker-compose version {}'.format(
                self._docker_compose_version))
    if rmi is not None:
        cmd.append('--rmi')
        cmd.append(rmi)
    if volumes:
        cmd.append('--volumes')
    if remove_orphans:
        cmd.append('--remove-orphans')
    try:
        subprocess.check_output(cmd,
                                cwd=self._project_directory,
                                stdin=subprocess.DEVNULL,
                                stderr=subprocess.STDOUT,
                                encoding=sys.getdefaultencoding(),
                                text=True)
    except subprocess.CalledProcessError as e:
        raise AssertionError('Failed to shutdown services: {}'.format(
            e.output.rstrip())) from e
def docker_compose_up(self,
                      timeout: str = '10 seconds',
                      no_deps: bool = False,
                      force_recreate: bool = True,
                      always_recreate_deps: bool = None,
                      no_recreate: bool = False,
                      no_build: bool = False,
                      no_start: bool = False,
                      build: bool = False,
                      renew_anon_volumes: bool = True,
                      remove_orphans: bool = True,
                      service_names: List[str] = None) -> None:
    """Builds, (re)creates, starts, and attaches to containers for a service.

    All parameters are forwarded to `docker-compose`.

    `no_deps` Don't start linked services (default: False).

    `force_recreate` Recreate containers even if their configuration and
    images haven't changed (default: False).

    `always_recreate_deps` Recreate dependent containers (default: False).
    Incompatible with 'no_recreate`.

    `no_recreate` If containers already exist, don't recreate

    `no_build` Don't build an image, even if it's missing (default: False).

    `no_start` Don't start the services after creating them
    (default: False).

    `build` Build images before starting containers (default: False).

    `renew_anon_volumes` Recreate anonymous volumes instead of retrieving
    data from the previous containers (default: True).

    `remove_orphans` Remove containers for services not defined in the
    Compose file (default: True).

    `service_names` A list of service names to be started. All services are
    started by default.

    = Examples =

    Start All Services
    | Docker Compose Up |
    """
    # Fixed annotation: `[str]` was a list literal, not a type.
    cmd: List[str] = self._prepare_base_cmd()
    cmd.append('up')
    cmd.append('--timeout')
    cmd.append(str(int(convert_time(timeout))))
    cmd.append('-d')
    if no_deps:
        cmd.append('--no-deps')
    if force_recreate:
        cmd.append('--force-recreate')
    # --always-recreate-deps is only available from 1.19.0 onwards.
    if self._docker_compose_version >= packaging.version.parse('1.19.0'):
        if always_recreate_deps is None or always_recreate_deps is True:
            cmd.append('--always-recreate-deps')
    elif always_recreate_deps is not None:
        logger.warn(
            'Docker Compose Up: --always-recreate-deps option'
            ' is not supported for docker-compose version {}'.format(
                self._docker_compose_version))
    if no_recreate:
        cmd.append('--no-recreate')
    if no_build:
        cmd.append('--no-build')
    if no_start:
        cmd.append('--no-start')
    if build:
        cmd.append('--build')
    # --renew-anon-volumes is only available from 1.19.0 onwards.
    if self._docker_compose_version >= packaging.version.parse('1.19.0'):
        if renew_anon_volumes is None or renew_anon_volumes is True:
            cmd.append('--renew-anon-volumes')
    elif renew_anon_volumes is not None:
        # Fixed copy-paste bug: the original tested `always_recreate_deps`
        # here, so this warning could never fire for renew_anon_volumes.
        logger.warn(
            'Docker Compose Up: --renew-anon-volumes option'
            ' is not supported for docker-compose version {}'.format(
                self._docker_compose_version))
    if remove_orphans:
        cmd.append('--remove-orphans')
    if service_names is not None:
        cmd.extend(service_names)
    try:
        subprocess.check_output(cmd,
                                cwd=self._project_directory,
                                stdin=subprocess.DEVNULL,
                                stderr=subprocess.STDOUT,
                                encoding=sys.getdefaultencoding(),
                                text=True)
    except subprocess.CalledProcessError as e:
        raise AssertionError('Failed to start services: {}'.format(
            e.output.rstrip())) from e
def __init__(self, loop_timeout=LOOP_TIMEOUT):
    """Store the normalized loop timeout; the client is created later.

    `loop_timeout` is accepted in any Robot time format and converted
    to seconds.
    """
    self._loop_timeout = convert_time(loop_timeout)
    # The MQTT client is instantiated by the connect keyword.
    self._mqttc = None
def _convert_timedelta(self, name, value, explicit_type=True):
    """Convert `value` to a timedelta, delegating bad input to error handling."""
    try:
        result = convert_time(value, result_format='timedelta')
    except ValueError:
        # Let the shared error handler decide how to report/recover.
        result = self._handle_error(name, value, 'timedelta', explicit_type)
    return result