def template_html_to_pdf(
    self,
    template: str = None,
    filename: str = None,
    variables: dict = None,
    create_dirs: bool = True,
    exists_ok: bool = True,
) -> None:
    """Generate a PDF document from an HTML template file.

    Placeholders of the form ``{{key}}`` in the template are replaced
    with the corresponding values from ``variables``.

    :param template: filepath to HTML template
    :param filename: filepath where to save PDF document
    :param variables: dictionary of variables to fill into template,
        defaults to {}
    :param create_dirs: directory structure is created if it is missing,
        default `True`
    :param exists_ok: file is overwritten if it exists, default `True`
    """
    required_param([template, filename], "template_html_to_pdf")
    with open(template, "r") as handle:
        html = handle.read()
    for key, value in (variables or {}).items():
        html = html.replace("{{" + key + "}}", str(value))
    self._write_html_to_pdf(
        html, self.output_directory / filename, create_dirs, exists_ok
    )
def mute_run_on_failure(
    self, keywords: Any = None, optional_keyword_to_run: str = None
) -> None:
    """Disable `SeleniumLibrary` default behaviour of running a
    keyword on failure, for the given keywords.

    Keyword names do not need to be full names of keywords, ie. all
    keywords matching even partially will be affected. `Run Keyword`
    would match all `BuiltIn` library keywords (17 keywords in
    RF 3.2.1) and of course all `Run Keyword` named keywords in any
    resource and/or library file which are imported would be matched
    also.

    By default `SeleniumLibrary` executes `Capture Page Screenshot`
    on failure. If `optional_keyword_to_run` is not given then nothing
    is done on failure, but this can be set to override
    `SeleniumLibrary` default behaviour for a set of keywords.

    :param keywords: list of keywords to mute
    :param optional_keyword_to_run: name of the keyword to execute
        if keyword defined by `keywords` fail
    """
    required_param(keywords, "mute_run_on_failure")
    keyword_list = keywords if isinstance(keywords, list) else [keywords]
    for name in keyword_list:
        robotized = self._robotize_keyword(name)
        if robotized not in self.KEYWORDS_TO_MUTE:
            self.KEYWORDS_TO_MUTE.append(robotized)
    # Grab the RPA.Browser instance if it is imported in this run;
    # otherwise remember that it is unavailable.
    status, rpabrowser = BuiltIn().run_keyword_and_ignore_error(
        "get_library_instance", "RPA.Browser"
    )
    self.rpabrowser_instance = rpabrowser if status == "PASS" else None
    self.optional_keyword_to_run_failure = optional_keyword_to_run
def tweet(self, content: str = None) -> None:
    """Post a status update (tweet) with the given content.

    :param content: text for the status update
    """
    required_param(content, "tweet")
    self.api.update_status(content)
def get_user_tweets(self, username: str = None, count: int = 100) -> list:
    """Get tweets from the timeline of the given user.

    :param username: whose tweets to get
    :param count: maximum number of tweets, defaults to 100
    :return: list of user tweets
    """
    required_param(username, "get_user_tweets")
    results = []
    try:
        # Each timeline status is converted into the library's Tweet type.
        for status in self.api.user_timeline(id=username, count=count):
            results.append(
                Tweet(
                    created_at=status.created_at,
                    id=status.id,
                    tweet_id_str=status.id_str,
                    text=status.text,
                    in_reply_to_screen_name=status.in_reply_to_screen_name,
                    lang=status.lang,
                    name=status.user.name,
                    screen_name=status.user.screen_name,
                    hashtags=[tag["text"] for tag in status.entities["hashtags"]],
                    is_truncated=status.truncated,
                    favorite_count=status.favorite_count,
                    retweeted=status.retweeted,
                    retweet_count=status.retweet_count,
                )
            )
    except TweepError as err:
        self.logger.warning("Twitter timeline failed: %s", str(err))
    return results
def download_files(
    self,
    bucket_name: str = None,
    files: list = None,
    target_directory: str = None,
) -> int:
    """Download files from bucket to local filesystem.

    :param bucket_name: name for the bucket
    :param files: list of S3 object names
    :param target_directory: location for the downloaded files,
        default current directory
    :return: number of files downloaded
    """
    required_param([bucket_name, files, target_directory], "download_files")
    client = self._get_client_for_service("s3")
    download_count = 0
    # NOTE: return annotation fixed from `-> list`; the method has
    # always returned an integer count.
    for object_name in files:
        try:
            download_path = str(Path(target_directory) / Path(object_name).name)
            response = client.download_file(bucket_name, object_name, download_path)
            # boto3 download_file returns None on success
            if response is None:
                download_count += 1
        except ClientError as e:
            self.logger.error("Download error with '%s': %s", object_name, str(e))
    return download_count
def html_to_pdf(
    self,
    content: str = None,
    filename: str = None,
    variables: dict = None,
    create_dirs: bool = True,
    exists_ok: bool = True,
) -> None:
    """Generate a PDF document from a string of HTML content.

    Placeholders of the form ``{{key}}`` in the content are replaced
    with the corresponding values from ``variables``.

    :param content: HTML content
    :param filename: filepath where to save PDF document
    :param variables: dictionary of variables to fill into template,
        defaults to {}
    :param create_dirs: directory structure is created if it is missing,
        default `True`
    :param exists_ok: file is overwritten if it exists, default `True`
    """
    required_param([content, filename], "html_to_pdf")
    # Round-trip the content through UTF-8 / latin-1 before substitution.
    html = content.encode("utf-8").decode("latin-1")
    for key, value in (variables or {}).items():
        html = html.replace("{{" + key + "}}", str(value))
    self._write_html_to_pdf(
        html, self.output_directory / filename, create_dirs, exists_ok
    )
def delete_queue(self, queue_name: str = None):
    """Delete queue with name.

    :param queue_name: URL of the queue to delete
    :return: delete queue response as dict
    """
    required_param(queue_name, "delete_queue")
    client = self._get_client_for_service("sqs")
    # boto3 client operations accept keyword arguments only; the
    # original positional call `delete_queue(queue_name)` raises
    # TypeError at runtime.
    response = client.delete_queue(QueueUrl=queue_name)
    return response
def delete_message(self, receipt_handle: str = None):
    """Delete message in the queue.

    :param receipt_handle: message handle to delete
    :return: delete message response as dict
    """
    required_param(receipt_handle, "delete_message")
    sqs = self._get_client_for_service("sqs")
    return sqs.delete_message(
        QueueUrl=self.queue_url, ReceiptHandle=receipt_handle
    )
def detect_entities(self, text: str = None, lang="en") -> dict:
    """Inspects text for named entities, and returns information
    about them.

    :param text: A UTF-8 text string. Each string must contain fewer
        that 5,000 bytes of UTF-8 encoded characters
    :param lang: language code of the text, defaults to "en"
    """
    required_param(text, "detect_entities")
    comprehend = self._get_client_for_service("comprehend")
    return comprehend.detect_entities(Text=text, LanguageCode=lang)
def register_protected_keywords(self, names: Any = None) -> None:
    """Register keywords that are not going to be logged into
    Robot Framework logs.

    :param names: list of keywords to protect
    """
    required_param(names, "register_protected_keywords")
    name_list = names if isinstance(names, list) else [names]
    for item in name_list:
        robotized = self._robotize_keyword(item)
        # Avoid duplicate registrations.
        if robotized not in self.KEYWORDS_TO_PROTECT:
            self.KEYWORDS_TO_PROTECT.append(robotized)
def only_info_level(self, names: Any = None):
    """Register keywords that are allowed only INFO level logging.

    :param names: list of keywords to protect
    """
    required_param(names, "only_info_level")
    name_list = names if isinstance(names, list) else [names]
    for item in name_list:
        robotized = self._robotize_keyword(item)
        # Avoid duplicate registrations.
        if robotized not in self.INFO_LEVEL_KEYWORDS:
            self.INFO_LEVEL_KEYWORDS.append(robotized)
def get_user_profile(self, username: str = None) -> dict:
    """Get user's Twitter profile.

    :param username: whose profile to get
    :return: profile as dictionary, or None on Twitter API error
    """
    required_param(username, "get_user_profile")
    try:
        user = self.api.get_user(username)
    except TweepError:
        return None
    return user._json  # pylint: disable=W0212
def unfollow(self, user: str = None) -> bool:
    """Unfollow Twitter user.

    :param user: screen name of the user
    :return: `True` if user was unfollowed, `False` if not
    """
    # Docstring fixed: it previously claimed the return indicated
    # whether the user "was followed", but this keyword unfollows.
    required_param(user, "unfollow")
    try:
        self.api.destroy_friendship(user)
        return True
    except TweepError:
        self.logger.warning("Could not unfollow user: %s", user)
        return False
def only_info_level(self, names: Union[str, List] = None):
    """Register keywords that are allowed only INFO level logging.

    :param names: list of keywords to protect
    """
    required_param(names, "only_info_level")
    name_list = names if isinstance(names, list) else [names]
    for item in name_list:
        key = self._normalize(item)
        # Avoid duplicate registrations.
        if key not in self.INFO_LEVEL_KEYWORDS:
            self.INFO_LEVEL_KEYWORDS.append(key)
def delete_bucket(self, bucket_name: str = None) -> bool:
    """Delete S3 bucket with name.

    :param bucket_name: name for the bucket
    :return: boolean indicating status of operation
    """
    required_param(bucket_name, "delete_bucket")
    client = self._get_client_for_service("s3")
    try:
        response = client.delete_bucket(Bucket=bucket_name)
    except ClientError as error:
        self.logger.error(error)
        return False
    # Successful bucket deletion responds with HTTP 204 (No Content).
    return response["ResponseMetadata"]["HTTPStatusCode"] == 204
def list_files(self, bucket_name) -> list:
    """List files in the bucket.

    :param bucket_name: name for the bucket
    :return: list of files (empty list on error or empty bucket)
    """
    required_param(bucket_name, "list_files")
    client = self._get_client_for_service("s3")
    try:
        response = client.list_objects_v2(Bucket=bucket_name)
    except ClientError as error:
        self.logger.error(error)
        return []
    # "Contents" is absent from the response when the bucket is empty.
    return response.get("Contents", [])
def detect_sentiment(self, text: str = None, lang="en") -> dict:
    """Inspects text and returns an inference of the prevailing
    sentiment.

    :param text: A UTF-8 text string. Each string must contain fewer
        that 5,000 bytes of UTF-8 encoded characters
    :param lang: language code of the text, defaults to "en"
    """
    required_param(text, "detect_sentiment")
    comprehend = self._get_client_for_service("comprehend")
    response = comprehend.detect_sentiment(Text=text, LanguageCode=lang)
    # Missing response keys are reported as False instead of raising.
    return {
        "Sentiment": response.get("Sentiment", False),
        "Score": response.get("SentimentScore", False),
    }
def upload_file(
    self, bucket_name: str = None, filename: str = None, object_name: str = None
) -> tuple:
    """Upload single file into bucket.

    If `object_name` is not given then basename of the file is used
    as `object_name`.

    :param bucket_name: name for the bucket
    :param filename: filepath for the file to be uploaded
    :param object_name: name of the object in the bucket, defaults to None
    :return: tuple of upload status and error
    """
    required_param([bucket_name, filename], "upload_file")
    target = object_name if object_name is not None else Path(filename).name
    return self._s3_upload_file(bucket_name, filename, target)
def unlike(self, tweet: Tweet = None) -> bool:
    """Unlike a tweet.

    :param tweet: as a class `Tweet`
    :return: `True` if Tweet was unliked, `False` if not
    """
    required_param(tweet, "unlike")
    try:
        self.api.destroy_favorite(tweet.id)
    except TweepError:
        self.logger.warning(
            'Could not unlike tweet "%s" by user "%s"',
            tweet.text,
            tweet.screen_name,
        )
        return False
    return True
def template_html_to_pdf(
    self, template: str = None, filename: str = None, variables: dict = None
) -> None:
    """Use HTML template file to generate PDF file.

    Placeholders of the form ``{{key}}`` in the template are replaced
    with the corresponding values from ``variables``.

    :param template: filepath to HTML template
    :param filename: filepath where to save PDF document
    :param variables: dictionary of variables to fill into template,
        defaults to {}
    """
    required_param([template, filename], "template_html_to_pdf")
    with open(template, "r") as handle:
        html = handle.read()
    for key, value in (variables or {}).items():
        html = html.replace("{{" + key + "}}", str(value))
    self._write_html_to_pdf(html, self.output_directory / filename)
def send_message(
    self,
    message: str = None,
    message_attributes: dict = None,
    delay: int = 10,
) -> dict:
    """Send message to the queue.

    :param message: body of the message
    :param message_attributes: attributes of the message
    :param delay: seconds to delay delivery of the message,
        defaults to 10 (the previously hard-coded value)
    :return: send message response as dict
    """
    required_param(message, "send_message")
    client = self._get_client_for_service("sqs")
    if message_attributes is None:
        message_attributes = {}
    response = client.send_message(
        QueueUrl=self.queue_url,
        DelaySeconds=delay,
        MessageAttributes=message_attributes,
        MessageBody=message,
    )
    return response
def mute_run_on_failure(
    self,
    keywords: Union[str, List] = None,
    optional_keyword_to_run: str = None,
) -> None:
    """Set keywords which should not execute `SeleniumLibrary`
    default behaviour of running keyword on failure.

    Keyword names do not need to be full names of keywords, ie. all
    keywords matching even partially will be affected. `Run Keyword`
    would match all `BuiltIn` library keywords (17 keywords in
    RF 3.2.1) and of course all `Run Keyword` named keywords in any
    resource and/or library file which are imported would be matched
    also.

    By default `SeleniumLibrary` executes `Capture Page Screenshot`
    on failure. If `optional_keyword_to_run` is not given then nothing
    is done on failure, but this can be set to override
    `SeleniumLibrary` default behaviour for a set of keywords.

    :param keywords: list of keywords to mute
    :param optional_keyword_to_run: name of the keyword to execute
        if keyword defined by `keywords` fail
    :raises RuntimeError: when called outside a Robot Framework run
    """
    if not self._is_robot_running():
        raise RuntimeError("Not supported outside Robot Framework")
    required_param(keywords, "mute_run_on_failure")
    keyword_list = keywords if isinstance(keywords, list) else [keywords]
    for name in keyword_list:
        key = self._normalize(name)
        if key not in self.KEYWORDS_TO_MUTE:
            self.KEYWORDS_TO_MUTE.append(key)
    # Pair the optional keyword with every browser library instance
    # that is actually imported in this run.
    for library in ("RPA.Browser", "RPA.Browser.Selenium"):
        status, instance = BuiltIn().run_keyword_and_ignore_error(
            "get_library_instance", library
        )
        if status == "PASS":
            self.muted_optionals.append((instance, optional_keyword_to_run))
def html_to_pdf(
    self, content: str = None, filename: str = None, variables: dict = None
) -> None:
    """Use HTML content to generate PDF file.

    Placeholders of the form ``{{key}}`` in the content are replaced
    with the corresponding values from ``variables``.

    :param content: HTML content
    :param filename: filepath where to save PDF document
    :param variables: dictionary of variables to fill into template,
        defaults to {}
    """
    required_param([content, filename], "html_to_pdf")
    # Round-trip the content through UTF-8 / latin-1 before substitution.
    html = content.encode("utf-8").decode("latin-1")
    for key, value in (variables or {}).items():
        html = html.replace("{{" + key + "}}", str(value))
    self._write_html_to_pdf(html, self.output_directory / filename)
def upload_files(self, bucket_name: str = None, files: list = None) -> int:
    """Upload multiple files into bucket.

    Giving files as list of filepaths:
        ['/path/to/file1.txt', '/path/to/file2.txt']

    Giving files as list of dictionaries (including filepath and
    object name):
        [{'filepath':'/path/to/file1.txt', 'object_name': 'file1.txt'},
         {'filepath': '/path/to/file2.txt', 'object_name': 'file2.txt'}]

    :param bucket_name: name for the bucket
    :param files: list of files (2 possible ways, see above)
    :return: number of files uploaded
    """
    # NOTE: return annotation fixed from `-> list`; the method has
    # always returned an integer count.
    required_param([bucket_name, files], "upload_files")
    upload_count = 0
    for item in files:
        filepath = None
        object_name = None
        # `error` must be reset per item: the original code left it
        # unassigned on the happy path, so a malformed item could hit
        # NameError (first iteration) or reuse a stale error message.
        error = None
        if isinstance(item, dict):
            filepath = item["filepath"]
            object_name = item["object_name"]
        elif isinstance(item, str):
            filepath = item
            object_name = Path(item).name
        else:
            error = "incorrect input format for files"
        if filepath and object_name:
            uploaded, error = self._s3_upload_file(bucket_name, filepath, object_name)
            if uploaded:
                upload_count += 1
        if error:
            self.logger.warning("File upload failed with error: %s", error)
    return upload_count
def delete_files(self, bucket_name: str = None, files: list = None):
    """Delete files in the bucket.

    :param bucket_name: name for the bucket
    :param files: list of files to delete
    :return: number of files deleted or `False`
    """
    required_param(bucket_name, "delete_files")
    if not files:
        self.logger.warning(
            "Parameter `files` is empty. There is nothing to delete.")
        return False
    # Accept a comma-separated string as well as a list.
    if not isinstance(files, list):
        files = files.split(",")
    client = self._get_client_for_service("s3")
    try:
        targets = {"Objects": [{"Key": name} for name in files]}
        response = client.delete_objects(Bucket=bucket_name, Delete=targets)
    except ClientError as error:
        self.logger.error(error)
        return False
    return len(response.get("Deleted", []))
def text_search_tweets(
    self,
    query: str = None,
    count: int = 100,
    geocode: str = None,
    lang: str = None,
    locale: str = None,
    result_type: str = "mixed",
    until: str = None,
    since_id: str = None,
    max_id: str = None,
) -> list:
    """Search tweets defined by search query.

    Results types:

    - mixed : include both popular and real time results in the response
    - recent : return only the most recent results in the response
    - popular : return only the most popular results in the response

    :param query: search query string of 500 characters maximum,
        including operators
    :param count: maximum number of tweets, defaults to 100
    :param geocode: tweets by users located within a given radius of
        the given latitude/longitude
    :param lang: language code of tweets
    :param locale: language of the query you are sending
    :param result_type: type of search results you would prefer to
        receive, default "mixed"
    :param until: tweets created before the given date
    :param since_id: Returns only statuses with an ID greater than
    :param max_id: only statuses with an ID less than
    :return: list of matching tweets
    """
    required_param(query, "text_search_tweets")
    search_args = {
        "q": query,
        "count": count,
        "geocode": geocode,
        "lang": lang,
        "locale": locale,
        "result_type": result_type,
        "until": until,
        "since_id": since_id,
        "max_id": max_id,
    }
    matches = []
    try:
        # Each search result is converted into the library's Tweet type.
        for status in self.api.search(**search_args):
            matches.append(
                Tweet(
                    created_at=status.created_at,
                    id=status.id,
                    tweet_id_str=status.id_str,
                    text=status.text,
                    in_reply_to_screen_name=status.in_reply_to_screen_name,
                    lang=status.lang,
                    name=status.user.name,
                    screen_name=status.user.screen_name,
                    hashtags=[tag["text"] for tag in status.entities["hashtags"]],
                    is_truncated=status.truncated,
                    favorite_count=status.favorite_count,
                    retweeted=status.retweeted,
                    retweet_count=status.retweet_count,
                )
            )
    except TweepError as err:
        self.logger.warning("Twitter search failed: %s", str(err))
    return matches