def __init__(self, threadID: int, name: str):
    threading.Thread.__init__(self)
    self.threadID = threadID
    self.name = name

    self.logger: logging.Logger = get_logger(__name__)
    self.INFO_QUIET: int = main_config.INFO_QUIET
    self.VERBOSE: int = main_config.VERBOSE

    self.host_name = "0.0.0.0"
    self.port = 8930
def __init__(self, request: bytes, client_address: Tuple[str, int], server: socketserver.BaseServer):
    self.logger: logging.Logger = get_logger(__name__)
    self.INFO_QUIET: int = main_config.INFO_QUIET
    self.VERBOSE: int = main_config.VERBOSE

    # TODO: Implement Global Handle On Repo
    # Initiate Repo For Server
    self.repo: Optional[Dolt] = None
    self.initRepo(path=config.ARCHIVE_TWEETS_REPO_PATH, create=False, url=config.ARCHIVE_TWEETS_REPO_URL)

    super().__init__(request, client_address, server)
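# A minimal, self-contained sketch (an assumption, not code from this repo) of how a
# listener thread like the one above can serve a socketserver request handler on its
# host_name/port pair. EchoHandler and ServerThread are illustrative stand-ins for
# the real classes.
import socketserver
import threading


class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)  # Read up to 1 KiB from the client
        self.request.sendall(data)      # Echo it straight back


class ServerThread(threading.Thread):
    def __init__(self, threadID: int, name: str):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.host_name = "0.0.0.0"
        self.port = 8930

    def run(self):
        # serve_forever() blocks, so running it inside Thread.run() keeps the
        # main thread free for the other worker threads.
        with socketserver.TCPServer((self.host_name, self.port), EchoHandler) as server:
            server.serve_forever()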
def __init__(self, threadID: int, name: str, threadLock: threading.Lock, requested_wait_time: int = 60, commit: bool = True):
    threading.Thread.__init__(self)
    self.threadID = threadID
    self.name = name

    self.logger: logging.Logger = get_logger(__name__)
    self.INFO_QUIET: int = main_config.INFO_QUIET
    self.VERBOSE: int = main_config.VERBOSE
    self.repo: Optional[Dolt] = None

    # Thread Lock To Share With Rover
    self.threadLock = threadLock

    # Setup Repo
    self.initRepo(path=config.ARCHIVE_TWEETS_REPO_PATH, create=False, url=config.ARCHIVE_TWEETS_REPO_URL)

    # Setup For Twitter API
    with open(config.CREDENTIALS_FILE_PATH, "r") as file:
        credentials = json.load(file)

    # Token
    token: BearerAuth = BearerAuth(token=credentials['BEARER_TOKEN'])

    # Twitter API V2 and Potential Alt Auth
    if "ALT_BEARER_TOKEN" in credentials:
        alt_token: BearerAuth = BearerAuth(token=credentials['ALT_BEARER_TOKEN'])
        self.twitter_api: TweetAPI2 = TweetAPI2(auth=token, alt_auth=alt_token)
    else:
        self.twitter_api: TweetAPI2 = TweetAPI2(auth=token)

    # Wait Time Remaining
    self.requested_wait_time = requested_wait_time
    self.wait_time: Optional[int] = None

    # Should Commit Data (For Debugging)
    self.commit: bool = commit

    # Media Threads
    self.media_threads: List[int] = []
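# BearerAuth is not shown in these snippets; below is a minimal sketch of such a
# helper, assuming it behaves like a requests-style auth object that injects the
# token into each request's Authorization header.
import requests


class BearerAuth(requests.auth.AuthBase):
    """Attach a Twitter API v2 bearer token to outgoing requests."""

    def __init__(self, token: str):
        self.token = token

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        request.headers["Authorization"] = "Bearer {token}".format(token=self.token)
        return request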
def __init__(self, threadID: int, name: str, video_url: str, output_directory: str, callback: classmethod, tweet_id: int):
    threading.Thread.__init__(self)
    self.threadID = threadID
    self.name = name

    # Logger
    self.logger: logging.Logger = get_logger(__name__)
    self.INFO_QUIET: int = main_config.INFO_QUIET
    self.VERBOSE: int = main_config.VERBOSE

    # Video URL
    self.video_url: str = video_url

    # Output Directory
    self.output_directory: str = output_directory

    # Callback
    self.callback: classmethod = callback
    self.tweet_id: int = tweet_id
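# A hypothetical sketch (not from this repo) of how the stored callback and
# tweet_id pair up once the download worker finishes. run_download and
# on_video_downloaded are illustrative stand-ins for the real thread body and
# callback.
import os
from typing import Callable


def on_video_downloaded(tweet_id: int, file_path: str) -> None:
    # Invoked by the worker thread after the file lands on disk.
    print("Tweet {tweet_id}: saved video to {file_path}".format(tweet_id=tweet_id, file_path=file_path))


def run_download(video_url: str, output_directory: str, callback: Callable[[int, str], None], tweet_id: int) -> None:
    file_path = os.path.join(output_directory, "{tweet_id}.mp4".format(tweet_id=tweet_id))
    # ... fetch video_url and write the bytes to file_path ...
    callback(tweet_id, file_path)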
def __init__(self, threadID: int, name: str, threadLock: threading.Lock, requested_wait_time: int = 60, reply: bool = True):
    threading.Thread.__init__(self)
    self.threadID = threadID
    self.name = name

    self.logger: logging.Logger = get_logger(__name__)
    self.INFO_QUIET: int = main_config.INFO_QUIET
    self.VERBOSE: int = main_config.VERBOSE

    self.status_file: str = config.STATUS_FILE_PATH
    self.credentials_file: str = config.CREDENTIALS_FILE_PATH

    # Thread Lock To Share With Archiver
    self.threadLock = threadLock

    # Wait Time Remaining
    self.requested_wait_time = requested_wait_time
    self.wait_time: Optional[int] = None

    # For Debugging
    config.REPLY = reply

    # TODO: Figure Out How To Automatically Determine This
    self.user_id: int = config.TWITTER_USER_ID
    self.user_name: str = config.TWITTER_USER_HANDLE

    # Debugging Paths
    self.logger.info("Working Directory: {working_directory}".format(working_directory=config.WORKING_DIRECTORY))

    # Setup For Twitter API
    with open(self.credentials_file, "r") as file:
        self.__credentials: dict = json.load(file)
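# The keys read out of the credentials file in these snippets are BEARER_TOKEN
# (required) and ALT_BEARER_TOKEN (optional), so a minimal credentials.json can
# be generated like this (an illustrative sketch, not part of this repo):
import json

sample_credentials = {
    "BEARER_TOKEN": "<your Twitter API v2 bearer token>",
    "ALT_BEARER_TOKEN": "<optional second token used as alternate auth>"
}

with open("credentials.json", "w") as file:
    json.dump(sample_credentials, file, indent=4)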
from sqlalchemy.engine import Engine
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy import Table, Column
from sqlalchemy.dialects import mysql, postgresql
from doltpy.etl.sql_sync.db_tools import DoltAsSourceWriter, get_target_writer_helper
from doltpy.core.system_helpers import get_logger
from typing import List

logger = get_logger(__name__)

POSTGRES_TO_DOLT_TYPE_MAPPINGS = {
    postgresql.CIDR: mysql.VARCHAR(43),
    postgresql.INET: mysql.VARCHAR(43),
    postgresql.MACADDR: mysql.VARCHAR(43),
    postgresql.JSON: mysql.LONGTEXT,
    postgresql.JSONB: mysql.LONGTEXT,
    postgresql.ARRAY: mysql.LONGTEXT,
    postgresql.UUID: mysql.VARCHAR(43),
    postgresql.BYTEA: mysql.LONGTEXT
}


def get_target_writer(engine: Engine, update_on_duplicate: bool = True) -> DoltAsSourceWriter:
    """
    Given a SQLAlchemy engine, returns a function that takes a map of table names (optionally
    schema-prefixed) to lists of tuples and writes each list of tuples to the table in question.
    Each tuple must have its data in the order of the target table's columns, sorted lexicographically.
    :param engine: database connection.
    :param update_on_duplicate: perform upserts instead of failing on duplicate primary keys
    :return:
    """
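# A usage sketch based on the docstring above (the connection string and the
# "users" table are stand-ins): the returned writer is called with a map of
# table names to lists of row tuples.
from sqlalchemy import create_engine

engine = create_engine("postgresql://user:password@localhost:5432/example_db")
writer = get_target_writer(engine, update_on_duplicate=True)

# Tuples follow the target table's columns sorted lexicographically,
# e.g. (email, id, name) for a table declared as (id, name, email).
writer({"users": [("ada@example.com", 1, "Ada"), ("alan@example.com", 2, "Alan")]})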
VERBOSE = logging.DEBUG - 1
logging.addLevelName(VERBOSE, "VERBOSE")
INFO_QUIET = logging.INFO + 1
logging.addLevelName(INFO_QUIET, "INFO_QUIET")

# Argument Parser Setup
parser = argparse.ArgumentParser(description='Arguments For Tweet Searcher')
parser.add_argument("-log", "--log",
                    help="Set Log Level (Defaults to INFO_QUIET)",
                    dest='logLevel',
                    default='INFO_QUIET',
                    type=str.upper,
                    choices=['VERBOSE', 'DEBUG', 'INFO', 'INFO_QUIET', 'WARNING', 'ERROR', 'CRITICAL'])

# Logger
logger: logging.Logger = get_logger(__name__)


def main(arguments: argparse.Namespace):
    logger.setLevel(arguments.logLevel)  # This Script's Log Level

    global repo
    repo = create_repo_if_not_exists("./county-level-votes")
    create_tables_if_not_exists()

    parse_and_insert_votes("California", "working/california-18-presidential.csv")
    parse_and_insert_votes("Nevada", "working/nevada.csv")


def create_repo_if_not_exists(path: str) -> Dolt:
    try:
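# A small convenience sketch (an assumption, not from this script) showing how the
# custom levels registered above are emitted: the logging module has no
# logger.verbose() method, so callers pass the numeric level to logger.log().
def log_verbose(log: logging.Logger, message: str):
    # The isEnabledFor() guard skips the call entirely when the level is filtered out.
    if log.isEnabledFor(VERBOSE):
        log.log(VERBOSE, message)


def log_info_quiet(log: logging.Logger, message: str):
    if log.isEnabledFor(INFO_QUIET):
        log.log(INFO_QUIET, message)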
def __init__(self):
    # Logger
    self.logger: logging.Logger = get_logger(__name__)