class GlobalContainer(object):
    """Shared runtime container for batch jobs.

    Holds logging configuration, a MySQL connection (SQLAlchemy engine +
    session), an InfluxDB connection (v1 ``DataFrameClient`` or v2
    ``InfluxDBClient``), and helpers for job status/message bookkeeping
    in the ``ScriptStatus`` / ``LogMessage`` tables.

    NOTE(review): part of the original source was redacted
    ("http://*****:*****@…").  The connection attributes referenced by the
    methods below (``mysql_user``, ``mysql_password``, ``mysql_host``,
    ``mysql_db``, ``influx_host``, ``influx_port``, ``influx_user``,
    ``influx_pwd``, ``influx_db``, ``influx_version``, ``influx_token``,
    ``influx_org``, ``logger``, ``jobName``, ``runId``, ``numErrors``,
    ``errMsg``, ``numWarnings``, ``warnMsg``) are defined in the redacted
    region — restore them from version control; they are not invented here.
    """

    # --- logging defaults -------------------------------------------------
    log_root = "logs"
    log_level = 30                 # numeric logging level (30 == logging.WARNING)
    log_size = 5 * 1024 * 1024     # rotate log files at 5 MiB
    log_number = 10                # keep this many rotated files
    # NOTE(review): a *string*, not a bool — callers must compare to "True".
    debug_mode = "True"

    # TODO(review): credentials were redacted in the source.  Do not hardcode
    # secrets here; load them from configuration / environment instead.
    influx_url = "http://*****:*****@localhost"

    def connectMySQLDatabase(self):
        """Create the SQLAlchemy engine and session and ensure the schema exists.

        Builds a ``mysql+mysqlconnector`` DSN from the instance's MySQL
        settings, creates all mapped tables that do not yet exist
        (``checkfirst=True``), and stores the engine/session on
        ``self.eng`` / ``self.ses``.  Exits the process with status 99 on
        any failure.

        NOTE(review): the method name and the lines preceding the DSN build
        fell inside the redacted span; this is a reconstruction of the
        visible tail — confirm against version control.
        """
        try:
            cmd = (f'mysql+mysqlconnector://{self.mysql_user}:'
                   f'{self.mysql_password}@{self.mysql_host}/{self.mysql_db}')
            # Security fix: never log the raw DSN — it contains the password.
            self.logger.debug(
                f'mysql+mysqlconnector://{self.mysql_user}:***@'
                f'{self.mysql_host}/{self.mysql_db}')
            self.eng = create_engine(cmd)
            # Create any missing tables declared on the shared Base metadata.
            common.base.Base.metadata.create_all(self.eng, checkfirst=True)
            Session = sessionmaker(bind=self.eng)
            self.ses = Session()
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)
            sys.exit(99)

    def connectInfluxDatabase(self):
        """Connect to InfluxDB, dispatching on ``self.influx_version``.

        * version 1: a ``DataFrameClient`` is stored on ``self.influxClient``.
        * version 2: an ``InfluxDBClient`` plus query/write APIs are stored on
          ``self.influxClient`` / ``self.influx_query_api`` /
          ``self.influx_write_api``; writes are synchronous with retries.

        Exits the process with status 99 on any failure.
        """
        try:
            self.logger.debug(
                f'Connecting to Influx with: Host:{self.influx_host}, Port: {self.influx_port}, User: {self.influx_user}, DB: {self.influx_db}'
            )
            if self.influx_version == 1:
                self.influxClient = DataFrameClient(self.influx_host,
                                                    self.influx_port,
                                                    self.influx_user,
                                                    self.influx_pwd,
                                                    self.influx_db)
            elif self.influx_version == 2:
                # Aggressive retry policy: transient outages of the Influx
                # endpoint should not kill long-running jobs.
                retries = WritesRetry(total=20,
                                      backoff_factor=1,
                                      exponential_base=1)
                self.influxClient = InfluxDBClient(
                    url=f"http://{self.influx_host}:{self.influx_port}",
                    token=self.influx_token,
                    org=self.influx_org,
                    retries=retries,
                    timeout=180_000)
                self.influx_query_api = self.influxClient.query_api()
                self.influx_write_api = self.influxClient.write_api(
                    write_options=WriteOptions(
                        batch_size=500,
                        write_type=WriteType.synchronous,
                        flush_interval=10_000,
                        jitter_interval=2_000,
                        retry_interval=30_000,
                        max_retries=25,
                        max_retry_delay=60_000,
                        exponential_base=2))
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)
            sys.exit(99)

    def resetDatabases(self):
        """Drop and re-create both the MySQL schema and the Influx database/bucket."""
        try:
            self.logger.warning("Resetting Databases")
            self.resetMySQLDatabases()
            self.resetInfluxDatabases()
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)

    def resetMySQLDatabases(self):
        """Drop and re-create every table mapped on the shared Base metadata."""
        try:
            self.logger.warning("Resetting MySQL-Database")
            common.base.Base.metadata.drop_all(self.eng, checkfirst=True)
            common.base.Base.metadata.create_all(self.eng, checkfirst=True)
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)

    def resetInfluxDatabases(self):
        """Drop and re-create the Influx database (v1) or bucket (v2).

        For v2 the bucket is re-created with an infinite retention policy and
        a 90-day shard-group duration.  Exits the process with status -99 on
        failure — unlike the MySQL reset, which only logs.
        """
        try:
            self.logger.warning("Resetting Influx-Database")
            if self.influx_version == 1:
                self.influxClient.drop_database(self.influx_db)
                self.influxClient.create_database(self.influx_db)
            else:
                with InfluxDBClient(
                        url=f"http://{self.influx_host}:{self.influx_port}",
                        token=self.influx_token,
                        org=self.influx_org,
                        timeout=180_000) as client:
                    buckets_api = client.buckets_api()
                    my_bucket = buckets_api.find_bucket_by_name(self.influx_db)
                    if my_bucket is not None:
                        buckets_api.delete_bucket(my_bucket)
                    # Fix: use the freshly opened `client`, not
                    # self.influxClient, which may not exist / be closed.
                    org_name = self.influx_org
                    org = [
                        o for o in client.organizations_api().find_organizations()
                        if o.name == org_name
                    ][0]
                    retention_rules = BucketRetentionRules(
                        type="forever",
                        every_seconds=0,
                        shard_group_duration_seconds=60 * 60 * 24 * 90)
                    buckets_api.create_bucket(
                        bucket_name=self.influx_db,
                        retention_rules=retention_rules,
                        org_id=org.id)
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)
            sys.exit(-99)

    def writeJobStatus(self,
                       Status,
                       StartDate=None,
                       EndDate=None,
                       statusMessage=None,
                       SuccessDate=None):
        """Upsert this job's row in the ScriptStatus table.

        Creates the row (keyed on ``self.jobName``) if missing, then updates
        the status timestamp/fields and copies the accumulated error/warning
        counters and messages.  Only the non-None optional dates are written,
        so existing values are preserved otherwise.
        """
        try:
            res = self.ses.query(ScriptStatus).filter(
                ScriptStatus.Name == self.jobName)
            if res.count() == 0:
                self.logger.debug(
                    f'ScriptStatus {self.jobName} not found, creating...')
                jobStatus = ScriptStatus(self.jobName)
                self.ses.add(jobStatus)
                self.ses.commit()
            else:
                jobStatus = res.first()
            jobStatus.StatusDateTime = datetime.datetime.now()
            jobStatus.Status = Status
            if SuccessDate is not None:
                jobStatus.LastSuccessDateTime = SuccessDate
            if StartDate is not None:
                jobStatus.StartDateTime = StartDate
            if EndDate is not None:
                jobStatus.EndDateTime = EndDate
            if statusMessage is not None:
                jobStatus.StatusMessage = statusMessage
            jobStatus.ErrorNumbers = self.numErrors
            jobStatus.ErrorMessage = self.errMsg
            jobStatus.WarningNumbers = self.numWarnings
            jobStatus.WarningMessage = self.warnMsg
            self.ses.add(jobStatus)
            self.ses.commit()
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)

    def chunk(self, seq, size):
        """Yield successive slices of *seq* of length *size* (last may be shorter)."""
        return (seq[pos:pos + size] for pos in range(0, len(seq), size))

    def writeJobMessage(self, logType, logObject, logObjectId, message):
        """ Writes a message into the log table
            logType: Error, Warning, Info, Debug
            logObject: Stock, Depot, Script,...
            LogObjectId: ISIN, Depot-Name,...
            Message: Message
        """
        try:
            jobMessage = LogMessage(self.runId, logType, logObject,
                                    logObjectId, message)
            self.ses.add(jobMessage)
            self.ses.commit()
        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)

    def iQuery(self, qry):
        """Executes the flow query against the innodb

        Opens a short-lived v2 client, runs *qry* via ``query_data_frame``,
        and records progress in the job status.  On failure increments the
        error counters and returns None (best-effort, does not re-raise).
        """
        loc = locals()
        logger = logging.getLogger(__name__)
        res = None
        try:
            msg = f"Starting iQuery with {loc}"
            logger.debug(msg)
            self.writeJobStatus("Running", statusMessage=msg)
            with InfluxDBClient(
                    url=f"http://{self.influx_host}:{self.influx_port}",
                    token=self.influx_token,
                    org=self.influx_org,
                    timeout=180_000) as client:
                res = client.query_api().query_data_frame(qry)
            self.writeJobStatus("Running", statusMessage=msg + " - DONE")
            logger.debug(msg + " - DONE")
            return res
        except Exception as e:
            logger.exception(f'Crash iQuery with {loc}!', exc_info=e)
            self.numErrors += 1
            self.errMsg += f"Crash iQuery with {loc}; "