def rename_file(self, old_file: str, new_file: str) -> None:
    """Move ``old_file`` to ``new_file`` via ``os.rename``.

    On failure the (temporary) ``old_file`` is removed on a best effort
    basis and ``error`` is raised, chained to the original ``OSError``
    so callers can see the root cause.
    """
    try:
        os.rename(old_file, new_file)
    except OSError as exc:
        logger.error(f'Unable to rename {old_file} to {new_file}: {exc}')
        # best effort cleanup of the leftover source file; use a separate
        # name so the original exception is not shadowed (the flattened
        # original reused ``e`` here, losing the rename failure)
        try:
            os.unlink(old_file)
        except OSError as cleanup_exc:
            logger.warning(f'Failed to remove temp. file {old_file}: {cleanup_exc}')
        raise error('rename_file') from exc
def log_release(self) -> None:
    """Record the currently running backend release in the database.

    Looks up the most recently logged version for this host/application
    in ``release_log_tbl`` and inserts a new row when the running version
    differs (or no row exists).  When the database is unavailable, the
    same information is appended to ``<base>/log/release.log`` instead
    (best effort spooling).
    """
    # application identifier, e.g. BACKEND-<ID>
    application = 'BACKEND-{id}'.format(id=self.id.upper())
    now = datetime.now()
    try:
        with DB() as db:
            data: Dict[str, Any] = {
                'host_name': fqdn,
                'application_name': application
            }
            # newest logged version for this host/application; the mysql
            # variant limits to one row, the oracle variant presumably
            # relies on querys() fetching only the first row -- TODO confirm
            rq = db.querys(
                db.qselect(
                    oracle=(
                        'SELECT version_number '
                        'FROM release_log_tbl '
                        'WHERE host_name = :host_name AND application_name = :application_name '
                        'ORDER BY startup_timestamp DESC'
                    ),
                    mysql=(
                        'SELECT version_number '
                        'FROM release_log_tbl '
                        'WHERE host_name = :host_name AND application_name = :application_name '
                        'ORDER BY startup_timestamp DESC '
                        'LIMIT 1'
                    )
                ),
                data)
            if rq is None or rq.version_number != spec.version:
                # first start or version changed: log the new release
                data.update({
                    'version_number': spec.version,
                    'startup_timestamp': now,
                    'build_time': spec.timestamp,
                    'build_host': spec.host,
                    'build_user': spec.user
                })
                count = db.update(
                    'INSERT INTO release_log_tbl '
                    ' (host_name, application_name, version_number, startup_timestamp, build_time, build_host, build_user) '
                    'VALUES '
                    ' (:host_name, :application_name, :version_number, :startup_timestamp, :build_time, :build_host, :build_user)',
                    data,
                    commit=True)
                if count != 1:
                    raise error(
                        f'failed to create new record, expected 1 row, inserted {count} rows'
                    )
    except (IOError, error) as e:
        # database not usable: spool the release information to a flat file
        logger.debug(
            f'log_release: failed to write to database ({e}), try to spool information'
        )
        log_path = os.path.join(base, 'log')
        if os.path.isdir(log_path):
            # NOTE(review): ``licence`` is not defined in this method and is
            # presumably a module level global -- verify it is in scope
            with open(os.path.join(log_path, 'release.log'), 'a') as fd:
                fd.write(
                    f'{licence};{fqdn};{application};{spec.version};{now:%Y-%m-%d %H:%M:%S};{spec.timestamp:%Y-%m-%d %H:%M:%S};{spec.host};{spec.user}\n'
                )
        else:
            logger.debug(
                f'no path {log_path} exists, no information is written')
def update_config_file(self, new: str) -> None:
    """Persist ``new`` as the configuration file content.

    Does nothing when ``new`` equals the last written content.  The data
    is first written to a pid-suffixed temporary file and then moved into
    place.  Raises ``error`` when the temporary file cannot be written.
    """
    if new == self.last:
        return
    temporary = f'{self.config_filename}.{os.getpid():d}'
    try:
        with open(temporary, 'w') as target:
            target.write(new)
        self.rename_file(temporary, self.config_filename)
        self.last = new
    except IOError as e:
        logger.error(f'Unable to write {temporary} {e}')
        raise error('update_config_file.open', e)
def rename_file(self, old_file: str, new_file: str) -> None:
    """Move ``old_file`` to ``new_file``.

    On failure the source file is removed on a best effort basis and
    ``error`` is raised.
    """
    try:
        os.rename(old_file, new_file)
    except OSError as rename_exc:
        logger.error('Unable to rename %s to %s %s' % (old_file, new_file, rename_exc))
        # best effort removal of the leftover temporary file
        try:
            os.unlink(old_file)
        except OSError as unlink_exc:
            logger.warning('Failed to remove temp. file %s %s' % (old_file, unlink_exc))
        raise error('rename_file')
def update_config_file(self, new: str) -> None:
    """Write ``new`` to the class level configuration file path.

    A no-op when ``new`` equals the previously written content; otherwise
    the content is staged in a pid-suffixed temporary file and moved into
    place.  Raises ``error`` on write failure.
    """
    if new == self.last:
        return
    staging = '%s.%d' % (BavUpdate.config_filename, os.getpid())
    try:
        with open(staging, 'w') as target:
            target.write(new)
        self.rename_file(staging, BavUpdate.config_filename)
        self.last = new
    except IOError as e:
        logger.error(f'Unable to write {staging}: {e}')
        raise error('update_config_file.open', e)
def use_arguments(self, args: argparse.Namespace) -> None:
    """Copy parsed command line arguments onto the instance.

    ``--option`` entries of the form ``key=value`` are collected into
    ``self.options``; a malformed entry (no ``=``) raises ``error``.
    """
    self.verbose = args.verbose
    self.id = args.id
    self.config_filename = args.config_filename
    self.config_source = args.config_source
    self.options = {}
    for entry in args.option or []:
        key, separator, value = entry.partition('=')
        if not separator:
            raise error(f'option: invalid syntax: {entry}')
        self.options[key] = value
    self.parameter = args.parameter
def __init__(self, cfg: Config, id: str, parameter: List[str]) -> None:
    """Set up processing state from *cfg* and the *parameter* list.

    The first parameter element selects the command (defaulting to the
    first known command when the list is empty); remaining elements form
    the selective set.  Raises ``error`` for an unknown command.
    """
    self.cfg = cfg
    self.id = id
    self.ec = 0
    self.plugins: Dict[str, PluginService] = {}
    if parameter:
        self.command = parameter[0]
        if self.command not in Process.known_commands:
            raise error('Command %s unknown, available commands are %s' % (self.command, ', '.join(Process.known_commands)))
        self.selective = set(parameter[1:])
    else:
        self.command = Process.known_commands[0]
        self.selective = None
def __init__ (self) -> None:
    """Set up the spool directory layout and sending infrastructure.

    Paths come from the system configuration with defaults below
    ``base``; the queue list defaults to every ``QUEUE*`` directory
    under ``base`` that is not disabled by a ``.ignore`` marker file.
    Raises ``error`` when no queue directory is found.
    """
    self.incoming = syscfg.get_str ('direct-path-incoming', os.path.join (base, 'DIRECT'))
    self.archive = syscfg.get_str ('direct-path-archive', os.path.join (base, 'ARCHIVE'))
    self.recover = syscfg.get_str ('direct-path-recover', os.path.join (base, 'RECOVER'))
    # default queue candidates: QUEUE* directories without a ".ignore" file
    self.queues = syscfg.get_list ('direct-path-queues', ',',
        Stream (os.listdir (base))
            .filter (lambda f: bool (f.startswith ('QUEUE')))
            .map (lambda f: os.path.join (base, f))
            .filter (lambda p: os.path.isdir (p) and not os.path.isfile (os.path.join (p, '.ignore')))
            .list ()
    )
    if len (self.queues) == 0:
        raise error ('No queues for spooling found')
    self.queues.sort ()
    # shared counter for round-robin queue selection across processes
    self.cur = multiprocessing.Value ('i', 0)
    self.mta = MTA ()
def use_arguments(self, args: argparse.Namespace) -> None:
    """Transfer parsed watchdog command line options onto the instance.

    ``--verbose`` switches logging to DEBUG on stderr.  A command to
    supervise is mandatory; the log name is taken from the instance name,
    or derived from the command's basename when the instance is ``-``.
    Raises ``error`` when no command is given.
    """
    if args.verbose:
        log.loglevel = logging.DEBUG
        log.outlevel = logging.DEBUG
        log.outstream = sys.stderr
    self.instance = args.instance
    self.background = args.background
    self.restart_delay = args.restart_delay
    self.termination_delay = args.termination_delay
    self.output = args.output
    self.prior = args.prior
    self.limit = args.limit
    self.command = args.command
    if not self.command:
        raise error('no command to start under watchdog control')
    if self.instance:
        if self.instance != '-':
            log.name = self.instance
        else:
            basename = os.path.basename(self.command[0]).split('.', 1)[0]
            log.name = f'{basename}-wd'
def executor(self) -> bool:
    """Run the service and exit the process with its exit code.

    Changes to the base directory, builds the configuration (default
    config file, then an explicit config source or file, then ``-o``
    overrides), runs the service's sanity check and executes it.  A
    duplicate of stderr is exported via the ``SVCFD`` environment
    variable so subprocesses can report back.  Raises ``error`` when the
    home directory cannot be entered.
    """
    if self.verbose:
        log.outlevel = logging.DEBUG
        log.outstream = sys.stderr
    try:
        cwd = os.getcwd()
    except OSError:
        cwd = ''
    if base != cwd:
        try:
            os.chdir(base)
        except OSError as e:
            raise error(
                f'Failed to chdir to home directory "{base}" from "{cwd}": {e}'
            )
    #
    cfg = Config()
    cfg.setup_namespace(id=self.id)
    cfg.enable_substitution()
    if os.path.isfile(cfg.filename()):
        cfg.read()
    # an explicit configuration source/file overrides the default file
    if self.config_source is not None:
        cfg.read(StringIO(self.config_source))
    elif self.config_filename is not None:
        cfg.read(self.config_filename)
    for (option, value) in self.options.items():
        cfg[option] = value
    #
    service = Service(cfg, self.id, self.parameter)
    if service.sanity_check():
        # fixed typo: was "serivce_filedescriptior_name"
        service_filedescriptor_name = 'SVCFD'
        fd: Optional[int] = None
        if sys.stderr is not None:
            try:
                # duplicate stderr and export its number for child processes
                fd = fcntl.fcntl(sys.stderr.fileno(), fcntl.F_DUPFD)
                os.environ[service_filedescriptor_name] = str(fd)
            except OSError as e:
                logger.warning(f'Failed to setup reporting FD: {e}')
        try:
            service.execute()
        finally:
            # always release the duplicated FD and its environment entry,
            # even when execute() raises (the original leaked both)
            if fd is not None:
                os.close(fd)
            os.environ.pop(service_filedescriptor_name, None)
    sys.exit(service.ec)
def setup_parameter(self) -> None:  #{{{
    """Load per-company conversion parameter and company status flags.

    Parses the company info entry named by ``self.conversion_name`` into
    ``self.config`` (raising ``error`` on unparsable values) and fills
    ``self.companies`` with active/mailtracking flags from
    ``company_tbl``.
    """
    logger.info('Reading parameter from company info table')
    cc = CompanyConfig(self.db)
    cc.read()
    for (company_id, company_config) in cc.company_info.items():
        # Ignore(KeyError): companies without an entry for this
        # conversion are silently skipped; the error raised below is
        # not a KeyError and therefore still propagates
        with Ignore(KeyError):
            parameter = company_config[self.conversion_name]
            try:
                self.config[company_id] = Parameter(parameter)
            except Exception:
                logger.exception(
                    'Failed to parse parameter %r for company_id %d' %
                    (parameter, company_id))
                raise error(
                    f'failed to parse parameter {parameter} for {company_id}'
                )
    logger.info('%d parameter read' % len(self.config))
    for row in self.db.query(
            'SELECT company_id, status, mailtracking FROM company_tbl'):
        self.companies[row.company_id] = Softbounce.Company(
            active=row.status == 'active',
            mailtracking=row.mailtracking == 1)
def recover_mailings(self) -> None:  #{{{
    """Restart interrupted mailings and monitor their regeneration.

    After an optional startup delay (abortable via ``self.running``)
    each recoverable mailing is reset in the backend log tables, fired
    again through the trigger and then polled via
    ``maildrop_status_tbl.genstatus`` until it finishes (status 3),
    fails (status > 3) or one of several timeouts strikes.  Finally the
    mail counters are corrected by the recipients already seen before
    the crash.  In dryrun mode only the seen counts are reported.
    Raises ``error`` when the process is terminated during the delay.
    """
    if not self.dryrun and self.mailings and self.startup_delay > 0:
        logger.info('Wait for backend to start up')
        n = self.startup_delay
        while n > 0:
            time.sleep(1)
            n -= 1
            if not self.running:
                raise error('abort due to process termination')
    for m in self.mailings:
        m.collect_seen()
        if self.dryrun:
            print('%s: %d recipients already seen' % (self.__mailing_name(m.mailing_id), len(m.seen)))
        else:
            m.create_filelist()
            # remember the highest mail count already generated before the crash
            count = 0
            for (total_mails, ) in self.db.query(
                    'SELECT total_mails FROM mailing_backend_log_tbl WHERE status_id = :sid',
                    {'sid': m.status_id}):
                if total_mails is not None and total_mails > count:
                    count = total_mails
            m.set_generated_count(count)
            # reset backend state so the mailing can be generated again
            self.db.update('DELETE FROM mailing_backend_log_tbl WHERE status_id = :sid', {'sid': m.status_id})
            self.db.update('DELETE FROM world_mailing_backend_log_tbl WHERE mailing_id = :mid', {'mid': m.mailing_id})
            self.db.update('UPDATE maildrop_status_tbl SET genstatus = 1 WHERE status_id = :sid', {'sid': m.status_id})
            self.db.sync()
            logger.info('Start backend using status_id %d for %s' % (m.status_id, self.__mailing_name(m.mailing_id)))
            starter = agn3.emm.mailing.Mailing()
            if not starter.fire(status_id=m.status_id, cursor=self.db.cursor):
                # fixed: the original logged the literal '%d' (missing argument)
                logger.error('Failed to trigger mailing %d' % m.mailing_id)
                self.report.append('%s [%d]: Failed to trigger mailing' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                break
            self.db.sync()
            self.report.append('%s [%d]: Start recovery using status_id %d' % (self.__mailing_name(m.mailing_id), m.mailing_id, m.status_id))
        if not self.dryrun:
            query = 'SELECT genstatus FROM maildrop_status_tbl WHERE status_id = :status_id'
            start = int(time.time())
            ok = True
            last_generation_status = 1
            # poll the generation status until finished, failed or timed out
            while self.running and m.active and ok:
                now = int(time.time())
                self.db.sync(False)
                row = self.db.querys(query, {'status_id': m.status_id})
                if row is None or row[0] is None:
                    logger.info('Failed to query status for mailing %d' % m.mailing_id)
                    self.report.append('%s [%d]: Recovery failed due to missing status' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                    ok = False
                else:
                    generation_status = row[0]
                    if generation_status != last_generation_status:
                        logger.info(
                            f'Mailings {m.mailing_id} generation status has changed from {last_generation_status} to {generation_status}'
                        )
                        last_generation_status = generation_status
                    if generation_status == 3:
                        # generation finished as expected
                        logger.info('Mailing %d terminated as expected' % m.mailing_id)
                        self.report.append('%s [%d]: Recovery finished' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                        m.active = False
                    elif generation_status == 2:
                        # generation in progress: watch for inactivity
                        if m.last:
                            current = 0
                            for (currentMails, ) in self.db.query(
                                    'SELECT current_mails FROM mailing_backend_log_tbl WHERE status_id = :sid',
                                    {'sid': m.status_id}):
                                if currentMails is not None:
                                    current = currentMails
                            if current != m.current:
                                logger.debug(
                                    f'Mailing {m.mailing_id} has created {current:,d} vs. {m.current:,d} when last checked'
                                )
                                m.current = current
                                m.last = now
                            else:
                                # no progress: 20 min with output, 60 min without
                                if (current > 0 and m.last + 1200 < now) or (current == 0 and m.last + 3600 < now):
                                    logger.info('Mailing %d terminated due to inactivity after %d mails' % (m.mailing_id, current))
                                    self.report.append('%s [%d]: Recovery timed out' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                                    ok = False
                        else:
                            # first poll in generating state: start the inactivity timer
                            m.last = now
                    elif generation_status == 1:
                        # still pending: give the backend 30 minutes to start
                        if start + 1800 < now:
                            logger.info('Mailing %d terminated while not starting up' % m.mailing_id)
                            self.report.append('%s [%d]: Recovery not started' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                            ok = False
                    elif generation_status > 3:
                        logger.info('Mailing %d terminated with status %d' % (m.mailing_id, generation_status))
                        self.report.append('%s [%d]: Recovery ended with unexpected status %d' % (self.__mailing_name(m.mailing_id), m.mailing_id, generation_status))
                        m.active = False
                if m.active and ok:
                    # global timeout for the whole recovery of this mailing
                    if start + 30 * 60 < now:
                        logger.info('Failed due to global timeout to recover %d' % m.mailing_id)
                        self.report.append('%s [%d]: Recovery ended due to global timeout' % (self.__mailing_name(m.mailing_id), m.mailing_id))
                        ok = False
                    else:
                        time.sleep(1)
            if not m.active:
                # correct the counters by the recipients seen before the crash
                count = 0
                for (total_mails, ) in self.db.query(
                        'SELECT total_mails FROM mailing_backend_log_tbl WHERE status_id = :sid',
                        {'sid': m.status_id}):
                    if total_mails is not None:
                        count = total_mails
                count += len(m.seen)
                self.db.update(
                    'UPDATE mailing_backend_log_tbl SET total_mails = :cnt, current_mails = :cnt WHERE status_id = :sid',
                    {'sid': m.status_id, 'cnt': count})
                self.db.update(
                    'UPDATE world_mailing_backend_log_tbl SET total_mails = :cnt, current_mails = :cnt WHERE mailing_id = :mid',
                    {'mid': m.mailing_id, 'cnt': count})
                self.db.sync()
            if not self.running or not ok:
                break
def parse (self, log_id: LogID, info: SyslogParser.Info, line: str) -> bool:
    """Parse one syslog *line* for *log_id*; must be overridden by subclasses.

    Always raises ``error`` in this base implementation.
    """
    raise error ('Subclass must implement parse()')
def __enter__(self) -> Activator:
    """Context manager entry: ensure the database is open and return self.

    Raises ``error`` when the database could not be opened.
    """
    if self.db.isopen():
        return self
    raise error(f'{Activator.db_path}: failed to open database')