class NifSync(threading.Thread):
    """Populate and sync change messages from NIF api

    Currently handles the following services and respective entity types:

    * SynchronizationService/GetChanges3:
        * Person
        * Function
        * Organization
    * SynchronizationService/GetChangesCompetence2:
        * Competence
    * SynchronizationService/GetChangesLicense:
        * License

    .. danger::
        The club (org_type_id=5) integration users can only use GetChanges3
        (:py:attr:`sync_type` = 'changes'). For GetChangesCompetence2
        (:py:attr:`sync_type` = 'competence') and GetChangesLicense
        (:py:attr:`sync_type` = 'license') an integration user on federation
        level is required.

    The class will automatically handle when to :py:meth:`populate` and
    :py:meth:`sync`.

    .. note::
        :py:meth:`_check` is called on init and checks with the api to find
        the last change message for :py:attr:`org_id`; that message is then
        used as the initial starting point.

    :param org_id: The integration user organization id, required
    :type org_id: int
    :param username: The full path integration username
        ('app_id/function_id/username'), required
    :type username: str
    :param password: Password, required
    :type password: str
    :param created: A datetime string representing creation date of org_id, required
    :type created: str
    :param stopper: A threading.Event flag to exit
    :type stopper: threading.Event
    :param restart: On True will reset all AppLogger handlers
    :type restart: bool
    :param background: Sets the scheduler. Defaults to False and BlockingScheduler
    :type background: bool
    :param initial_timedelta: The initial timedelta to use from the last
        change message, in seconds
    :type initial_timedelta: int
    :param overlap_timedelta: An optional timedelta for overlap functions, in hours
    :type overlap_timedelta: int
    :param lock: The semaphore object, if None uses :py:class:`.FakeSemaphore`
    :type lock: threading.BoundedSemaphore
    :param sync_type: The sync type for this user, allowed ``changes``,
        ``competence``, ``license`` and ``federation``. Defaults to ``changes``.
    :type sync_type: str
    :param sync_interval: The interval for the sync scheduler in minutes.
        Defaults to NIF_CHANGES_SYNC_INTERVAL
    :type sync_interval: int
    :param populate_interval: The interval for populating in hours.
        Defaults to NIF_POPULATE_INTERVAL
    :type populate_interval: int

    Usage - threading::

        from sync import NifSync
        sync = NifSync(org_id, username, password)
        sync.start()  # sync is of threading.Thread

    Usage - blocking::

        from sync import NifSync
        sync = NifSync(org_id, username, password)
        sync.run()  # sync starts without thread running

    Usage - with semaphore::

        import threading
        from sync import NifSync
        bound_semaphore = threading.BoundedSemaphore(value=10)
        sync = NifSync(org_id, username, password, lock=bound_semaphore)
        sync.start()  # sync is of threading.Thread, semaphore has 10 slots

    .. note::
        The usernames and passwords for integration users on club level are
        stored in integration/users and accessible through :py:mod:`
    """

    def __init__(self, org_id, username, password, created, stopper=False,
                 restart=False, background=False, initial_timedelta=0,
                 overlap_timedelta=0, lock=None, sync_type='changes',
                 sync_interval=NIF_CHANGES_SYNC_INTERVAL,
                 populate_interval=NIF_POPULATE_INTERVAL):

        self.state = SyncState()

        # Init thread
        super().__init__(name='klubb-{0}'.format(org_id))

        if sync_type in ['changes', 'license', 'competence', 'federation']:
            self.sync_type = sync_type
        else:
            raise Exception('{} is not a valid sync type'.format(sync_type))

        self.id = org_id
        self.username = username
        self.started = datetime.now()
        self.sync_errors = 0
        # self.sync_errors_max = 3  # Errors in a row!

        self.sync_interval = sync_interval  # minutes
        self.populate_interval = populate_interval  # hours

        self.initial_timedelta = initial_timedelta
        self.overlap_timedelta = overlap_timedelta

        self.messages = 0  # Holds number of successfully processed messages

        self.stopper = stopper
        self.background = background
        self.initial_start = None
        self.from_to = [None, None]
        self.sync_started = False

        self.tz_local = tz.gettz(LOCAL_TIMEZONE)
        self.tz_utc = tz.gettz('UTC')

        # Init logger
        self.log = AppLogger(name='klubb-{0}'.format(org_id),
                             stdout=not background,
                             last_logs=100,
                             restart=restart)

        # No stopper, started directly; check for stream resume token!
        if self.stopper is False:
            from pathlib import Path
            resume_token = Path(STREAM_RESUME_TOKEN_FILE)

            if resume_token.is_file() is not True:
                self.log.warning('No resume token at {}'.format(STREAM_RESUME_TOKEN_FILE))
                self.log.warning('Requires stream to have or be running and a valid token file')

        if lock is not None and (isinstance(lock, threading.BoundedSemaphore) or
                                 isinstance(lock, threading.Semaphore)):
            self.lock = lock
        else:
            self.lock = FakeSemaphore()  # Be able to run single-threaded as well

        # Lungo REST API
        self.api_integration_url = '%s/integration/changes' % API_URL

        # Make a startup log entry
        self.log.debug('[STARTUP]')
        self.log.debug('Org_id:   {0}'.format(org_id))
        self.log.debug('Login:    {0}'.format(username))
        self.log.debug('Pwd:      {0}'.format(password))
        self.log.debug('Created:  {0}'.format(created))
        self.log.debug('Skew:     {0} seconds'.format(self.initial_timedelta))
        self.log.debug('Sync:     {0} minutes'.format(self.sync_interval))
        self.log.debug('Populate: {0} hours'.format(self.populate_interval))
        self.log.debug('Api url:  {0}'.format(self.api_integration_url))

        # Created
        self.org_created = dateutil.parser.parse(created)
        if self.org_created.tzinfo is None or \
                self.org_created.tzinfo.utcoffset(self.org_created) is None:
            # self.org_created is naive (no timezone); we assume CET
            self.org_created = self.org_created.replace(tzinfo=self.tz_local)

        self.org_id = org_id

        try:
            self.nif = NifApiSynchronization(username, password,
                                             realm=NIF_REALM,
                                             log_file=SYNC_LOG_FILE,
                                             test_login=False)
        except:
            self.log.exception('Sync client creation for {} failed, terminating'.format(username))
            # sys.exit(0)
            raise Exception('Could not create sync client')

        # Setup job scheduler
        if self.background:
            self.scheduler = BackgroundScheduler()
            self.log.info('Scheduler: BackgroundScheduler')
        else:
            self.scheduler = BlockingScheduler()
            self.log.info('Scheduler: BlockingScheduler')

        self.job_misfires = 0
        self.scheduler.add_listener(self._job_fire, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(self._job_misfire, EVENT_JOB_MISSED)

        self.job = self.scheduler.add_job(self.sync, 'interval',
                                          minutes=self.sync_interval,
                                          max_instances=1)

        self.state.set_state(state='finished')

    def __del__(self):
        """Destructor, shutdown the scheduler on exit"""
        try:
            self.log.debug('Destructor called, terminating thread')
        except:
            pass

        try:
            if self.scheduler.running is True:
                self.scheduler.shutdown(wait=False)
                self.log.debug('Shutting down scheduler')
        except:
            self.log.error('Could not shut down scheduler')
            pass

    @property
    def uptime(self) -> (int, int):
        """Calculate thread uptime

        :returns uptime: integer tuple (days, seconds) since thread start
        """
        t = datetime.now() - self.started
        return t.days, t.seconds

    @property
    def job_next_run_time(self):
        if self.scheduler.state == 1:
            return self.job.next_run_time

        return None

    def job_pause(self):
        if self.scheduler.state == 1:
            self.job.pause()

    def job_resume(self):
        if self.scheduler.state == 1:
            self.job.resume()

    @property
    def scheduler_state(self):
        return self.scheduler.state

    def _job_misfire(self, event) -> None:
        """Callback for failed job execution. Increases :py:attr:`job_misfires`

        :param event: apscheduler.Event
        """
        self.job_misfires += 1

    def _job_fire(self, event) -> None:
        """Callback for successful job execution. Decreases :py:attr:`job_misfires`

        :param event: apscheduler.Event
        """
        if self.job_misfires > 0:
            self.job_misfires -= 1

    def run(self) -> None:
        """Start the thread, conforms to threading.Thread.run()

        Calls :py:meth:`._check` which determines whether to run
        :py:meth:`populate` or start a job with target :py:meth:`sync`
        """
        self.log.debug('[Starting thread]')
        self._check()

    def _stopper(self, force=False) -> None:
        """If stopper is a threading.Event and is set, then terminate"""

        # Check if too many errors
        if self.sync_errors >= NIF_SYNC_MAX_ERRORS:
            self.state.set_state(mode=self.state.mode, state='terminated',
                                 reason='too many errors')
            self._shutdown()  # because setting stopper propagates to all!

        if isinstance(self.stopper, threading.Event):
            if self.stopper.is_set():
                self.log.warning('Stopper is set, terminating thread')
                self._shutdown()

        if force is True:
            self.log.warning('Forcing shutdown, terminating thread')
            self._shutdown()

    def _shutdown(self) -> None:
        """Shutdown in an orderly fashion"""
        if self.scheduler.state > 0:
            try:
                self.log.debug('Shutting down scheduler')
                if self.scheduler.running is True:
                    self.scheduler.shutdown(wait=False)
            except Exception as e:
                self.log.exception('Error shutting down scheduler')

        self.log.exception('[TERMINATING]')
        # Terminate instance/thread
        sys.exit(0)

    def _check(self) -> None:
        """Checks to decide whether to populate or sync on startup

        .. danger::
            On errors from the api, calls :py:meth:`_stopper(force=True)`
            which will terminate the thread.
        """
        self.state.set_state(mode='checking', state='running')

        # @TODO: check if in changes/stream - get last, then use last date retrieved as start_date (-1 microsecond)
        changes = requests.get(
            '%s?where={"_org_id":%s, "_realm":"%s"}&sort=[("sequence_ordinal", -1)]&max_results=1'
            % (self.api_integration_url, self.org_id, NIF_REALM),
            headers=API_HEADERS)

        if changes.status_code == 404:
            # populate, should not happen!
            # self.populate()
            self.log.error('404 from {0}, terminating'.format(self.api_integration_url))
            self._stopper(force=True)

        elif changes.status_code == 200:
            r = changes.json()
            c = r['_items']

            if len(c) == 0:
                self.log.debug('No change records, populating')
                self.populate()
            elif len(c) == 1:
                # Check date, then decide to populate or not!
                self.log.debug('Got last change record, checking times')

                sequence_ordinal = dateutil.parser.parse(
                    c[0]['sequence_ordinal']).replace(tzinfo=self.tz_utc)

                self.log.debug('Last change message recorded {0}'.format(
                    sequence_ordinal.astimezone(self.tz_local).isoformat()))

                self.initial_start = sequence_ordinal \
                    + timedelta(seconds=self.initial_timedelta) \
                    - timedelta(hours=self.overlap_timedelta)

                if self.initial_start.tzinfo is None or \
                        self.initial_start.tzinfo.utcoffset(self.initial_start) is None:
                    self.initial_start = self.initial_start.replace(tzinfo=self.tz_local)

                if self.initial_start < datetime.utcnow().replace(tzinfo=self.tz_utc) \
                        - timedelta(hours=self.populate_interval):
                    # Older than populate_interval, repopulate
                    self.log.debug('More than {} hours old, populating'.format(
                        self.populate_interval))
                    self.populate()
                    self.state.set_state(mode='populate', state='initialized')
                else:
                    self.log.debug('Less than {} hours old, syncing'.format(
                        self.populate_interval))
                    self.job.modify(next_run_time=datetime.now())
                    self.log.debug('Told job to start immediately')
                    self.log.debug('Starting sync scheduler')
                    self.scheduler.start()
                    self.state.set_state(mode='sync', state='started')
        else:
            self.log.error('{0} from {1}, terminating'.format(
                changes.status_code, self.api_integration_url))
            sys.exit()

    def _eve_fix_sync(self, o) -> dict:
        """Just make the soap response simpler

        .. danger::
            Deferred. Uses :py:class:`typings.changes.Changes` instead.
        """
        if o['Changes'] is None:
            o['Changes'] = []
        elif 'ChangeInfo' in o['Changes']:
            o['Changes'] = o['Changes']['ChangeInfo']

        for key, value in enumerate(o['Changes']):
            if o['Changes'][key]['MergeResultOf'] is None:
                o['Changes'][key]['MergeResultOf'] = []
            elif 'int' in o['Changes'][key]['MergeResultOf']:
                o['Changes'][key]['MergeResultOf'] = o['Changes'][key]['MergeResultOf']['int']
            else:
                o['Changes'][key]['MergeResultOf'] = []

        o['_org_id'] = self.org_id

        return o

    def _update_changes(self, changes) -> None:
        """Update change message

        .. note::
            Creates a custom unique '_ordinal' for each change message before
            trying to insert into the api. The purpose is to let it gracefully
            fail with a http 422 if the change message already exists in the
            api::

                sha224(bytearray(entity_type, id, sequence_ordinal, org_id))

        :param changes: list of change messages
        :type changes: :py:class:`typings.changes.Changes`
        """
        for v in changes:
            v['_ordinal'] = hashlib.sha224(bytearray(
                "%s%s%s%s" % (v['entity_type'], v['id'], v['sequence_ordinal'], self.org_id),
                'utf-8')).hexdigest()
            # bytearray("%s%s%s%s" % (self.org_id, v['EntityType'], v['Id'], v['sequence_ordinal']), 'utf-8')).hexdigest()
            v['_status'] = 'ready'  # ready -> running -> finished
            v['_org_id'] = self.org_id
            v['_realm'] = NIF_REALM

            r = requests.post(self.api_integration_url,
                              data=json.dumps(v, cls=EveJSONEncoder),
                              headers=API_HEADERS)

            if r.status_code == 201:
                self.log.debug('Created change message for {0} with id {1}'.format(
                    v['entity_type'], v['id']))
                self.messages += 1
            elif r.status_code == 422:
                self.log.debug('422 {0} with id {1} already exists'.format(
                    v['entity_type'], v['id']))
            else:
                self.log.error('{0} - Could not create change message for {1} with id {2}'.format(
                    r.status_code, v['entity_type'], v['id']))
                self.log.error(r.text)

    def _get_change_messages(self, start_date, end_date, resource) -> None:
        """Use NIF GetChanges3"""

        # To avoid future dates?
        time.sleep(NIF_SYNC_DELAY)

        if resource == 'changes':
            status, changes = self.nif.get_changes(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'competence':
            status, changes = self.nif.get_changes_competence(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'license':
            status, changes = self.nif.get_changes_license(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        elif resource == 'federation':
            status, changes = self.nif.get_changes_federation(
                start_date.astimezone(self.tz_local),
                end_date.astimezone(self.tz_local))
        else:
            raise Exception('Invalid resource: {}'.format(resource))

        if status is True:
            self.log.debug('Got {} changes for {}'.format(len(changes), resource))
            if len(changes) > 0:
                self._update_changes(changes)
            else:
                self.log.debug('Empty change messages list')
            """
            try:
                self.log.debug('Got {} changes for {}'.format(len(changes), resource))
                if len(changes) > 0:
                    self._update_changes(changes)
            except TypeError:
                self.log.debug('Empty change messages list (_get_changes3)')
            except Exception as e:
                self.log.exception('Unknown exception (_get_changes3)')
            """
        else:
            self.log.error('GetChanges returned error: {0} - {1}'.format(
                changes.get('code', 0),
                changes.get('error', 'Unknown error')))
            raise Exception('_get_change_messages returned an error')

    def _get_changes(self, start_date, end_date) -> bool:
        """Get change messages based on :py:attr:`.sync_type`"""
        self.from_to = [start_date, end_date]  # Adding extra info

        try:
            self._get_change_messages(start_date, end_date, self.sync_type)
            if self.sync_errors > 0:
                self.sync_errors -= 1
            return True
        except requests.exceptions.ConnectionError:
            self.sync_errors += 1
            self.log.error('Connection error in _get_changes')
            # Retry @TODO see if retry should be in populate and sync
            if NIF_SYNC_MAX_ERRORS >= self.sync_errors:
                time.sleep(3 * self.sync_errors)
                self._get_changes(start_date, end_date)
        except TypeError:
            self.log.debug('TypeError: Empty change messages list ({})'.format(self.sync_type))
        except Exception as e:
            self.sync_errors += 1
            self.log.exception('Exception in _get_changes')
            # @TODO Need to verify if this is reason to warn somehow??

        return False

    def sync(self) -> None:
        """This method is the job run by the scheduler when the last change
        message is less than NIF_POPULATE_INTERVAL old. If a job misfires,
        then on the next run the interval to sync will be twice as long.

        .. note::
            Checks if :py:attr:`.sync_errors` has reached
            ``NIF_SYNC_MAX_ERRORS``, and if so :py:meth:`._stopper`, which is
            always checked, will in turn call :py:meth:`._shutdown` and
            terminate the thread.
        """
        self.state.set_state(mode='sync', state='running')

        # Check if stopper is set
        self._stopper()

        self.log.debug('Getting sync messages')

        if self.initial_start is not None:
            end = datetime.utcnow().replace(tzinfo=self.tz_utc)
            start = self.initial_start + timedelta(seconds=self.initial_timedelta)

            self.log.debug('From: {0}'.format(start.astimezone(self.tz_local).isoformat()))
            self.log.debug('To:   {0}'.format(end.astimezone(self.tz_local).isoformat()))

            if end > start:
                if self._get_changes(start, end):
                    self.initial_start = end
            else:
                self.log.error('Inconsistency between dates')
        else:
            end = datetime.utcnow().replace(tzinfo=self.tz_utc)
            self.initial_start = end - timedelta(minutes=self.sync_interval)

            self.log.debug('From: {0}'.format(
                self.initial_start.astimezone(self.tz_local).isoformat()))
            self.log.debug('To:   {0}'.format(end.astimezone(self.tz_local).isoformat()))

            if end > self.initial_start:
                if self._get_changes(self.initial_start, end):
                    self.initial_start = end
            else:
                self.log.error('Inconsistency between dates')

        self.state.set_state(mode='sync', state='sleeping')

    def populate(self, sync_after=True) -> None:
        """Populates change messages from :py:attr:`.org_created` until the
        last change message timedelta is less than :py:attr:`.populate_interval`,
        at which point it will exit and start :py:attr:`scheduler`.

        .. attention::
            :py:meth:`populate` requires a slot in the connection pool.
            Getting a slot requires acquiring :py:attr:`lock`. The number of
            slots available is set in :py:mod:`syncdaemon` on startup.
        """
        self.state.set_state(mode='populate', state='initializing')
        self.log.debug('Populate, interval of {0} hours...'.format(self.populate_interval))

        # Initial
        if self.initial_start is None:
            end_date = self.org_created
        else:
            end_date = self.initial_start

        start_date = end_date - timedelta(hours=self.populate_interval)

        # Populate loop
        while end_date < datetime.utcnow().replace(tzinfo=self.tz_utc) \
                + timedelta(hours=self.populate_interval):

            # Check stopper
            self._stopper()

            # Acquire lock and run!
            self.log.debug('Waiting for slot in connection pool...')
            self.state.set_state(state='waiting', reason='connection pool')

            with self.lock:  # .acquire(blocking=True):
                self.state.set_state(state='running')

                # Check stopper, might have waited a long time
                self._stopper()

                # Overlapping end date
                if end_date > datetime.utcnow().replace(tzinfo=self.tz_utc):
                    end_date = datetime.utcnow().replace(tzinfo=self.tz_utc)
                    self.log.debug('Getting last changes between {0} and {1}'.format(
                        start_date.astimezone(self.tz_local).isoformat(),
                        end_date.astimezone(self.tz_local).isoformat()))

                    if self._get_changes(start_date, end_date) is True:
                        # Last populate
                        break  # Break while
                else:
                    self.log.debug('Getting changes between {0} and {1}'.format(
                        start_date.astimezone(self.tz_local).isoformat(),
                        end_date.astimezone(self.tz_local).isoformat()))

                    if self._get_changes(start_date, end_date) is True:
                        # Next iteration
                        start_date = end_date
                        end_date = end_date + timedelta(hours=self.populate_interval)

                time.sleep(0.1)  # Grace before we release the lock

        # The last assignment in the loop does not apply, so use
        # start_date (the end_date of the final iteration)
        self.initial_start = start_date
        self.state.set_state(mode='populate', state='finished', reason='ended populate')

        if sync_after is True:
            self.log.debug('Starting sync scheduler...')
            self.scheduler.start()
            self.state.set_state(mode='sync', state='started',
                                 reason='starting after populate')
class Base(object):
    STATUS = {
        'WAITING': 0,
        'QUEUED': 10,
        'PROCESSING': 20,
        'SUCCESS': 30,
        'SKIPPED': 40,
        'UNSTABLE': 50,
        'TIMEOUT': 60,
        'CANCELLED': 70,
        'FAILED': 80
    }

    def __init__(self, module_name):
        self.module = module_name
        self.config = Config()
        self.log = AppLogger(self.config, self.module)

    def command(self, cmd, working_dir, script=False):
        self.log.info('Executing command: {0}\nDir: {1}'.format(cmd, working_dir))
        if script:
            self.log.debug('Executing user command')
            return self.__exec_user_command(cmd, working_dir)
        else:
            self.log.debug('Executing system command')
            return self.__exec_system_command(cmd, working_dir)

    def __exec_system_command(self, cmd, working_dir):
        self.log.debug('System command runner \nCmd: {0}\nDir: {1}'.format(cmd, working_dir))
        cmd = '{0} 2>&1'.format(cmd)
        self.log.info('Running {0}'.format(cmd))
        proc = None
        try:
            proc = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                cwd=working_dir,
                env=os.environ.copy(),
                universal_newlines=True)

            stdout, stderr = proc.communicate()
            returncode = proc.returncode
            if returncode != 0:
                command_status = self.STATUS['FAILED']
                self.log.debug(stdout)
                self.log.debug(stderr)
            else:
                self.log.debug('System command completed {0}\nOut:{1}'.format(cmd, stdout))
                command_status = self.STATUS['SUCCESS']
                # self.log.log_command_op(stdout)
                self.log.debug(stdout)
            self.log.debug('Command completed {0}'.format(cmd))
        except Exception as exc:
            error_message = 'Error running system command. Err: {0}'.format(exc)
            self.log.error(error_message)
            trace = traceback.format_exc()
            self.log.error(exc)
            self.log.error(trace)
            raise Exception(error_message)

        self.log.debug('Returning command status: {0}'.format(command_status))
        return command_status

    def __exec_user_command(self, cmd, working_dir):
        self.log.debug('Executing streaming command {0}'.format(cmd))
        current_step_state = self.STATUS['FAILED']

        command_thread_result = {
            'success': False,
            'returncode': None
        }

        command_thread = threading.Thread(
            target=self.__command_runner,
            args=(cmd, working_dir, command_thread_result,))

        command_thread.start()

        self.log.debug('Waiting for command thread to complete')
        command_thread.join(self.config['MAX_COMMAND_SECONDS'])
        self.log.debug('Command thread join has returned. Result: {0}'.format(
            command_thread_result))

        if command_thread.is_alive():
            self.log.log_command_err('Command timed out')
            self.log.error('Command thread is still running')
            is_command_success = False
            current_step_state = self.STATUS['TIMEOUT']
            self.log.log_command_err('Command thread timed out')
        else:
            self.log.debug('Command completed {0}'.format(cmd))
            is_command_success = command_thread_result['success']
            if is_command_success:
                self.log.debug('Command executed successfully: {0}'.format(cmd))
                current_step_state = self.STATUS['SUCCESS']
            else:
                error_message = 'Command failed: {0}'.format(cmd)
                exception = command_thread_result.get('exception', None)
                if exception:
                    error_message += '\nException {0}'.format(exception)
                current_step_state = self.STATUS['FAILED']
                self.log.error(error_message)

        self.log.flush_console_buffer()

        return current_step_state

    def __command_runner(self, cmd, working_dir, result):
        self.log.debug('Command runner \nCmd: {0}\nDir: {1}'.format(cmd, working_dir))
        cmd = '{0} 2>&1'.format(cmd)
        self.log.info('Running {0}'.format(cmd))
        proc = None
        success = False
        try:
            proc = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                cwd=working_dir,
                env=os.environ.copy(),
                universal_newlines=True)

            exception = 'Invalid or no script tags received'
            current_group_info = None
            current_group_name = None
            current_cmd_info = None

            for line in iter(proc.stdout.readline, ''):
                self.log.debug(line)
                line_split = line.split('|')
                if line.startswith('__SH__GROUP__START__'):
                    current_group_info = line_split[1]
                    current_group_name = '|'.join(line_split[2:])
                    current_group_info = json.loads(current_group_info)
                    show_group = current_group_info.get('is_shown', True)
                    if show_group == 'false':
                        show_group = False
                    console_out = {
                        'consoleId': current_group_info.get('id'),
                        'parentConsoleId': '',
                        'type': 'grp',
                        'message': current_group_name,
                        'msgTimestamp': self.__get_timestamp(),
                        'completed': False,
                        'isShown': show_group
                    }
                    self.log.append_console_buffer(console_out)
                elif line.startswith('__SH__CMD__START__'):
                    current_cmd_info = line_split[1]
                    current_cmd_name = '|'.join(line_split[2:])
                    current_cmd_info = json.loads(current_cmd_info)
                    parent_id = current_group_info.get('id') if current_group_info else None
                    console_out = {
                        'consoleId': current_cmd_info.get('id'),
                        'parentConsoleId': parent_id,
                        'type': 'cmd',
                        'message': current_cmd_name,
                        'msgTimestamp': self.__get_timestamp(),
                        'completed': False
                    }
                    if parent_id:
                        self.log.append_console_buffer(console_out)
                elif line.startswith('__SH__CMD__END__'):
                    current_cmd_end_info = line_split[1]
                    current_cmd_end_name = '|'.join(line_split[2:])
                    current_cmd_end_info = json.loads(current_cmd_end_info)
                    parent_id = current_group_info.get('id') if current_group_info else None
                    is_completed = False
                    if current_cmd_end_info.get('completed') == '0':
                        is_completed = True
                    console_out = {
                        'consoleId': current_cmd_info.get('id'),
                        'parentConsoleId': parent_id,
                        'type': 'cmd',
                        'message': current_cmd_end_name,
                        'msgTimestamp': self.__get_timestamp(),
                        'completed': is_completed
                    }
                    if parent_id:
                        self.log.append_console_buffer(console_out)
                elif line.startswith('__SH__GROUP__END__'):
                    current_grp_end_info = line_split[1]
                    current_grp_end_name = '|'.join(line_split[2:])
                    current_grp_end_info = json.loads(current_grp_end_info)
                    is_completed = False
                    if current_grp_end_info.get('completed') == '0':
                        is_completed = True
                    console_out = {
                        'consoleId': current_group_info.get('id'),
                        'parentConsoleId': '',
                        'type': 'grp',
                        'message': current_grp_end_name,
                        'msgTimestamp': self.__get_timestamp(),
                        'completed': is_completed
                    }
                    self.log.append_console_buffer(console_out)
                elif line.startswith('__SH__SCRIPT_END_SUCCESS__'):
                    success = True
                    break
                elif '__SH__ARCHIVE_END__' in line:
                    success = True
                    break
                elif line.startswith('__SH__SCRIPT_END_FAILURE__'):
                    if current_group_info:
                        console_out = {
                            'consoleId': current_group_info.get('id'),
                            'parentConsoleId': '',
                            'type': 'grp',
                            'message': current_group_name,
                            'msgTimestamp': self.__get_timestamp(),
                            'completed': False
                        }
                        self.log.append_console_buffer(console_out)
                    success = False
                    exception = 'Script failure tag received'
                    break
                else:
                    parent_id = current_cmd_info.get('id') if current_cmd_info else None
                    console_out = {
                        'consoleId': str(uuid.uuid4()),
                        'parentConsoleId': parent_id,
                        'type': 'msg',
                        'message': line,
                        'msgTimestamp': self.__get_timestamp(),
                        'completed': False
                    }
                    if parent_id:
                        self.log.append_console_buffer(console_out)
                    else:
                        self.log.debug(console_out)

            proc.kill()
            if not success:
                self.log.debug('Command failure')
                result['returncode'] = 99
                result['success'] = False
                result['exception'] = exception
            else:
                self.log.debug('Command successful')
                result['returncode'] = 0
                result['success'] = True
        except Exception as exc:
            self.log.error('Exception while running command: {0}'.format(exc))
            trace = traceback.format_exc()
            self.log.error(trace)
            result['returncode'] = 98
            result['success'] = False
            result['exception'] = trace

        self.log.info('Command returned {0}'.format(result['returncode']))

    def __get_timestamp(self):
        return int(time.time() * 1000000)

    def pop_step(self, execute_plan, step):
        self.log.debug('Popping the top of stack: {0}'.format(execute_plan['steps']))
        try:
            # Iterate over a copy of the keys; deleting while iterating
            # over the dict itself raises a RuntimeError in Python 3
            for k in list(execute_plan['steps'].keys()):
                if k == step['step_key']:
                    del execute_plan['steps'][k]
            self.log.debug('Popped out top of stack. \n stack {0}'.format(
                execute_plan['steps']))
            return execute_plan
        except Exception as exc:
            self.log.error('Error occurred while popping step: {0}'.format(str(exc)))
            raise exc

    def get_top_of_stack(self, execute_plan):
        error_occurred = False
        error_message = ''
        try:
            self.log.info('Inside get_top_of_stack')
            steps = execute_plan.get('steps', None)

            if steps is None:
                error_message = 'No steps found in the execute plan: {0}'.format(execute_plan)
                error_occurred = True
                return

            if len(steps) == 0:
                self.log.info('No steps present in execute plan, returning '
                              'empty TopOfStack')
                return None

            keys = []
            for k in steps.keys():
                keys.append(int(str(k)))
            self.log.debug('Steps keys {0}'.format(keys))
            keys.sort()
            self.log.debug('Sorted keys {0}'.format(keys))

            current_step_key = str(keys[0])
            current_step = steps[current_step_key]
            current_step['step_key'] = current_step_key

            return current_step
        except Exception as exc:
            error_message = 'Error occurred while trying to get the step' \
                            ' from execute plan \nError: {0} execute plan: {1}' \
                            .format(str(exc), execute_plan)
            error_occurred = True
        finally:
            if error_occurred:
                raise Exception(error_message)
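
# __command_runner above parses a pipe-delimited tag protocol from the wrapped
# script's stdout. The emitter is not shown in this file; a minimal sketch of
# a script producing well-formed tags, with made-up ids, purely to illustrate
# the grammar the parser expects:
if __name__ == '__main__':
    import json

    # Hypothetical ids; real wrapper scripts generate their own
    group = {'id': 'grp-1', 'is_shown': True}
    cmd_start = {'id': 'cmd-1'}
    cmd_end = {'id': 'cmd-1', 'completed': '0'}
    grp_end = {'completed': '0'}

    print('__SH__GROUP__START__|%s|Setup' % json.dumps(group))
    print('__SH__CMD__START__|%s|pip install -r requirements.txt' % json.dumps(cmd_start))
    print('plain lines are buffered as messages under the current command')
    print('__SH__CMD__END__|%s|pip install -r requirements.txt' % json.dumps(cmd_end))
    print('__SH__GROUP__END__|%s|Setup' % json.dumps(grp_end))
    print('__SH__SCRIPT_END_SUCCESS__')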
'''
This is the example code from APDM's Python SDK
with some settings and logging features added
'''
import apdm

import settings
from app_logger import AppLogger

logger = AppLogger(__name__)


class SensorConfig:

    @staticmethod
    def configure():
        context = apdm.apdm_ctx_allocate_new_context()
        try:
            apdm.apdm_ctx_open_all_access_points(context)

            streaming_config = apdm.apdm_streaming_config_t()
            apdm.apdm_init_streaming_config(streaming_config)
            streaming_config.enable_accel = settings.ENABLE_ACCEL
            streaming_config.enable_gyro = settings.ENABLE_GYRO
            streaming_config.enable_mag = settings.ENABLE_MAG
            streaming_config.wireless_channel_number = settings.WIRELESS_CHANNEL
            streaming_config.output_rate_hz = settings.SENSOR_OUTPUT_RATE_HZ

            r = apdm.apdm_ctx_autoconfigure_devices_and_accesspoint_streaming(
                context, streaming_config)
            if r != apdm.APDM_OK:
                error_msg = "Unable to autoconfigure system: {0}".format(
                    apdm.apdm_strerror(r))
                logger.logger.error(error_msg)
                raise Exception(error_msg)
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the LICENSE.md file for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (the LICENSE.md file). If not, see <http://www.gnu.org/licenses/>.
#

import tkinter as tk  # In python2 it's Tkinter
from PIL import Image, ImageTk
from itertools import count
from app_logger import AppLogger

# Logger init
the_app_logger = AppLogger("lumiclock")
logger = the_app_logger.getAppLogger()


class AnimatedGIFLabel(tk.Label):
    """
    A label that displays images, and plays them if they are GIFs.
    Adapted from the following SO article
    https://stackoverflow.com/questions/43770847/play-an-animated-gif-in-python-with-tkinter
    """
    def __init__(self, parent, **args):
        tk.Label.__init__(self, parent, **args)
        self.loc = 0
        self.im = None
        self.frames = []
        self.delay = 100
import sqlite3
import argparse
import yaml
import pathlib
from pathlib import Path

from flask_sqlalchemy import SQLAlchemy

# Import parent libs
from app_logger import AppLogger
from config_manager import ConfigManager
from utils import AppUtils

logger = AppLogger.get_logger(__name__)

DEFAULT_DB = "data.db"

db = SQLAlchemy()


class DbInit:

    @staticmethod
    def users():
        with DbConnection() as db:
            logger.info("Initializing users")
            # INTEGER PRIMARY KEY allows an auto-incrementing id
            create_table = ("CREATE TABLE IF NOT EXISTS users "
                            "(id INTEGER PRIMARY KEY, username text, password text, email text)")
            db.cursor.execute(create_table)

    @staticmethod
    def items():
        with DbConnection() as db:
            logger.info("Initializing items")
            # INTEGER PRIMARY KEY allows an auto-incrementing id
            create_table = ("CREATE TABLE IF NOT EXISTS items "
                            "(id INTEGER PRIMARY KEY, name text, price real)")
            db.cursor.execute(create_table)
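
# DbInit depends on a DbConnection context manager that is not defined in this
# file. A minimal sketch of what it assumes (a .cursor attribute, commit on
# clean exit), built on sqlite3; not the original implementation:
class DbConnection:
    """Sketch of the context manager DbInit assumes."""

    def __init__(self, db_file=DEFAULT_DB):
        self.db_file = db_file
        self.connection = None
        self.cursor = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.db_file)
        self.cursor = self.connection.cursor()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.connection.commit()  # only commit on a clean exit
        self.connection.close()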
class PyroService:
    """A RPC interface based upon Pyro4

    :param workers_stop: The stopper for all worker threads
    :type workers_stop: threading.Event
    :param pyro_stop: The stopper for the RPC interface
    :type pyro_stop: threading.Event
    :param workers_started: A flag for signalling if workers are started
    :type workers_started: threading.Event
    :param daemon: The daemon instance of the RPC interface
    :type daemon: Pyro4.Daemon
    """

    def __init__(self, workers_stop, pyro_stop, workers_started, daemon):
        self.workers_stop = workers_stop
        self.workers_started = workers_started
        self.pyro_stop = pyro_stop
        self.daemon = daemon
        self.work = None
        self.log = AppLogger(name='syncdaemon')
        self.log.info('Pyro service started, ready for commands')
        # self.start_workers()

    def pyro_status(self) -> bool:
        """Returns True as long as RPC is running"""
        return True

    def status(self) -> bool:
        """Returns True as long as RPC is running"""
        return True

    @Pyro4.oneway
    def shutdown(self) -> None:
        """Shutdown. Shuts down workers, then the RPC interface, and exits the
        daemon context cleanly.

        .. attention::
            Since the workers might be asleep awaiting the scheduler event to
            run the job, it might take some time for all worker threads to
            wake up and then exit cleanly.
        """
        try:
            self.workers_stop.set()
            self.work.shutdown()
        except:
            pass

        try:
            self.pyro_stop.set()
        except:
            pass

    @Pyro4.oneway
    def shutdown_workers(self) -> None:
        """Shutdown all worker threads. See :py:meth:`.shutdown` for notes."""
        if self.work is not None:
            self.work.shutdown()
            self.work = None

    @Pyro4.oneway
    def start_workers(self) -> None:
        """Start all workers"""
        self.workers_stop.clear()

        if self.workers_started.is_set() is False:
            if self.work is None:  # and not isinstance(self.work, SyncWrapper):
                self.work = SyncWrapper(stopper=self.workers_stop,
                                        workers_started=self.workers_started,
                                        restart=True)
            self.work.start()

    @Pyro4.oneway
    def reboot_workers(self) -> None:
        """Reboot all workers. Shuts down all workers before startup.
        See :py:meth:`.shutdown` for notes."""
        self.workers_stop.set()

        try:
            self.shutdown_workers()
            time.sleep(0.5)
            self.start_workers()
        except Exception:
            self.log.exception('Exception restarting workers')

    def get_workers_status(self) -> [dict]:
        """Get status of all worker threads. Indexed according to :py:attr:`.work.workers`"""
        status = []
        for index, worker in enumerate(self.work.workers):
            status.append(self.get_worker_status(index))
        return status

    def get_worker_status(self, index) -> dict:
        """Returns status of worker at index in :py:attr:`.work.workers`

        :param index: Worker index, starts at 0
        :type index: int
        """
        return {'name': self.work.workers[index].name,
                'id': self.work.workers[index].id,
                'status': self.work.workers[index].is_alive(),
                'state': self.work.workers[index].state.get_state().get('state', 'error'),
                'mode': self.work.workers[index].state.get_state().get('mode', 'Unknown'),
                'reason': self.work.workers[index].state.get_state().get('reason', 'Unknown'),
                'index': index,
                'uptime': self.work.workers[index].uptime,
                'started': self.work.workers[index].started,
                'messages': self.work.workers[index].messages,
                'sync_type': self.work.workers[index].sync_type,
                'sync_interval': self.work.workers[index].from_to,
                'sync_misfires': self.work.workers[index].job_misfires,
                'sync_errors': self.work.workers[index].sync_errors,
                'next_run_time': self.work.workers[index].job_next_run_time
                }

    def restart_worker(self, index) -> bool:
        """Restart worker at index in :py:attr:`.work.workers`

        .. attention::
            Will only try to start a dead worker; it will not shut down and
            then restart the worker.

        :param index: Worker index, starts at 0
        :type index: int
        """
        if self.work.workers[index].is_alive() is False:
            self.work.workers[index].run()  # start()

        return self.work.workers[index].is_alive()

    def get_logs(self) -> [dict]:
        """Get the logs retained by the logger for all :py:attr:`.work.workers`"""
        log = []
        for w in self.get_workers_status():
            log.append({'id': w['id'], 'log': self.get_worker_log(w['index'])})
        return log

    def get_worker_log(self, index) -> dict:
        """Get retained log from worker at index in :py:attr:`.work.workers`

        :param index: Worker index, starts at 0
        :type index: int
        """
        return self.work.workers[index].log.get_tail()

    def get_failed_clubs(self) -> list:
        """Get clubs that failed startup"""
        return self.work.failed_clubs
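
# Client-side access goes through a plain Pyro4 proxy. The object name and
# port below are assumptions; they depend on how the Pyro4.Daemon registers
# PyroService in syncdaemon:
if __name__ == '__main__':
    import Pyro4

    # Hypothetical URI; the real name/port come from the daemon's register() call
    service = Pyro4.Proxy('PYRO:syncdaemon@localhost:9999')

    if service.pyro_status():
        for worker in service.get_workers_status():
            print(worker['name'], worker['state'], worker['next_run_time'])

    # @Pyro4.oneway methods such as shutdown_workers() return immediately,
    # before the workers have actually stopped:
    # service.shutdown_workers()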
class SyncWrapper:

    def __init__(self, stopper, workers_started, restart=False):
        self.log = AppLogger(name='syncdaemon')
        self.workers = []
        self.failed_clubs = []
        self.stopper = stopper
        self.workers_started = workers_started
        self.club_list = []
        self.integration = NifIntegration()
        self.bound_semaphore = threading.BoundedSemaphore(value=SYNC_CONNECTIONPOOL_SIZE)
        self.restart = restart

        # Build list of workers
        # for i in range(0, 10):
        #     self.workers.append(ProducerThread(i, workers_stop, restart))
        #     time.sleep(1)

    def start(self, start=False):
        self.log.info('Starting workers')
        self.workers_started.set()

        integration_users = []

        # clubs = self.integration.get_clubs()  # Only a list of integers!
        clubs = self.integration.get_active_clubs_from_ka()
        self.log.info('Got {} integration users'.format(len(clubs)))

        # Setup each integration user from list of integers
        for club_id in clubs:
            if club_id in self.club_list:
                continue
            elif club_id not in NIF_INTEGERATION_CLUBS_EXCLUDE:
                self.club_list.append(club_id)
                try:
                    if club_id not in NIF_INTEGERATION_CLUBS_EXCLUDE:
                        integration_users.append(NifIntegrationUser(club_id=club_id,
                                                                    create_delay=0))
                        time.sleep(0.2)
                except NifIntegrationUserError as e:
                    self.log.exception('Problems creating user for club_id {}: {}'.format(
                        club_id, e))
                    self.failed_clubs.append({'name': 'From list', 'club_id': club_id})
                except Exception as e:
                    self.log.exception('Problems with club id {}: {}'.format(club_id, e))
                    self.failed_clubs.append({'name': 'From list', 'club_id': club_id})

        # Grace period; the last user was just created!
        time.sleep(180)

        # Add each integration user to workers
        for club_user in integration_users:
            try:
                if club_user.test_login():
                    self.workers.append(NifSync(org_id=club_user.club_id,
                                                username=club_user.username,
                                                password=club_user.password,
                                                created=club_user.club_created,
                                                stopper=self.stopper,
                                                restart=self.restart,
                                                background=False,
                                                initial_timedelta=0,
                                                overlap_timedelta=5,
                                                lock=self.bound_semaphore,
                                                sync_type='changes',
                                                sync_interval=NIF_CHANGES_SYNC_INTERVAL))
                    self.log.info('Added {}'.format(club_user.username))
                else:
                    self.log.error('Failed login for {} with password {}'.format(
                        club_user.club_id, club_user.password))
                    self.failed_clubs.append({'name': club_user.club_name,
                                              'club_id': club_user.club_id})
            except Exception as e:
                self.failed_clubs.append({'name': club_user.club_name,
                                          'club_id': club_user.club_id})
                self.log.exception('Problems for {} ({})'.format(club_user.club_name,
                                                                 club_user.club_id))

        # Add license and competence sync
        try:
            self.log.info('Adding competences and license sync')
            org = NifOrganization(376)
            self.workers.append(NifSync(org_id=900001,
                                        username=NIF_FEDERATION_USERNAME,
                                        password=NIF_FEDERATION_PASSWORD,
                                        created=org.created,
                                        stopper=self.stopper,
                                        restart=self.restart,
                                        background=False,
                                        initial_timedelta=0,
                                        overlap_timedelta=5,
                                        lock=self.bound_semaphore,
                                        sync_type='license',
                                        sync_interval=NIF_LICENSE_SYNC_INTERVAL))
            self.workers.append(NifSync(org_id=900002,
                                        username=NIF_FEDERATION_USERNAME,
                                        password=NIF_FEDERATION_PASSWORD,
                                        created=org.created,
                                        stopper=self.stopper,
                                        restart=self.restart,
                                        background=False,
                                        initial_timedelta=0,
                                        overlap_timedelta=5,
                                        lock=self.bound_semaphore,
                                        sync_type='competence',
                                        sync_interval=NIF_COMPETENCE_SYNC_INTERVAL))
            self.workers.append(NifSync(org_id=376,
                                        username=NIF_FEDERATION_USERNAME,
                                        password=NIF_FEDERATION_PASSWORD,
                                        created=org.created,
                                        stopper=self.stopper,
                                        restart=self.restart,
                                        background=False,
                                        initial_timedelta=0,
                                        overlap_timedelta=5,
                                        lock=self.bound_semaphore,
                                        sync_type='federation',
                                        sync_interval=NIF_COMPETENCE_SYNC_INTERVAL))
        except Exception as e:
            self.log.exception('Error initiating licenses and competences')

        # Start all workers
        self.log.info('Starting all workers')
        for worker in self.workers:
            worker.start()
            time.sleep(1)  # Spread each worker accordingly

    def shutdown(self):
        self.log.info('Shutdown workers called')
        self.stopper.set()
        for worker in self.workers:
            self.log.info('Joining {}'.format(worker.name))
            worker.join()

        self.workers_started.clear()
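
# For reference, a minimal driver sketch for SyncWrapper, mirroring how
# PyroService wires it up; the two events are the only required collaborators:
if __name__ == '__main__':
    import threading

    stopper = threading.Event()
    workers_started = threading.Event()

    wrapper = SyncWrapper(stopper=stopper, workers_started=workers_started, restart=True)
    wrapper.start()  # builds integration users, then starts every NifSync worker

    # From another thread, a clean stop:
    # wrapper.shutdown()  # sets stopper and joins all workers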
    Shuts down all workers, then restarts them"""

# Signal map: maps signals to module-level handler functions
# (SIGKILL cannot be caught)
signal_map = {
    signal.SIGHUP: shutdown_workers,
    signal.SIGINT: shutdown_workers,
    signal.SIGTTIN: shutdown_workers,
    signal.SIGTTOU: shutdown_workers,
    signal.SIGTERM: shutdown_workers,
    signal.SIGTSTP: shutdown_workers,
    signal.SIGUSR1: reboot_workers,
}

if __name__ == '__main__':

    log = AppLogger(name='syncdaemon')
    log.info('[STARTUP]')
    log.info('** ENV: {} **'.format(NIF_REALM))
    log.info('Entering daemon context')

    with DaemonContext(signal_map=signal_map,
                       detach_process=True,  # False for running in the foreground
                       stdin=None,
                       stdout=None,  # sys.stdout
                       stderr=None,  # sys.stderr
                       pidfile=pidfile.PIDLockFile(
                           '{}/{}'.format(os.getcwd(), SYNCDAEMON_PID_FILE)),
                       chroot_directory=None,  # Same
                       working_directory='{}/'.format(os.getcwd())
                       ):