def get(self):
    """Get JSON from remote URL.

    Args:
        None

    Returns:
        result: dict of JSON retrieved. Empty dict on failure.

    """
    # Initialize key variables
    result = {}
    url = self._url

    # Get URL
    try:
        with urllib.request.urlopen(url) as u_handle:
            try:
                result = json.loads(u_handle.read().decode())
            except Exception:
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are no longer silently swallowed.
                (etype, evalue, etraceback) = sys.exc_info()
                log_message = (
                    'Error reading JSON from URL {}: [{}, {}, {}]'
                    ''.format(url, etype, evalue, etraceback))
                log.log2info(1008, log_message)
    except Exception:
        # Most likely no connectivity or the TCP port is unavailable.
        # Narrowed from a bare "except:" for the same reason as above.
        (etype, evalue, etraceback) = sys.exc_info()
        log_message = ('Error contacting URL {}: [{}, {}, {}]'
                       ''.format(url, etype, evalue, etraceback))
        log.log2info(1186, log_message)

    # Return
    return result
def start(self):
    """Start the daemon.

    Args:
        None

    Returns:
        None

    """
    # Refuse to start when a PID file indicates a running daemon
    existing_pid = _pid(self.pidfile)
    if existing_pid:
        log.log2die(
            1073,
            'PID file: {} already exists. Daemon already running?'
            ''.format(self.pidfile))

    # Detach the process into the background
    self._daemonize()

    # Record the successful start
    log.log2info(
        1070,
        'Daemon {} started - PID file: {}'.format(self.name, self.pidfile))

    # Enter the daemon's main loop
    self.run()
def db_query(error_code, close=True): """Provide a transactional scope around Query operations. From https://docs.sqlalchemy.org/en/13/orm/session_basics.html Args: error_code: Error code to use in messages close: Close session if True. GraphQL mutations sometimes require the session to remain open. Returns: None """ # Initialize key variables prefix = 'Unable to read database.' # Create session from pool session = POOL() # Setup basic functions try: yield session except Exception as exception_error: session.close() log_message = '{}. Error: "{}"'.format(prefix, exception_error) log.log2info(error_code, log_message) except: session.close() log_message = '{}. Unknown error'.format(prefix) log.log2info(error_code, log_message) finally: # Return the Connection to the pool if bool(close) is True: session.close()
def start(self):
    """Start the daemon.

    Args:
        None

    Returns:
        None

    """
    # Check for a pidfile to see if the daemon already runs.
    # Bug fix: ValueError is now also caught - a corrupt or empty PID file
    # previously made int() raise and crashed startup instead of being
    # treated as "not running".
    try:
        with open(self.pidfile, 'r') as pf_handle:
            pid = int(pf_handle.read().strip())
    except (IOError, ValueError):
        pid = None

    # Die if already running
    if pid:
        log_message = (
            'PID file: {} already exists. Daemon already running?'
            ''.format(self.pidfile))
        log.log2die(1062, log_message)

    # Start the daemon
    self._daemonize()

    # Log success
    log_message = ('Daemon {} started - PID file: {}'
                   ''.format(self.name, self.pidfile))
    log.log2info(1070, log_message)

    # Run code for daemon
    self.run()
def registervariables(self):
    """Get list polling target information in configuration file.

    Args:
        None

    Returns:
        result: List of RegisterVariable items

    """
    # Initialize key variables
    result = []

    # Get configuration snippet
    key = 'polling_groups'
    groups = self._agent_config.get(key)
    if groups is None:
        # Bug fix: the "{}" placeholder was never substituted, so the log
        # entry did not name the missing parameter.
        log_message = '''\
"{}" parameter not found in configuration file. Will not poll.'''.format(key)
        log.log2info(65003, log_message)
        return result

    # Create register objects for every group that defines them.
    # (A redundant second "result = []" assignment was removed.)
    for group in groups:
        for register in ['input_registers', 'holding_registers']:
            if register in group:
                drvs = self._create_drv(group, register)
                result.extend(drvs)
    return result
def _get_api_public_key(session, exchange_url): """Use previously established session to get the API server's public key. Args: session: Request Session object exchange_url: URL to use for key exchange Returns: result: JSON key data from API server """ # Predefine failure response result = None status = None # Get API information try: response = session.get(exchange_url) status = response.status_code except: _exception = sys.exc_info() log_message = ('Key exchange failure') log.log2exception(1106, _exception, message=log_message) # Checks that the API sent over information if status == 200: # Process API server response result = response.json() else: log_message = ( 'Cannot get public key from API server. Status: {}'.format(status)) log.log2info(1057, log_message) return result
def query(self): """Query all remote targets for data. Args: None Returns: None """ # Initialize key variables use_script = False _running = False config = self.config interval = config.ingester_interval() script = '{}{}{}'.format(_BIN_DIRECTORY, os.sep, PATTOO_INGESTER_SCRIPT) # Post data to the remote server while True: # Get start time ts_start = time() # Check lockfile status if use_script is True: _running = check_lockfile() # Process if _running is False: if bool(use_script) is True: # Process cache with script _result = shared_files.execute(script, die=False) success = not bool(_result) else: # Process cache with function success = files.process_cache() if bool(success) is False: log_message = ('''\ Ingester failed to run. Please check log files for possible causes.''') log.log2warning(20029, log_message) else: log_message = ('''\ Ingester is unexpectedly still running. Check your parameters of error logs \ for possible causes.''') log.log2warning(20129, log_message) # Sleep. The duration could exceed the polling interval. Set sleep # time to the polling interval when this occurs. duration = time() - ts_start if duration >= interval: log_message = ('''Ingestion exceeded configured \ "ingester_interval" parameter by {:6.2f}s.'''.format(duration - interval)) sleep_time = 0 else: sleep_time = abs(interval - duration) log_message = ( 'Ingester sleeping for {:6.2f}s.'.format(sleep_time)) log.log2info(20100, log_message) sleep(sleep_time)
def query(self): """Query all remote targets for data. Args: None Returns: None """ # Check for lock and pid files if os.path.exists(self.lockfile_parent) is True: log_message = ('''\ Lock file {} exists. Multiple API daemons running API may have died \ catastrophically in the past, in which case the lockfile should be deleted.\ '''.format(self.lockfile_parent)) log.log2see(1083, log_message) if os.path.exists(self.pidfile_parent) is True: log_message = ('''\ PID file: {} already exists. Daemon already running? If not, it may have died \ catastrophically in the past in which case you should use --stop --force to \ fix.'''.format(self.pidfile_parent)) log.log2see(1084, log_message) ###################################################################### # # Assign options in format that the Gunicorn WSGI will accept # # NOTE! to get a full set of valid options pprint(self.cfg.settings) # in the instantiation of _StandaloneApplication. The option names # do not exactly match the CLI options found at # http://docs.gunicorn.org/en/stable/settings.html # ###################################################################### options = { 'bind': _ip_binding(self._agent_api_variable), 'accesslog': self.config.log_file_api(), 'errorlog': self.config.log_file_api(), 'capture_output': True, 'pidfile': self._pidfile_child, 'loglevel': self.config.log_level(), 'workers': _number_of_workers(), 'umask': 0o0007, } # Log so that user running the script from the CLI knows that something # is happening log_message = ( 'Pattoo API running on {}:{} and logging to file {}.' ''.format( self._agent_api_variable.ip_listen_address, self._agent_api_variable.ip_bind_port, self.config.log_file_api())) log.log2info(1022, log_message) # Run _StandaloneApplication(self._app, self.parent, options=options).run()
def stop(self): """Stop the daemon. Args: None Returns: None """ # Get the pid from the pidfile try: with open(self.pidfile, 'r') as pf_handle: pid = int(pf_handle.read().strip()) except IOError: pid = None if not pid: log_message = ('PID file: {} does not exist. Daemon not running?' ''.format(self.pidfile)) log.log2warning(1063, log_message) # Not an error in a restart return # Try killing the daemon process try: while 1: if self.lockfile is None: os.kill(pid, signal.SIGTERM) else: time.sleep(0.3) if os.path.exists(self.lockfile) is True: continue else: os.kill(pid, signal.SIGTERM) time.sleep(0.3) except OSError as err: error = str(err.args) if error.find("No such process") > 0: self.delpid() self.dellock() else: log_message = (str(err.args)) log_message = ('{} - PID file: {}'.format( log_message, self.pidfile)) log.log2die(1068, log_message) except: log_message = ('Unknown daemon "stop" error for PID file: {}' ''.format(self.pidfile)) log.log2die(1066, log_message) # Log success self.delpid() self.dellock() log_message = ('Daemon {} stopped - PID file: {}' ''.format(self.name, self.pidfile)) log.log2info(1071, log_message)
def _send_symmetric_key(session, encryption, url, symmetric_key, data): """Send symmetric_key to the remote API server. Args: session: Request Session object encryption: Encryption object url: URL to use for exchanging the symmetric key symmetric_key: Symmetric key data: Data to post Returns: success: True if successful """ # Predefine failure response success = False status = None # Process API server information api_email = data['api_email'] api_key = data['api_key'] encrypted_nonce = data['encrypted_nonce'] # Import API public key encryption.pimport(api_key) api_fingerprint = encryption.fingerprint(api_email) encryption.trust(api_fingerprint) # Decrypt nonce decrypted_nonce = encryption.decrypt(encrypted_nonce) # Create JSON to post data_ = json.dumps({ 'encrypted_nonce': encryption.sencrypt(decrypted_nonce, symmetric_key), 'encrypted_sym_key': encryption.encrypt(symmetric_key, api_fingerprint) }) # POST data to API try: response = session.post(url, json=data_) status = response.status_code except: _exception = sys.exc_info() log_message = ('Symmetric key exchange failure') log.log2exception(1098, _exception, message=log_message) # Check that the transaction was validated if status == 200: success = True else: log_message = '''\ Cannot exchange symmetric keys with API server. Status: {}'''.format(status) log.log2info(1099, log_message) return success
def crypt_receive():
    """Receive encrypted data from agent.

    Args:
        None

    Returns:
        message (str): Reception result
        response (int): HTTP response code

    """
    # Reject the request when no symmetric key has been established yet.
    # (The previous comment stated the opposite of what this guard does.)
    if 'symm_key' not in session:
        log_message = 'No session symmetric key'
        log.log2info(20171, log_message)
        return (log_message, 208)

    if request.method == 'POST':
        try:
            # Get data from agent
            data_dict = json.loads(request.get_json(silent=False))
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are not swallowed.
            _exception = sys.exc_info()
            log_message = 'Client sent corrupted validation JSON data'
            log.log2exception(20169, _exception, message=log_message)
            return (log_message, 500)

        # Symmetrically decrypt data
        data = encryption.sdecrypt(
            data_dict['encrypted_data'], session['symm_key'])

        # Extract posted data and source
        try:
            final_data = json.loads(data)
        except Exception:
            # Narrowed from a bare "except:" (same reason as above)
            _exception = sys.exc_info()
            log_message = 'Decrypted data extraction failed'
            log.log2exception(20174, _exception, message=log_message)
            abort(500, description=log_message)

        # Save data
        success = _save_data(final_data['data'], final_data['source'])
        if bool(success) is False:
            abort(500, description='Invalid JSON data received.')

        # Return
        log_message = 'Decrypted and received'
        log.log2info(20184, log_message)
        return (log_message, 202)

    # Otherwise abort
    return ('Proceed to key exchange first', 400)
def route_chart_data(idx_datapoint):
    """Get chart data for a DataPoint from the remote API server.

    Args:
        idx_datapoint: Datapoint index value to chart

    Returns:
        Flask JSON response containing the retrieved data (an empty list
        on failure)

    """
    # Initialize key variables
    success = False
    response = False
    data = []
    config = Config()

    # Get URL parameters; default the lookback window to one day
    secondsago = uri.integerize_arg(request.args.get('secondsago'))
    if bool(secondsago) is False:
        secondsago = SECONDS_IN_DAY

    # Create URL for DataPoint data
    url = ('{}/{}?secondsago={}'.format(
        config.web_api_server_url(graphql=False), idx_datapoint, secondsago))

    # Get data
    try:
        result = requests.get(url)
        response = True
    except Exception:
        # Most likely no connectivity or the TCP port is unavailable.
        # Narrowed from a bare "except:" so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        error = sys.exc_info()[:2]
        log_message = ('Error contacting URL {}: ({} {})'
                       ''.format(url, error[0], error[1]))
        log.log2info(80010, log_message)

    # Define success
    if response is True:
        if result.status_code == 200:
            success = True
        else:
            log_message = ('''\
HTTP {} error for receiving data from server {}\
'''.format(result.status_code, url))
            log.log2warning(80011, log_message)

    # Present the data
    if success is True:
        data = result.json()
    return jsonify(data)
def args(self):
    """Return all the CLI options.

    Args:
        None

    Returns:
        _args: Namespace() containing all of our CLI arguments as objects
            - filename: Path to the configuration file

    """
    # Initialize key variables
    width = 80

    # Record the exact command line that was invoked
    log.log2info(20043, 'CLI: {}'.format(' '.join(sys.argv)))

    # Build the top-level parser used for the application's help menu
    parser = _Parser(
        description=self._help,
        formatter_class=argparse.RawTextHelpFormatter)

    # Add subparser
    subparsers = parser.add_subparsers(dest='action')

    # Register each CLI verb ("show", "create", "set", "import", "assign")
    for _verb in (_Show, _Create, _Set, _Import, _Assign):
        _verb(subparsers, width=width)

    # With no arguments at all, print the help text and exit
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Parse and return our CLI arguments along with the parser itself
    _args = parser.parse_args()
    return (_args, parser)
def wrapper():
    """Wrapper function.

    Waits (up to self.graceful_timeout seconds) for the daemon to finish
    its current work before invoking callback(). Closure over self and
    callback from the enclosing scope.
    """
    # Only wait when the daemon's lock file shows it is still processing
    if self.__daemon_running():
        log_message = '''{} Lock file exists, Process still running'''.format(self.name)
        log.log2info(1101, log_message)

        # Continually checks if daemon is still running exits loop once
        # instance graceful_timeout limit reached
        timeout_counter = time.time()
        while True:
            # Updating timeout duration. NOTE(review): the "- 1" presumably
            # compensates for the 1 second sleep below -- TODO confirm.
            current_duration = time.time() - timeout_counter - 1
            time.sleep(1)
            if not self.__daemon_running() is True:
                # Daemon finished its work: proceed with the shutdown
                log_message = '''Process {} no longer processing'''.format(self.name)
                log.log2info(1103, log_message)
                break
            if current_duration >= self.graceful_timeout:
                # Timed out waiting: fall through to a hard shutdown
                log_message = '''Process {} failed to shutdown, DUE TO TIMEOUT'''.format(self.name)
                log.log2info(1104, log_message)
                log_message = '''{}, hard shutdown in progress'''.format(self.name)
                log.log2info(1105, log_message)
                break
    # Invoke the wrapped shutdown action in every case
    callback()
def target_polling_points(self):
    """Get list polling target information in configuration file.

    Args:
        None

    Returns:
        result: List of TargetPollingPoints objects

    """
    # Initialize key variables
    result = []

    # Get configuration snippet
    key = 'polling_groups'
    groups = self._agent_config.get(key)
    if groups is None:
        # Bug fix: the "{}" placeholder was never substituted, so the log
        # entry did not name the missing parameter.
        log_message = '''\
"{}" parameter not found in configuration file. Will not poll.'''.format(key)
        log.log2info(70003, log_message)
        return result

    # Create OPC UA polling objects
    for group in groups:
        # Ignore bad values
        if isinstance(group, dict) is False:
            continue

        # Process data
        ip_target = group.get('ip_target')
        ip_port = group.get('ip_port')
        username = group.get('username')
        password = group.get('password')
        auth = OPCUAauth(
            ip_target=ip_target,
            ip_port=ip_port,
            username=username,
            password=password)
        nodes = group.get('nodes')
        poll_targets = configuration.get_polling_points(nodes)
        dpt = TargetPollingPoints(auth)
        dpt.add(poll_targets)
        # Only keep groups whose polling points validated
        if dpt.valid is True:
            result.append(dpt)
    return result
def _send_agent_public_key(session, encryption, exchange_url): """Send public key to the remote API server. Args: session: Request Session object encryption: Encryption object exchange_url: URL to use for key exchange Returns: success: True is successful """ # Predefine failure response success = False status = None # Data for POST send_data = { 'pattoo_agent_email': encryption.email, 'pattoo_agent_key': encryption.pexport() } # Convert dict to str send_data = json.dumps(send_data) try: # Send over data response = session.post(exchange_url, json=send_data) status = response.status_code except: _exception = sys.exc_info() log_message = ('Key exchange failure') log.log2exception(1077, _exception, message=log_message) # Checks that sent data was accepted if status in [202, 208]: success = True else: log_message = ( 'Cannot send public key to API server. Status: {}'.format(status)) log.log2info(1069, log_message) return success
def _snmpvariables(_configuration):
    """Get list of dicts of SNMP information in configuration file.

    Args:
        _configuration: Configuration to process

    Returns:
        result: List of SNMPVariable items

    """
    # Initialize key variables
    result = []

    # Get configuration snippet
    key = 'auth_groups'
    sub_config = _configuration.get(key)
    if sub_config is None:
        # Bug fix: the "{}" placeholder was never substituted, so the log
        # entry did not name the missing parameter.
        log_message = '''\
"{}" parameter not found in configuration file. Will not poll.'''.format(key)
        log.log2info(55001, log_message)
        return result

    # Create snmp objects
    groups = _validate_snmp(sub_config)
    for group in groups:
        # Save the authentication parameters; defaults are SNMPv2 with the
        # "public" community on the standard port
        snmpauth = SNMPAuth(
            version=group.get('snmp_version', 2),
            community=group.get('snmp_community', 'public'),
            port=group.get('snmp_port', 161),
            secname=group.get('snmp_secname'),
            authprotocol=group.get('snmp_authprotocol'),
            authpassword=group.get('snmp_authpassword'),
            privprotocol=group.get('snmp_privprotocol'),
            privpassword=group.get('snmp_privpassword'))

        # Create the SNMPVariableList
        snmpvariablelist = SNMPVariableList(snmpauth, group['ip_targets'])
        snmpvariables = snmpvariablelist.snmpvariables
        result.extend(snmpvariables)

    # Return
    return result
def stop(self):
    """Stop the daemon.

    Args:
        None

    Returns:
        None

    """
    # Check for a pidfile to see if the daemon already runs
    pid = _pid(self.pidfile)
    if bool(pid) is False:
        log_message = ('PID file: {} does not exist. Daemon not running?'
                       ''.format(self.pidfile))
        log.log2warning(1063, log_message)
        # Not an error in a restart
        return

    # Try killing the daemon process
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError as err:
        error = str(err.args)
        if error.find('No such process') > 0:
            # Process is already gone: clean up the stale files
            self.delpid()
            self.dellock()
        else:
            log_message = (str(err.args))
            log_message = ('{} - PID file: {}'.format(
                log_message, self.pidfile))
            log.log2die(1068, log_message)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        log_message = ('Unknown daemon "stopped" error for PID file: {}'
                       ''.format(self.pidfile))
        log.log2die(1066, log_message)

    # Log success
    self.delpid()
    self.dellock()
    log_message = ('Daemon {} stopped - PID file: {}'
                   ''.format(self.name, self.pidfile))
    log.log2info(1071, log_message)
def _target_polling_points(_configuration):
    """Get list of dicts of SNMP information in configuration file.

    Args:
        _configuration: Configuration to process

    Returns:
        result: List of IPTargetPollingPoints objects

    """
    # Initialize key variables
    result = []
    datapoint_key = 'oids'

    # Get configuration snippet
    key = 'polling_groups'
    sub_config = _configuration.get(key)
    if sub_config is None:
        # Bug fix: the "{}" placeholder was never substituted, so the log
        # entry did not name the missing parameter.
        log_message = '''\
"{}" parameter not found in configuration file. Will not poll.'''.format(key)
        log.log2info(55000, log_message)
        return result

    # Create snmp objects
    groups = _validate_oids(sub_config)
    for group in groups:
        # Ignore bad values
        if isinstance(group, dict) is False:
            continue

        # Process data.
        # Bug fix: the original condition was
        # "if 'ip_targets' and datapoint_key in group", where the string
        # literal 'ip_targets' is always truthy, so membership was only
        # checked for datapoint_key and a group lacking 'ip_targets'
        # raised a KeyError below.
        if 'ip_targets' in group and datapoint_key in group:
            for ip_target in group['ip_targets']:
                poll_targets = configuration.get_polling_points(
                    group[datapoint_key])
                dpt = IPTargetPollingPoints(ip_target)
                dpt.add(poll_targets)
                if dpt.valid is True:
                    result.append(dpt)
    return result
def target_polling_points(self):
    """Get list polling target information in configuration file.

    Args:
        None

    Returns:
        result: List of IPTargetPollingPoints objects

    """
    # Initialize key variables
    result = []
    datapoint_key = 'points'

    # Get configuration snippet
    key = 'polling_groups'
    groups = self._agent_config.get(key)
    if groups is None:
        # Bug fix: the "{}" placeholder was never substituted, so the log
        # entry did not name the missing parameter.
        log_message = '''\
"{}" parameter not found in configuration file. Will not poll.'''.format(key)
        log.log2info(60003, log_message)
        return result

    # Create polling point objects
    for group in groups:
        # Ignore bad values
        if isinstance(group, dict) is False:
            continue

        # Process data.
        # Bug fix: the original condition was
        # "if 'ip_targets' and datapoint_key in group", where the string
        # literal 'ip_targets' is always truthy, so membership was only
        # checked for datapoint_key and a group lacking 'ip_targets'
        # raised a KeyError below.
        if 'ip_targets' in group and datapoint_key in group:
            for ip_target in group['ip_targets']:
                poll_targets = configuration.get_polling_points(
                    group[datapoint_key])
                dpt = IPTargetPollingPoints(ip_target)
                dpt.add(poll_targets)
                if dpt.valid is True:
                    result.append(dpt)
    return result
def db_modify(error_code, die=True): """Provide a transactional scope around Update / Insert operations. From https://docs.sqlalchemy.org/en/13/orm/session_basics.html Args: error_code: Error code to use in messages die: Die if True Returns: None """ # Initialize key variables prefix = 'Unable to modify database.' # Create session from pool session = POOL() # Setup basic functions try: yield session session.commit() except Exception as exception_error: session.rollback() log_message = '{}. Error: "{}"'.format(prefix, exception_error) if bool(die) is True: log.log2die(error_code, log_message) else: log.log2info(error_code, log_message) except: session.rollback() log_message = '{}. Unknown error'.format(prefix) if bool(die) is True: log.log2die(error_code, log_message) else: log.log2info(error_code, log_message) finally: # Return the Connection to the pool session.close()
def __init__(self, agent):
    """Initialize the class.

    Args:
        agent: Agent object

    Returns:
        None

    """
    # Read the graceful shutdown timeout from the agent configuration
    try:
        self.graceful_timeout = agent.config.graceful_timeout()
    except AttributeError as err:
        # Sets default GracefulDaemon shutdown timeout if not defined by the
        # agent configuration
        log_message = '''Graceful Timeout configuration not set, {}\n Default setting to 10s'''.format(err)
        log.log2info(1100, log_message)
        self.graceful_timeout = 10

    # Complete standard daemon initialization
    Daemon.__init__(self, agent)
def records(self):
    """Create PattooDBrecord objects from cache directory.

    Args:
        None

    Returns:
        result: List of list of PattooDBrecord objects grouped by agent_id

    """
    # Rows collected per originating agent
    grouped = {}

    # Convert each cached JSON file into key-pair rows
    for filepath, json_data in sorted(self._data):
        # Skip files whose payload is missing or not a dict
        if not (bool(json_data) and isinstance(json_data, dict)):
            continue

        rows = converter.cache_to_keypairs(json_data)
        if not rows:
            log_message = ('''\
File {} has invalid data. It will not be processed'''.format(filepath))
            log.log2info(20026, log_message)
            continue

        # Group rows by the agent that produced them
        agent_id = rows[0].pattoo_agent_id
        grouped.setdefault(agent_id, []).extend(rows)

    # Emit the groups in sorted agent_id order
    result = [rows for _, rows in sorted(grouped.items())]

    # Return
    return result
def key_exchange(): """Process public key exhange. Args: None Returns: result: Various responses """ # Initialize key variables required_keys = ['pattoo_agent_email', 'pattoo_agent_key'] # If a symmetric key has already been established, skip if 'symm_key' in session: log_message = 'Symmetric key already set.' log.log2info(20148, log_message) return (log_message, 208) # Get data from incoming agent POST if request.method == 'POST': try: # Get data from agent data_dict = json.loads(request.get_json(silent=False)) except: _exception = sys.exc_info() log_message = 'Client sent corrupted JSON data' log.log2exception(20167, _exception, message=log_message) return (log_message, 500) # Check for minimal keys for key in required_keys: if key not in data_dict.keys(): log_message = '''\ Required JSON key "{}" missing in key exchange'''.format(key) log.log2warning(20164, log_message) abort(404) # Save email in session session['email'] = data_dict['pattoo_agent_email'] # Save agent public key in keyring encryption.pimport(data_dict['pattoo_agent_key']) return ('Key received', 202) # Get data from incoming agent POST if request.method == 'GET': if 'email' in session: # Generate server nonce session['nonce'] = hashlib.sha256(str( uuid.uuid4()).encode()).hexdigest() # Retrieve information from session. Set previously in POST agent_fingerprint = encryption.fingerprint(session['email']) # Trust agent key encryption.trust(agent_fingerprint) # Encrypt api nonce with agent public key encrypted_nonce = encryption.encrypt(session['nonce'], agent_fingerprint) data_dict = { 'api_email': encryption.email, 'api_key': encryption.pexport(), 'encrypted_nonce': encrypted_nonce } # Send api email, public key and encrypted nonce log_message = 'API information sent' return jsonify(data_dict) # Otherwise send error message return ('Send email and key first', 403) # Return aborted status abort(400)
def crypt_receive():
    """Receive encrypted data from agent.

    Decrypts the posted payload with the session's symmetric key,
    validates it against CACHE_KEYS and writes it to the agent cache
    directory as a JSON file.

    Args:
        None

    Returns:
        message (str): Reception result
        response (int): HTTP response code

    """
    # Read configuration
    config = Config()
    cache_dir = config.agent_cache_directory(PATTOO_API_AGENT_NAME)

    try:
        # Retrieves Pgpier class
        gpg = get_gnupg(PATTOO_API_AGENT_NAME, config)

        # Sets key ID
        gpg.set_keyid()

        # Checks if a Pgpier object exists.
        # NOTE(review): this check runs after gpg.set_keyid(), which would
        # already have raised AttributeError if gpg were None.
        if gpg is None:
            raise Exception('Could not retrieve Pgpier for {}'.format(
                PATTOO_API_AGENT_NAME))
    except Exception as e:
        response = 500
        message = 'Server error'
        log_msg = 'Could not retrieve Pgpier: >>>{}<<<'.format(e)
        log.log2warning(20175, log_msg)
        return message, response

    # Predefined error message and response
    response = 400
    message = 'Proceed to key exchange first'

    # Block connection if a symmetric key was not stored
    if 'symm_key' not in session:
        message = 'No symmetric key'
        response = 403
        return message, response

    if request.method == 'POST':
        # Get data from agent
        data_json = request.get_json(silent=False)
        data_dict = json.loads(data_json)

        # Retrieved symmetrically encrypted data
        encrypted_data = data_dict['encrypted_data']

        # Symmetrically decrypt data
        data = gpg.symmetric_decrypt(encrypted_data, session['symm_key'])

        # Initialize key variables
        prefix = 'Invalid posted data.'
        posted_data = None
        source = None

        # Extract posted data and source.
        # NOTE(review): on failure this only logs; posted_data stays None
        # and is rejected by the isinstance() check below.
        try:
            data_extract = json.loads(data)
            posted_data = data_extract['data']
            source = data_extract['source']
        except Exception as e:
            log_message = 'Decrypted data extraction failed: {}'\
                .format(e)
            log.log2warning(20176, log_message)

        # NOTE(review): logged unconditionally, even after a failed extract
        log_message = 'Decrypted data extraction successful'
        log.log2info(20177, log_message)

        # Abort if posted_data isn't a dict
        if isinstance(posted_data, dict) is False:
            log_message = '{} Not a dictionary'.format(prefix)
            log.log2warning(20178, log_message)
            abort(404)

        # The posted dict must carry exactly the expected cache keys
        if len(posted_data) != len(CACHE_KEYS):
            log_message = ('''{} Incorrect length.
Expected length of {} '''.format(prefix, len(CACHE_KEYS)))
            log.log2warning(20179, log_message)
            abort(404)
        for key in posted_data.keys():
            if key not in CACHE_KEYS:
                log_message = '{} Invalid key'.format(prefix)
                log.log2warning(20180, log_message)
                abort(404)

        # Extract key values from posting
        try:
            timestamp = posted_data['pattoo_agent_timestamp']
        except:
            _exception = sys.exc_info()
            log_message = ('API Failure')
            log.log2exception(20181, _exception, message=log_message)
            abort(404)

        # Create filename. Add a suffix in the event the source is posting
        # frequently.
        suffix = str(randrange(100000)).zfill(6)
        json_path = ('{}{}{}_{}_{}.json'.format(
            cache_dir, os.sep, timestamp, source, suffix))

        # Create cache file
        try:
            with open(json_path, 'w+') as temp_file:
                json.dump(posted_data, temp_file)
        except Exception as err:
            log_message = '{}'.format(err)
            log.log2warning(20182, log_message)
            abort(404)
        except:
            # Bare except: catches BaseException subclasses not matched by
            # the Exception handler above.
            _exception = sys.exc_info()
            log_message = ('API Failure')
            log.log2exception(20183, _exception, message=log_message)
            abort(404)

        # Return
        message = 'Decrypted and received'
        response = 202
        log.log2info(20184, message)

    return message, response
def test_log2info(self): """Testing function log2info.""" # Test should not cause script to crash log.log2info(self.code, self.message)
def process_cache(batch_size=500, max_duration=3600, fileage=10, script=False):
    """Ingest data.

    Args:
        batch_size: Number of files to process at a time
        max_duration: Maximum duration
        fileage: Minimum age of files to be processed in seconds
        script: True when run as a standalone script; enables the lockfile
            protocol (the daemon has its own locking)

    Returns:
        success: True if successful

    Method:
        1) Read the files in the cache directory older than a threshold
        2) Process the data in the files
        3) Repeat, if new files are found that are older than the threshold,
           or we have been running too long.

        Batches of files are read to reduce the risk of overloading
        available memory, and ensure we can exit if we are running too long.

    """
    # Initialize key variables
    records = 0
    start = time.time()
    looptime = 0
    files_read = 0
    success = True

    # Get cache directory
    config = Config()
    directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)

    # Log what we are doing
    log_message = 'Processing ingest cache.'
    log.log2info(20085, log_message)

    # Get the number of files in the directory (snapshot taken once; files
    # added afterwards are not processed in this run)
    files_found = len(
        [_ for _ in os.listdir(directory) if _.endswith('.json')])

    # Create lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock()
        if bool(success) is False:
            return bool(success)

    # Process the files in batches to reduce the database connection count
    # This can cause errors
    while True:
        # Agents constantly update files. We don't want an infinite loop
        # situation where we always have files available that are newer than
        # the desired fileage.
        loopstart = time.time()
        # Raise the age threshold by the longest loop seen so far
        fileage = fileage + looptime

        # Automatically stop if we are going on too long.(1 of 2)
        duration = loopstart - start
        if duration > max_duration:
            log_message = ('''\
Stopping ingester after exceeding the maximum runtime duration of {}s. \
This can be adjusted on the CLI.'''.format(max_duration))
            log.log2info(20022, log_message)
            break

        # Automatically stop if we are going on too long.(2 of 2)
        if files_read >= files_found:
            # No need to log. This is an expected outcome.
            break

        # Read data from cache. Stop if there is no data found.
        cache = Cache(batch_size=batch_size, age=fileage)
        count = cache.ingest()

        # Stop once a batch comes back with no files
        if bool(cache.files) is False:
            # No need to log. This is an expected outcome.
            break

        # Get the records processed, looptime and files read
        records += count
        files_read += cache.files
        looptime = max(time.time() - loopstart, looptime)

    # Print result
    duration = time.time() - start
    if bool(records) is True and bool(duration) is True:
        log_message = ('''\
Agent cache ingest completed. {0} records processed in {1:.2f} seconds, \
{2:.2f} records / second. {3} files read. \
'''.format(records, duration, records / duration, files_read))
        log.log2info(20084, log_message)
    else:
        log_message = 'No files found to ingest'
        log.log2info(20021, log_message)

    # Delete lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock(delete=True)

    # Log what we are doing
    log_message = 'Finished processing ingest cache.'
    log.log2info(20020, log_message)
    return bool(success)
async def _serial_poller_async(tpp):
    """Poll OPCUA agent data.

    Args:
        tpp: TargetDataPoints object

    Returns:
        target_datapoints: TargetDataPoints object, or None when the
            tpp argument is invalid

    """
    # Initialize key variables
    connected = False

    # Test for validity
    if isinstance(tpp, TargetPollingPoints) is False:
        return None
    if isinstance(tpp.target, OPCUAauth) is False:
        return None
    if tpp.valid is False:
        return None

    # Create URL for polling
    ip_target = tpp.target.ip_target
    ip_port = tpp.target.ip_port
    username = tpp.target.username
    password = tpp.target.password
    url = 'opc.tcp://{}:{}'.format(ip_target, ip_port)

    # Initialize data gathering
    target_datapoints = TargetDataPoints(ip_target)

    # Create a client object to connect to OPCUA server
    client = Client(url=url)
    client.set_user(username)
    client.set_password(password)

    # Connect. A bare "except" here would also swallow SystemExit and
    # KeyboardInterrupt, so only trap ordinary failures.
    try:
        await client.connect()
        connected = True
    except Exception:
        log_message = (
            'Authentication for polling target {} is incorrect'.format(url))
        log.log2warning(51011, log_message)

    if connected is True:
        for point in tpp.data:
            # Make sure we have the right data type
            if isinstance(point, PollingPoint) is False:
                log_message = ('''\
Invalid polling point {} for OPC UA URL {}'''.format(point, url))
                log.log2info(51012, log_message)
                continue

            # Get data
            address = point.address
            try:
                node = client.get_node(address)
                value = await node.read_value()
            except BadNodeIdUnknown:
                log_message = ('''\
OPC UA node {} not found on server {}'''.format(address, url))
                log.log2warning(51015, log_message)
                continue
            except Exception:
                _exception = sys.exc_info()
                log_message = ('OPC UA server communication error')
                log.log2exception(51014, _exception, message=log_message)
                log_message = ('''\
Cannot get value from polling point {} for OPC UA URL {}\
'''.format(address, url))
                log.log2info(51013, log_message)
                continue

            # Create datapoint, applying any multiplier when both the value
            # and the multiplier are numeric
            if bool(point.multiplier) is True:
                if is_numeric(value) is True and (
                        is_numeric(point.multiplier) is True):
                    value = value * point.multiplier
            else:
                value = 0
            datapoint = DataPoint(address, value)
            datapoint.add(DataPointMetadata('OPCUA Server', ip_target))
            target_datapoints.add(datapoint)

        # Disconnect client
        await client.disconnect()

    return target_datapoints
def purge(url, identifier):
    """Purge data from cache by posting to central server.

    Args:
        url: URL to receive posted data
        identifier: Unique identifier for the source of the data. (AgentID)

    Returns:
        None

    """
    # Initialize key variables
    config = Config()
    cache_dir = config.agent_cache_directory(identifier)

    # Add files in cache directory to list only if they match the
    # cache suffix
    all_filenames = [
        filename for filename in os.listdir(cache_dir)
        if os.path.isfile(os.path.join(cache_dir, filename))
    ]
    filenames = [
        filename for filename in all_filenames
        if filename.endswith('.json')
    ]

    # Read cache file
    for filename in filenames:
        # Only post files for our own UID value
        if identifier not in filename:
            continue

        # Get the full filepath for the cache file and post
        filepath = os.path.join(cache_dir, filename)
        with open(filepath, 'r') as f_handle:
            try:
                data = json.load(f_handle)
            except Exception:
                # Narrowed from a bare "except": a decode failure means the
                # file is corrupted, so log and remove it.
                log_message = ('''\
Error reading previously cached agent data file {} for identifier {}. May be \
corrupted.'''.format(filepath, identifier))
                log.log2warning(1064, log_message)

                # Delete file
                if os.path.isfile(filepath) is True:
                    os.remove(filepath)
                    log_message = ('''\
Deleting corrupted cache file {} for identifier {}.\
'''.format(filepath, identifier))
                    log.log2warning(1036, log_message)

                # Go to the next file.
                continue

        # Post file
        success = post(url, data, identifier, save=False)

        # Delete file if successful
        if success is True:
            if os.path.isfile(filepath) is True:
                os.remove(filepath)

                # Log removal
                log_message = ('''\
Purging cache file {} after successfully contacting server {}\
'''.format(filepath, url))
                log.log2info(1007, log_message)
def validation(): """Validate remote agent. Process: 1) Decrypt the symmetric key received from the agent 2) Decrypting the nonce from agent 3) Verify that nonce is the same as originally sent. 4) Store symmetric key in session to be used for future decryption. 6) Delete the agent public key Args: None Returns: message (str): Validation response message response (int): HTTP response code """ # If a symmetric key has already been established, skip if 'symm_key' in session: log_message = 'Symmetric key already set.' log.log2info(20165, log_message) return (log_message, 208) # If no nonce is set, inform agent to exchange keys if 'nonce' not in session: return ('Proceed to key exchange first', 403) # Get data from incoming agent POST if request.method == 'POST': try: # Get data from agent data_dict = json.loads(request.get_json(silent=False)) except: _exception = sys.exc_info() log_message = 'Client sent corrupted validation JSON data' log.log2exception(20168, _exception, message=log_message) return (log_message, 500) # Decrypt symmetric key symmetric_key = encryption.decrypt(data_dict['encrypted_sym_key']) # Symmetrically decrypt nonce nonce = encryption.sdecrypt(data_dict['encrypted_nonce'], symmetric_key) # Checks if the decrypted nonce matches one sent if nonce != session['nonce']: log_message = 'Nonce does not match' log.log2info(20166, log_message) return (log_message, 401) # Delete agent public key encryption.pdelete(encryption.fingerprint(session['email'])) # Session parameter cleanup session['symm_key'] = symmetric_key session.pop('email', None) session.pop('nonce', None) # Return response log_message = 'Symmetric key saved' log.log2info(20173, log_message) return (log_message, 200) # Otherwise abort abort(404)