def run(self):
    tries = config.config()['log_synchronization']['tries']
    period = 60 * config.config()['log_synchronization']['period']
    server_ip = config.config()['network']['server_ip']
    server_port = config.config()['network']['server_port']
    while True:
        try:
            with db.manager().transaction():
                log.logger().info('Sending log to server...')
                _log = proto.man_message()
                _log.message_type = 'log'
                db.manager().get_log(_log.db)
                while tries > 0:
                    try:
                        self.__conn.send_message((server_ip, server_port), _log.SerializeToString())
                        break
                    except Exception as exc:
                        log.logger().error('Log synchronizer: ' + repr(exc))
                        tries -= 1
                if tries > 0:
                    db.manager().drop_log()
                    log.logger().info('Log successfully sent.')
                else:
                    raise LogSyncError('Log sending failed.')
        except LogSyncError as err:
            log.logger().info(err.value)
        except Exception as exc:
            log.logger().error(repr(exc))
        tries = config.config()['log_synchronization']['tries']
        time.sleep(period)
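# Minimal sketch of the LogSyncError exception that run() above raises and
# catches; the project's real definition may live elsewhere and differ in
# detail. The `value` attribute is assumed only because the handler reads
# err.value.
class LogSyncError(Exception):
    def __init__(self, value):
        Exception.__init__(self, value)
        self.value = value  # message text read by the `except LogSyncError` branch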
def __init__(self):
    self._tcp_port = config.config()["network"]["port"]
    tcp_handlers = {"new_db": save_db_handler}
    self._tcp_net = ConnServer(("localhost", self._tcp_port), tcp_handlers)
    self._local_net = CANserver(config.config()["can_net"]["interface"], self.local_message_handler)
    self._conf_period = config.config()["can_net"]["confirmation_period"]
    # number of periods to wait:
    self._conf_timeout = config.config()["can_net"]["confirmation_timeout"] / self._conf_period
    self._conf_waiters = {}
    # _local_handlers struct: (message handler, message decoder)
    self._local_handlers = {
        0: (self._handle_confirmation_msg, lprotocols.in_lvl2_confirmation),
        1: (self._handle_common_msg, lprotocols.in_lvl2_common),
    }
    self._points_manager = PointsManager(self.send_local_msg)
    self._log_sync = LogSynchronizer(self._tcp_net.server_out)
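# Worked example of the confirmation-timeout arithmetic above (values are
# illustrative, not taken from the shipped config): with
# confirmation_timeout = 2.0 s and confirmation_period = 0.1 s,
# _conf_timeout = 2.0 / 0.1 = 20, i.e. the number of confirmation periods to
# wait before a pending confirmation is considered timed out.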
def __init__(self, local_net_send):
    log.logger().info('initializing PointsManager')
    try:
        extensions = config()['extensions']
        self.points = {}
        for point_ext in extensions:
            log.logger().info('loading extension %s' % point_ext)
            try:
                point_mod = __import__('node.extensions.' + point_ext + '.' + point_ext,
                                       fromlist=['node', 'extensions', point_ext])
                point_class = getattr(point_mod, point_ext)
                for point_parameters in point_class.config():
                    log.logger().info('creating access point')
                    try:
                        point_object = point_class(point_parameters, local_net_send)
                        addresses = point_object.address_list()
                        for addr in addresses:
                            self.points[addr] = point_object
                    except DbError as exc:
                        log.logger().error('failed to create access point: ' + traceback.format_exc())
            except Exception as exc:
                log.logger().error('failed to load extension: ' + traceback.format_exc())
    except Exception:
        log.logger().fatal('failed to initialize PointsManager')
        raise
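# Minimal sketch of the access-point extension interface implied by the loader
# above: an extension module node.extensions.<name>.<name> is expected to
# expose a class also called <name> with a config() classmethod (one entry per
# access point to create), a constructor taking (parameters, local_net_send)
# and an address_list() method. The class name, parameter keys and addresses
# below are illustrative assumptions, not a real extension shipped with the
# project.
class example_point(object):

    @classmethod
    def config(cls):
        # one parameter set per access point instance (illustrative values)
        return [{'address': 1}, {'address': 2}]

    def __init__(self, parameters, local_net_send):
        self._address = parameters['address']
        # callback used to push messages onto the local CAN network
        self._send = local_net_send

    def address_list(self):
        # addresses under which PointsManager registers this object
        return [self._address]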
def run(self):
    nodes = self.__msg.db.node
    tries = config.config()['server']['tries']
    while (len(nodes) > 0) and (tries > 0):
        problematic_nodes = []
        for node in nodes:
            if node.ip:
                try:
                    log.logger().info('Synchronizer: processing node with id=%s, ip=%s', node.id, node.ip)
                    self.__conn.send_message((node.ip, config.config()['server']['nodes_port']),
                                             self.__msg.SerializeToString())
                    log.logger().info('Synchronizer: finished processing node with id=%s, ip=%s', node.id, node.ip)
                except Exception as exc:
                    problematic_nodes.append(node)
                    log.logger().error('error while sending message: ' + repr(exc))
        if len(problematic_nodes) > 0:
            tries -= 1
        nodes = problematic_nodes
        sending_delay = config.config()['server']['sending_delay']
        time.sleep(sending_delay)
    if tries:
        self.__everything_OK = True
def main(event, lambda_context):
    logging.info('Starting Lambda')
    error_count = 0
    print("event: ", event)
    print("lambda_context: ", lambda_context)
    overall_result = []
    # non_compliant_count = 0

    # (1) check for required SSM associations
    # get all SSM associations from AWS
    all_ssm_association = ssm().list_associations()
    # retrieve the list of required SSM associations from .env
    required_ssm_association = (os.getenv("MONITOR_SSM")).split(',')
    check_ssm_association(all_ssm_association, required_ssm_association, overall_result)

    # (2) check for required AWS Config rules
    required_aws_config_rules = (os.getenv("MONITOR_AWS_CONFIG")).split(',')
    # retrieve each AWS Config rule's details
    all_aws_config_response = []
    call_config = config()
    for x in required_aws_config_rules:
        all_aws_config_response.append(call_config.describe_config_rules(x))
    check_aws_config_rule(all_aws_config_response, required_aws_config_rules, overall_result)

    # (3) check for required CloudWatch Events rules
    required_events = (os.getenv("MONITOR_EVENT")).split(',')
    call_events = events()
    all_events = call_events.list_rules()
    check_cloudwatch_event(all_events, required_events, overall_result)

    notfound = 0
    for x in overall_result:
        if MESSAGE_NOTFOUND in x.status:
            notfound += 1

    # send the alert and output the result to CSV; the alert is sent whether or
    # not any required item is missing, the message content reflects the outcome
    prepared_message = prepare_alert_message(notfound, overall_result)
    call_alerts = alerts(ssm().get_parameter("slack"))
    output_file = output2csv(overall_result)
    call_alerts.send_alert(os.getenv("SLACK_CHANNEL"), prepared_message, output_file)

    logging.info('Total Errors: {}'.format(error_count))
    if error_count > 255:
        error_count = 255
    logging.info('Exiting lambda')
    if error_count > 0:
        # exit if error_count (lambda doesn't like sys.exit(0))
        sys.exit('Exiting lambda')
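# Illustrative values for the environment variables read above; the variable
# names come from the code, the values are made-up examples of the expected
# comma-separated format and are not taken from any real deployment.
#
# MONITOR_SSM=AWS-GatherSoftwareInventory,AWS-UpdateSSMAgent
# MONITOR_AWS_CONFIG=required-tags,restricted-ssh
# MONITOR_EVENT=daily-compliance-check
# SLACK_CHANNEL=#ops-alerts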
def __init__(self, socket, handler, timer_period=None):
    '''
    @param socket int: will use this can socket when receiving
    '''
    threading.Thread.__init__(self)
    self.daemon = True
    self._handler = handler
    self._socket = socket
    self.incomplete = {}
    if timer_period is not None:
        self._timer_period = timer_period
    else:
        self._timer_period = config.config()['can_net']['recieve_delay']
def main(event, lambda_context):
    logging.info('Starting Lambda')
    error_count = 0
    print("event: ", event)
    print("lambda_context: ", lambda_context)

    # capture the list of targeted AWS Config rules
    aws_config_rules = (os.getenv("AWS_CONFIG_RULES")).split(',')
    call_config = config()

    # # start evaluation of the selected AWS Config rules
    # # useful for on-demand requests, mainly for testing purposes
    # call_config.start_config_rules_evaluation(aws_config_rules)

    # get the non-compliant resources found in AWS Config
    non_compliant = []
    for x in aws_config_rules:
        result = call_config.get_compliance_details_by_config_rule(x, 'NON_COMPLIANT')
        for y in result:
            non_compliant.append(y)

    # parse the non-compliant resources found
    parse_non_compliant = parse_scan_result(non_compliant)

    # prepare the Slack alert message
    message = prepare_alert_message(parse_non_compliant)
    call_alerts = alerts(ssm().get_parameter("slack"))
    if len(parse_non_compliant) < 1:
        print("NO new non-compliant found!")
        call_alerts.send_alert(os.getenv("SLACK_CHANNEL"), message, None)
    else:
        # generate a csv file in /tmp (writeable in Lambda) which stores the new
        # non-compliant configurations found
        output_file = output2csv(parse_non_compliant)
        call_alerts.send_alert(os.getenv("SLACK_CHANNEL"), message, output_file)
        call_s3 = s3(os.getenv("BUCKET_NAME"))
        now = datetime.now(timezone(os.getenv("TIMEZONE")))
        upload_filename = str(os.getenv("AWS_CONFIG"))
        upload_filename += str(now.strftime('/%d%b%Y-'))
        upload_filename += str(now.strftime('%H%M%S-'))
        upload_filename += str(output_file)[5:]  # strip the leading '/tmp/' path prefix
        call_s3.upload_file(output_file, upload_filename)

    logging.info('Total Errors: {}'.format(error_count))
    if error_count > 255:
        error_count = 255
    logging.info('Exiting lambda')
    if error_count > 0:
        # exit if error_count (lambda doesn't like sys.exit(0))
        sys.exit('Exiting lambda')
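# Example of the S3 object key built by the concatenation above, assuming
# AWS_CONFIG="aws-config-reports", a run on 05 Mar 2024 at 14:30:59 and an
# output_file of "/tmp/report.csv" (all values illustrative):
#   aws-config-reports/05Mar2024-143059-report.csv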
def start(self):
    try:
        log.logger().info('Server started.')
        self.__servers.start_servers()
        while True:
            wait = 60 * float(config.config()['server']['sync_period'])
            new_db = proto.man_message()
            new_db.message_type = 'new_db'
            database.manager().get_sync_data(new_db.db)
            Synchronizer(new_db, self.__servers.server_out).start()
            time.sleep(wait)
    except KeyboardInterrupt:
        log.logger().warning('Stopping server on user command.')
    except Exception as exc:
        log.logger().error(repr(exc))
    finally:
        self.stop()
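# Worked example of the loop above (value illustrative): with sync_period = 5
# in the server config, wait = 60 * 5.0 = 300 seconds, so a fresh man_message
# is built and a new Synchronizer thread is started every five minutes.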
# Set up a virtual CAN interface for this test (run the following as root):
#   ip link add type vcan
# Create a virtual CAN network interface with the specific name 'can0':
#   ip link add dev can0 type vcan
# Raise your new interface:
#   ifconfig can0 up

from node import can_net
import time
import datetime
import os
import threading
import node.main
from shared import log, config


def print_handler(msg):
    print("print_handler msg str=<{0}> and receive time={1}".format(msg, datetime.datetime.now()))


if __name__ == "__main__":
    print("main: threading.current_thread().ident %u" % threading.current_thread().ident)
    print("main: os.getpid() %u" % os.getpid())
    print("can net receive delay = %f ms" % (1000.0 * float(config.config()["can_net"]["recieve_delay"])))
    s = can_net.CANserver("can0", print_handler)
    print("start time = {0}".format(datetime.datetime.now()))
    s.server_in.start()
    time.sleep(5)
    s.server_in.shutdown()
    print("__main__ END")
def __init__(self):
    self.__servers = ConnServer(('localhost', config.config()['server']['port']),
                                {'log': log_handler})
import os, time

from shared import log, config, database
from server.synchronizer import Synchronizer
from shared.network import *
import shared.man_db_msg_pb2 as proto

default_log_conf_path = os.path.dirname(os.path.abspath(__file__)) + '/../config/server_logging.conf'
default_conf_path = os.path.dirname(os.path.abspath(__file__)) + '/../config/server_config.yml'
log.initialize(default_log_conf_path)
config.initialize(default_conf_path)

db_connection_string = ('mysql://' + config.config()['database']['user'] + ':'
                        + config.config()['database']['password'] + '@'
                        + config.config()['database']['host'] + '/'
                        + config.config()['database']['db'])
db_debug_mode = config.config()['database']['debug']
database.initialize(db_connection_string, db_debug_mode)


def log_handler(msg):
    try:
        log.logger().info('Got log, synchronizing...')
        with database.manager().transaction():
            database.manager().load_log(msg.db)
        log.logger().info('Log successfully synchronized.')
    except Exception as exc:
        log.logger().error(repr(exc))
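# Sketch of the server_config.yml layout implied by the configuration keys read
# in this module and elsewhere in the server code; key names come from the
# code, values are illustrative placeholders only.
#
# database:
#   user: pmd
#   password: secret
#   host: localhost
#   db: pmd_server
#   debug: false
# server:
#   port: 10000
#   nodes_port: 10001
#   sync_period: 5        # minutes between synchronization rounds
#   tries: 3
#   sending_delay: 1      # seconds between retry rounds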
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Project Master Device. If not, see <http://www.gnu.org/licenses/>.

import os

from shared import log, config, database
from node.node import Node

default_log_conf_path = os.path.dirname(os.path.abspath(__file__)) + '/../config/node_logging.conf'
default_conf_path = os.path.dirname(os.path.abspath(__file__)) + '/../config/node_config.yml'
log.initialize(default_log_conf_path)
config.initialize(default_conf_path)

db_connection_string = 'sqlite:' + os.path.abspath(config.config()['database']['filename'])
db_debug_mode = config.config()['database']['debug']
database.initialize(db_connection_string, db_debug_mode)

if __name__ == '__main__':
    try:
        node = Node()
        node.start()
    except KeyboardInterrupt:
        log.logger().warning('Stopping node on user command.')
        node.stop()
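# Sketch of the node_config.yml layout implied by the configuration keys read
# across the node code; key names come from the code, values are illustrative
# placeholders only (note that 'recieve_delay' is the spelling the code uses).
#
# database:
#   filename: node.sqlite
#   debug: false
# network:
#   port: 10001
#   server_ip: 192.168.0.1
#   server_port: 10000
# can_net:
#   interface: can0
#   confirmation_period: 0.1
#   confirmation_timeout: 2.0
#   recieve_delay: 0.05
# log_synchronization:
#   tries: 3
#   period: 10            # minutes between log uploads
# extensions:
#   - example_point       # hypothetical extension name, see the sketch above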