def peer_ls():
    'list passwords for inbound connections'
    # Connect to the local cjdns admin interface with the password from
    # the router config, stream every configured user, then close.
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    con = cjdns.connect(password=conf['admin']['password'])
    for entry in con.listPasswords()['users']:
        yield entry
    con.disconnect()
def start():
    # Launch a cjdroute process, feed it the parsed configuration on
    # stdin, then (re)apply every peer definition stored under YRD_PEERS
    # through the admin API.  Yields human-readable progress lines.
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    p = Popen(['cjdroute'], stdin=PIPE)
    # cjdroute reads its entire configuration from stdin
    p.communicate(json.dumps(conf))
    c = cjdns.connect(password=conf['admin']['password'])
    for peer in os.listdir(YRD_PEERS):
        yield '[*] adding %r' % peer
        try:
            with open(os.path.join(YRD_PEERS, peer)) as f:
                info = json.load(f)
        except ValueError:
            # stored peer file is not valid JSON -- report and skip
            yield '[-] invalid json'
        else:
            if info['type'] == 'in':
                # inbound peer: register its password with the router
                try:
                    c.addPassword(info['name'], info['password'])
                except KeyError:
                    yield '[-] key error'
            elif info['type'] == 'out':
                # outbound peer: resolve the address and dial out
                addr = utils.dns_resolve(info['addr'])
                c.udpBeginConnection(str(addr), str(info['pk']),
                                     str(info['password']))
    c.disconnect()
def nf_announce(tracker, password, contact, oneshot=False):
    'announce yourself as public peer'
    # Periodically registers this node's UDP endpoint and public key
    # with a nodefinder tracker so that others can peer with it.
    # Loops forever (every 120s) unless oneshot is set.
    import nf
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    addr = conf['interfaces']['UDPInterface'][0]['bind']
    peer = {
        'port': int(addr.split(':')[1]),
        'publicKey': conf['publicKey'],
        'password': password
    }
    if contact:
        peer['contact'] = contact
    while True:
        try:
            if nf.announce(tracker, **peer):
                yield '[+] Told the tracker we\'re here'
        # FIX: `except X as e` instead of the deprecated Py2-only
        # `except X, e` comma form (works on Python 2.6+ and 3.x).
        except (IOError, ValueError) as e:
            yield '[-] %s' % e
        if oneshot:
            break
        time.sleep(120)
def peer_auth(name, live=False, cjdroute=False, yrd=False):
    'add a password for inbound connections'
    if '/' in name:
        yield 'nope'
        exit(1)
    peer_file = os.path.join(YRD_PEERS, name)
    # Reuse the stored password when this peer already exists,
    # otherwise mint a fresh one.
    if os.path.exists(peer_file):
        with open(peer_file) as fh:
            secret = json.load(fh)['password']
    else:
        secret = utils.generate_key(31)
    record = {'type': 'in', 'name': name, 'password': secret}
    if not live:
        # persist so the credential survives a cjdroute restart
        with open(peer_file, 'w') as fh:
            fh.write(json.dumps(record))
    conf = utils.load_conf(CJDROUTE_CONF)
    con = cjdns.connect(password=conf['admin']['password'])
    con.addPassword(name, secret)
    con.disconnect()
    pubkey = conf['publicKey']
    port = conf['interfaces']['UDPInterface'][0]['bind'].split(':')[1]
    # With no selector flag, print every credential format.
    show_all = not cjdroute and not yrd
    if show_all or cjdroute:
        yield utils.to_credstr(utils.get_ip(), port, pubkey, secret)
    if show_all:
        yield ''
    if show_all or yrd:
        yield 'yrd peer add namehere %s:%s %s %s' % (utils.get_ip(), port,
                                                     pubkey, secret)
def peer_add(name, addr, pk, password, live=False):
    'add an outbound connection'
    if '/' in name:
        yield 'nope'
        exit(1)
    if not password:
        password = raw_input('Password: ')
    # NOTE(review): the original source was corrupted here by a
    # redaction artifact ("******"); `path` and the `info` dict are
    # reconstructed from the parallel logic in peer_auth()/start().
    path = os.path.join(YRD_PEERS, name)
    info = {
        'type': 'out',
        'name': name,
        'addr': addr,
        'pk': pk,
        'password': password
    }
    if not live:
        # persist so the peer survives a cjdroute restart
        with open(path, 'w') as f:
            f.write(json.dumps(info))
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    addr = utils.dns_resolve(addr)
    c = cjdns.connect(password=conf['admin']['password'])
    c.udpBeginConnection(addr, pk, password)
    c.disconnect()
def n(neighbours=False):
    'shows your neighbours'
    # Dump the routing table and emit one formatted stats line per
    # direct peer, tagging the line with the locally-stored peer name
    # when the public key matches a file under YRD_PEERS.  With
    # neighbours=True, also list each peer's children from the nodestore.
    conf = utils.load_conf(CJDROUTE_CONF)
    c = cjdns.connect(password=conf['admin']['password'])
    # ip, path, version, bytesIn, bytesOut, state, dup/lost/out-of-range
    STAT_FORMAT = '%s %19s v%-2d %9d %9d %12s %d/%d/%d '
    nodestore = list(c.dumpTable())
    connections = {}
    try:
        # Build a public-key -> name map from the stored peer files.
        for peer in os.listdir(YRD_PEERS):
            with open(os.path.join(YRD_PEERS, peer)) as f:
                info = json.load(f)
            try:
                connections[info['pk']] = str(info['name'])
            except KeyError:
                # peer file without a 'pk'/'name' entry -- ignore
                pass
    except OSError:
        # YRD_PEERS directory missing/unreadable -- proceed without names
        pass
    for peer in c.peerStats():
        result = c.nodeForAddr(peer.ip)['result']
        route = utils.grep_ns(nodestore, peer.ip)
        path = utils.get_path(route)
        setattr(peer, 'path', path)
        line = STAT_FORMAT % (peer.ip, peer.path, peer.version,
                              peer.bytesIn, peer.bytesOut, peer.state,
                              peer.duplicates, peer.lostPackets,
                              peer.receivedOutOfRange)
        # Prefer the name reported by cjdns; fall back to our own records.
        if hasattr(peer, 'user'):
            line += repr(peer.user)
        elif peer.publicKey in connections:
            line += repr(connections[peer.publicKey])
        yield line
        if neighbours:
            for i in range(result['linkCount']):
                link = c.getLink(peer.ip, i)
                if link and 'child' in link['result']:
                    child = link['result']['child']
                    route = utils.grep_ns(nodestore, child)
                    version = utils.get_version(route)
                    path = utils.get_path(route)
                    yield ' %s %s v%s' % (child, path, version)
                else:
                    yield ' -'
    c.disconnect()
def setUp(self):
    # Test fixture: load the test config, open a Neo4j session, import
    # the static Ansible inventory into the graph, then close the session.
    conf = utils.load_conf("test_config.yml")
    neo4j_driver = utils.connect_to_neo4j(
        conf["neo4j"]["host"], conf["neo4j"]["bolt_port"],
        # user/password are optional in the test config
        conf["neo4j"].get("user", None),
        conf["neo4j"].get("password", None))
    session = neo4j_driver.session()
    # No vault password (third argument) for the static test inventory.
    self.static_inventory = load_ansible_inventory(
        conf["ansible"]["playbook_dir"],
        conf["ansible"]["inventory_path"], None)
    store(session, self.static_inventory, conf["label_name"])
    session.close()
def run_configure(default_conf_file): print("*** Welcome to mamba_dlp configuration wizard ***") #Generate conf file print("*** Configuring mamba_dlp.conf ***") default_conf_file_exists = os.path.exists(default_conf_file) if (default_conf_file_exists): default_conf_file_content = utils.load_conf(default_conf_file) else: default_conf_file_content = "" print(f"* Checking default conf file {default_conf_file}: {default_conf_file_exists}") print(json.dumps(default_conf_file_content , indent =2)) if (utils.input_radio_choice("Generate new conf file? (y/n): ",["y","n"]) == "y"): conf_file_json = generate_conf_file(default_conf_file) config = conf_file_json utils.save_config_to_file(conf_file_json , default_conf_file) else: if (os.path.exists(default_conf_file)): config = utils.load_conf(default_conf_file) else: print("Default conf file \"" + default_conf_file + "\" Not found!") exit() #deloy Dynamotable print("\n*** Configuring Dynamo_table ***") table_name = config['global_conf']['dynamo_table'] #print(f"Checking Dynamo table: {table_name}: {check_dynamo_table(table_name)}") deploy_dynamo_table(table_name) #configure realtime print("\n*** Configuring real_time scanning lambda ***") if (utils.input_radio_choice("Configure mamba_dlp realtime? (y/n): ",["y","n"]) == "y"): for aws_account in config['global_conf']['aws_accounts']: cfn_bucket = input("Enter bucket name to be used for Cloudformation template: ") function_arn = deploy_realtime_function(aws_account , cfn_bucket , table_name) deploy_realtime(aws_account , function_arn)
def wrbt_confirm(name, url):
    'confirm a peering request'
    # Decode the wrbt request and answer it with our own endpoint,
    # public key and a freshly generated password.
    import wrbt
    request = wrbt.decode(url)
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    endpoint = (utils.get_ip(),
                conf['interfaces']['UDPInterface'][0]['bind'].split(':')[1])
    pubkey = conf['publicKey']
    secret = utils.generate_key(31)
    # TODO: authorize
    yield wrbt.confirm(request, endpoint, pubkey, secret)
def peer_remove(user):
    'unpeer a node'
    if '/' in user:
        yield 'nope'
        exit(1)
    target = os.path.join(YRD_PEERS, user)
    # Drop the stored peer file if present; warn otherwise but still
    # revoke the password from the running router.
    if os.path.exists(target):
        os.unlink(target)
    else:
        yield 'user not found'
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    con = cjdns.connect(password=conf['admin']['password'])
    con.removePassword(user)
    con.disconnect()
def __init__(self, conf_name):
    # In-memory job bookkeeping, keyed by lifecycle state.
    self.job_list = {
        "running": {},
        "done": {},
        "pending": {}
    }
    # Per-site start-url metadata from the shared "uniform" conf files.
    self.start_url_info_dict = utils.load_conf(['../uniform/start_url_info_new.conf',
                                                '../uniform/start_url_info_soufun.conf'])
    # Job definitions for this instance (note: instance-level loader,
    # distinct from utils.load_conf above).
    self.start_url_job_dict = self.load_conf(conf_name)
    self.job_pending_list = []
    # Seed the running set from the configured jobs.
    self.job_list['running'] = self.gen_job_list(self.start_url_job_dict)
    # Persistent table mirrors for the three job states.
    self.job_list_db = {
        'pending': Table('job_list', 'pending', 'job_id'),
        'running': Table('job_list', 'running', 'job_id'),
        'done': Table('job_list', 'done', 'job_id')
    }
    # Maximum number of concurrently running jobs.
    self.max_slots = 10
def addr():
    'show infos about your node'
    # Query the admin API for this node's own record and routing table,
    # then emit a small tab-aligned summary.
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    con = cjdns.connect(password=conf['admin']['password'])
    node = con.nodeForAddr()['result']
    routes = list(con.dumpTable())
    for line in ('addr\t\t' + node['bestParent']['ip'],
                 'key\t\t' + node['key'],
                 'version\t\tv' + str(node['protocolVersion']),
                 '',
                 'links\t\t' + str(node['linkCount']),
                 'known routes\t' + str(len(routes))):
        yield line
    con.disconnect()
def uplinks(ip, trace=False):
    'show uplinks of a node'
    # Walk every link of the node at `ip` and print each child with its
    # route path and protocol version; '-' marks an empty link slot.
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    con = cjdns.connect(password=conf['admin']['password'])
    nodestore = list(con.dumpTable())
    link_count = con.nodeForAddr(ip)['result']['linkCount']
    for idx in range(link_count):
        link = con.getLink(ip, idx)
        if not (link and 'child' in link['result']):
            yield '-'
            continue
        child = link['result']['child']
        route = utils.grep_ns(nodestore, child)
        version = utils.get_version(route)
        path = utils.get_path(route)
        yield '%s %s v%d' % (child, path, version)
    con.disconnect()
def lambda_handler(event, context):
    """AWS Lambda entry point: scan each S3 object referenced by the
    triggering event and hand any sensitive findings to the configured
    action handler.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    conf_file = "conf/mamba_dlp.conf"
    config = utils.load_conf(conf_file)
    # Obtain local account ID
    client = boto3.client("sts")
    aws_account = client.get_caller_identity()["Account"]
    # Read event -- one record per S3 object notification
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        object_id = aws_account + ":" + bucket + ":" + key
        # Build object to scan
        object_to_scan = {'objects': []}
        object_to_scan['objects'].append({
            'object_id': object_id,
            'object_type': 's3',
            'aws_account': aws_account,
            'bucket': bucket,
            'key': key
        })
        print(json.dumps(object_to_scan, sort_keys=True, indent=2))
        # Run scan.  FIX: renamed misspelled local `sesitive_data` ->
        # `sensitive_data`; dropped the unused `action_response` binding.
        state = state_object.sensitive_data(config['global_conf']['dynamo_table'])
        sensitive_data = scan_single_object(config, state,
                                            json.dumps(object_to_scan))
        action = actions.action(config)
        action.initiate(sensitive_data)
        print("Sensitive Data Found:")
        print(json.dumps(sensitive_data, sort_keys=True, indent=2))
    return
def ping(ip, count=0, switch=False):
    'ping a node'
    # Send router- or switch-level pings to `ip`, yielding one status
    # line per attempt.  count == 0 means ping forever.
    conf = utils.load_conf(CJDROUTE_CONF, CJDROUTE_BIN)
    c = cjdns.connect(password=conf['admin']['password'])
    ping = c.switchPing if switch else c.routerPing
    for _ in xrange(count) if count else itertools.repeat(None):
        try:
            resp = ping(ip)
        # FIX: `except Exception as e` instead of the deprecated
        # Py2-only `except Exception, e` comma form (works on 2.6+/3.x).
        except Exception as e:
            # report admin-API failures as ping errors rather than raising
            resp = {'error': e}
        if 'error' in resp:
            yield 'Error: %s' % resp['error']
        elif resp['result'] == 'pong' and switch:
            yield 'Reply from %s %dms' % (resp['path'], resp['ms'])
        elif resp['result'] == 'pong':
            yield 'Reply from %s %dms' % (resp['from'], resp['ms'])
        elif resp['result'] == 'timeout':
            yield 'Timeout from %s after %dms' % (ip, resp['ms'])
        time.sleep(1)
def __init__(self):
    # Load the application configuration once at construction time.
    self.conf = load_conf()
def main():
    """Command-line entry point.

    Parses arguments, loads (or bootstraps) the configuration file and
    dispatches to the requested sub-command: full_scan, scan_object,
    configure or deploy_realtime.
    """
    ####### Main starts here #######
    display_banner()
    # Process arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--run', required=True)
    parser.add_argument('--config_file')
    parser.add_argument('--object')
    parser.add_argument('--key')
    parser.add_argument('--bucket')
    parser.add_argument('--aws_account')
    args = parser.parse_args()

    # Load configurations, generating a default conf file if none exists.
    default_conf_file = "code/conf/mamba_dlp.conf"
    if args.config_file is None:
        if os.path.exists(default_conf_file):
            conf_file = default_conf_file
        else:
            print("Default conf file \"" + default_conf_file + "\" Not found!")
            conf_file_json = deploy.generate_conf_file()
            utils.save_config_to_file(conf_file_json, default_conf_file)
            # set conf_file
            conf_file = default_conf_file
    else:
        # BUG FIX: argparse stores --config_file as args.config_file;
        # the original read the non-existent attribute args.config,
        # which raises AttributeError whenever --config_file is given.
        conf_file = args.config_file
    config = utils.load_conf(conf_file)

    # Start logic
    if args.run == "full_scan":
        state = state_object.sensitive_data(
            config['global_conf']['dynamo_table'])
        scan_result = full_scan(config, state)
        print("Sensitive Data Found:")
        print(json.dumps(scan_result, sort_keys=True, indent=2))
    elif args.run == "scan_object":
        if args.object is not None:
            # Scan a single, caller-supplied object description
            state = state_object.sensitive_data(
                config['global_conf']['dynamo_table'])
            scan_result = scan_single_object(config, state, args.object)
            print("Sensitive Data Found:")
            print(json.dumps(scan_result, sort_keys=True, indent=2))
        elif args.bucket is not None and args.key is not None \
                and args.aws_account is not None:
            # Build the object descriptor from the individual arguments.
            # (Renamed from `object`, which shadowed the builtin.)
            object_id = args.aws_account + ":" + args.bucket + ":" + args.key
            scan_target = {
                "objects": [{
                    "object_id": object_id,
                    "object_type": "s3",
                    "aws_account": args.aws_account,
                    "bucket": args.bucket,
                    "key": args.key
                }]
            }
            # Scan single object
            state = state_object.sensitive_data(
                config['global_conf']['dynamo_table'])
            scan_result = scan_single_object(config, state,
                                             json.dumps(scan_target))
            print("Sensitive Data Found:")
            print(json.dumps(scan_result, sort_keys=True, indent=2))
        else:
            print_usage()
            exit()
    elif args.run == "configure":
        deploy.run_configure(default_conf_file)
    elif args.run == "deploy_realtime":
        for aws_account in config['global_conf']['aws_accounts']:
            cfn_bucket = input(
                "Enter bucket name to be used for Cloudformation template: ")
            function_arn = deploy.deploy_realtime_function(
                aws_account, cfn_bucket, config['global_conf']['dynamo_table'])
            deploy.deploy_realtime(aws_account, function_arn)
    else:
        print_usage()
self.mystery_state = VERIFY_TABLE[status] elif VERIFY_TABLE[status] == Light.UNKN: logging.info("status is ❓") self.mystery_state = Light.UNKN elif VERIFY_TABLE[status] != self.state: logging.info("status is 🚫 toggling 💡") self.toggle() self.mystery_state = Light.UNKN else: logging.info("✅ status correct. quieting log messages") set_log_level(logging.WARNING) # XXX this might not be working ... just suck it up and accept the log messages ... could do this via apscheduler.job.pause() but that would require a dance if __name__ == "__main__": dir_path = os.path.dirname(os.path.realpath(__file__)) conf = load_conf(f"{dir_path}/conf.yaml") # create a logging handler that rotates at 3MB handler = logging.handlers.RotatingFileHandler(conf['logfile'], backupCount=3, maxBytes=3*1000*1000) logging.basicConfig(level=logging.INFO, handlers=[handler], format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M') logging.info(f"making LightMachine()") lm = LightMachine(conf) logging.info(f"making BlockingScheduler()") sched = BlockingScheduler({
def __init__(self):
    # Shared configuration, logger and HTTP client for this object.
    self.conf = utils.load_conf()
    self.logger = utils.logger()
    self.http = HTTPClient()
from pprint import pprint
import sys
import os
from pprint import pprint
import yaml
from datemath import datemath, dm
import datetime
import dateutil.parser
import utils
from utils import logit
from fabric.api import abort, task, run, env, sudo, hide, get, settings
from fabric.contrib.console import confirm

# Module-level setup: project root, shared config (also stashed on the
# fabric env so tasks can reach it), and today's date in the
# YYYY.MM.DD format used for daily index names.
SHOME = os.path.abspath(os.path.join(os.path.dirname(__file__)))
conf = utils.load_conf()
env.conf = conf
today = datetime.datetime.utcnow().strftime('%Y.%m.%d')


def get_conn():
    # Open an EC2 connection (and stash an SNS connection on env.snsc)
    # using the account credentials previously placed on fabric's env.
    # Returns the EC2 connection, or None after logging a failure.
    # NOTE(review): boto.ec2 / boto.sns are used here but not imported
    # in this chunk -- presumably imported elsewhere in the file; verify.
    try:
        access_key = env.account['access_key']
        access_secret = env.account['access_secret']
        conn = boto.ec2.connect_to_region(env.region,
                                          aws_access_key_id=access_key,
                                          aws_secret_access_key=access_secret)
        env.snsc = boto.sns.connect_to_region(env.region,
                                              aws_access_key_id=access_key,
                                              aws_secret_access_key=access_secret)
        return conn
    except Exception as e:
        error_msg = 'get_conn(): unable to connect to ec2 region, reason: {}'.format(e)
        logit('exception', error_msg)
if __name__ == "__main__":
    # Script entry point: parse the config path, load the config,
    # interactively prompt for any credentials the config names but
    # does not store, then load the Ansible inventory.
    import argparse
    import getpass

    from ansibleutils.ansibleutils import load_ansible_inventory

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='config.yml',
                        action='store', help='config file path')
    args = parser.parse_args()
    conf = utils.load_conf(args.config)

    # Idiom fix: membership tests use `key in d`, not `key in d.keys()`.
    if "user" in conf["neo4j"] and "password" not in conf["neo4j"]:
        conf["neo4j"]["password"] = getpass.getpass(
            prompt='Enter Neo4j password')
    if "use_vault" in conf["ansible"] and "password" not in conf["ansible"]:
        conf["ansible"]["password"] = getpass.getpass(
            prompt='Enter Ansible Vault password')

    inventory = load_ansible_inventory(conf["ansible"]["playbook_dir"],
                                       conf["ansible"]["inventory_path"],
                                       conf["ansible"].get("password", None))