def run(self):
    """Sanitize and validate every object in the dump, in place.

    Skips bookkeeping keys starting with "_".  Codename and filename
    fields are re-encoded; filename dicts, SHA-1 digests and IP network
    lists are validated.  Any invalid value is logged and the process
    exits with status 1.

    Returns:
        The (mutated) ``self.objs`` mapping.
    """
    for k, v in self.objs.items():
        if k.startswith("_"):
            continue
        for cls, col in CODENAMES:
            if v["_class"] == cls and v[col] is not None:
                v[col] = encode_codename(v[col])
                if v[col] == "":
                    logger.critical(
                        "The dump contains an instance of %s whose %s "
                        "field contains an invalid codename: `%s'.",
                        cls, col, v[col])
                    sys.exit(1)
        for cls, col in FILENAMES:
            if v["_class"] == cls and v[col] is not None:
                v[col] = encode_codename(v[col], extra="%.")
                if v[col] in {"", ".", ".."}:
                    logger.critical(
                        "The dump contains an instance of %s whose %s "
                        "field contains an invalid filename: `%s'.",
                        cls, col, v[col])
                    sys.exit(1)
        for cls, col in FILENAME_DICTS:
            if v["_class"] == cls and v[col] is not None:
                # Names chosen so we don't shadow the outer k/v variables
                # (the original reused k and v here).
                v[col] = {encode_codename(name, extra="%."): value
                          for name, value in v[col].items()}
                for fname in v[col]:
                    if fname in {"", ".", ".."}:
                        # Report the single offending filename rather than
                        # the whole dict.
                        logger.critical(
                            "The dump contains an instance of %s whose %s "
                            "field contains an invalid filename: `%s'.",
                            cls, col, fname)
                        sys.exit(1)
        for cls, col in DIGESTS:
            if v["_class"] == cls and v[col] is not None:
                # "x" is accepted as a placeholder digest.
                if not re.match("^([0-9a-f]{40}|x)$", v[col]):
                    logger.critical(
                        "The dump contains an instance of %s whose %s "
                        "field contains an invalid SHA-1 digest: `%s'.",
                        cls, col, v[col])
                    sys.exit(1)
        for cls, col in IP_ADDRESSES:
            if v["_class"] == cls and v[col] is not None:
                # Whitespace-separated string -> list of network strings.
                v[col] = [network.strip() for network in v[col].split()]
                for network in v[col]:
                    try:
                        ipaddress.ip_network(network)
                    except ValueError:
                        # Report the single offending entry rather than the
                        # whole list.
                        logger.critical(
                            "The dump contains an instance of %s whose %s "
                            "field contains an invalid IPv4 address: `%s'.",
                            cls, col, network)
                        sys.exit(1)
    return self.objs
def validate_dcos_overlay_network(dcos_overlay_network):
    """Validate a DC/OS overlay-network configuration JSON string.

    Raises AssertionError describing the first problem found; returns
    None when the configuration is acceptable.  Uses explicit ``raise
    AssertionError`` instead of ``assert False`` so the checks survive
    ``python -O``.
    """
    assert isinstance(dcos_overlay_network, str)
    try:
        overlay_network = json.loads(dcos_overlay_network)
    except json.JSONDecodeError as ex:
        raise AssertionError(
            "Must be a valid JSON . Errors while parsing at position "
            "{}: {}".format(ex.pos, ex.msg)) from ex

    # Check the VTEP IP, VTEP MAC keys are present in the overlay
    # configuration
    assert 'vtep_subnet' in overlay_network, (
        'Missing "vtep_subnet" in overlay configuration {}'.format(
            overlay_network))
    try:
        ipaddress.ip_network(overlay_network['vtep_subnet'])
    except ValueError as ex:
        raise AssertionError(
            "Incorrect value for vtep_subnet. Only IPv4 "
            "values are allowed: {}".format(ex)) from ex

    assert 'vtep_mac_oui' in overlay_network, (
        'Missing "vtep_mac_oui" in overlay configuration {}'.format(
            overlay_network))
    assert 'overlays' in overlay_network, (
        'Missing "overlays" in overlay configuration {}'.format(
            overlay_network))
    assert len(overlay_network['overlays']) > 0, (
        'We need at least one overlay network configuration {}'.format(
            overlay_network))

    for overlay in overlay_network['overlays']:
        assert len(overlay['name']) <= 13, (
            "Overlay name cannot exceed 13 characters:{}".format(
                overlay['name']))
        try:
            ipaddress.ip_network(overlay['subnet'])
        except ValueError as ex:
            # BUG FIX: this message used to claim "vtep_subnet" although it
            # reports the per-overlay subnet.
            raise AssertionError(
                "Incorrect value for overlay subnet. Only IPv4 "
                "values are allowed: {}".format(ex)) from ex
def validate_dcos_ucr_default_bridge_subnet(dcos_ucr_default_bridge_subnet):
    """Assert that the UCR default bridge subnet parses as an IP network.

    Raises AssertionError (chained from the underlying ValueError) when
    the value is not a valid subnet; returns None otherwise.
    """
    try:
        ipaddress.ip_network(dcos_ucr_default_bridge_subnet)
    except ValueError as err:
        message = (
            "Incorrect value for dcos_ucr_default_bridge_subnet: {}."
            " Only IPv4 subnets are allowed".format(
                dcos_ucr_default_bridge_subnet))
        raise AssertionError(message) from err
def test_run_instance_with_subnet_boto3():
    """Instances launched into a subnet get a private IPv4 inside it."""
    ec2 = boto3.client('ec2', region_name='eu-central-1')
    # (VPC CIDR, subnet CIDR) pairs to exercise.
    cidr_pairs = [
        (ipaddress.ip_network('10.0.0.0/16'),
         ipaddress.ip_network('10.0.99.0/24')),
        (ipaddress.ip_network('192.168.42.0/24'),
         ipaddress.ip_network('192.168.42.0/25')),
    ]
    # Tests instances are created with the correct IPs
    for vpc_net, subnet_net in cidr_pairs:
        vpc_resp = ec2.create_vpc(
            CidrBlock=str(vpc_net),
            AmazonProvidedIpv6CidrBlock=False,
            DryRun=False,
            InstanceTenancy='default',
        )
        subnet_resp = ec2.create_subnet(
            CidrBlock=str(subnet_net),
            VpcId=vpc_resp['Vpc']['VpcId'],
        )
        subnet_id = subnet_resp['Subnet']['SubnetId']
        run_resp = ec2.run_instances(
            ImageId='ami-1234abcd',
            MaxCount=1,
            MinCount=1,
            SubnetId=subnet_id,
        )
        instance = run_resp['Instances'][0]
        instance['SubnetId'].should.equal(subnet_id)
        private_ip = ipaddress.ip_address(
            six.text_type(instance['PrivateIpAddress']))
        subnet_net.should.contain(private_ip)
def validate_dcos_overlay_network(dcos_overlay_network):
    """Validate a DC/OS overlay-network configuration JSON string.

    Raises AssertionError describing the first problem found; returns
    None when the configuration is acceptable.
    """
    try:
        overlay_network = json.loads(dcos_overlay_network)
    except ValueError as ex:
        raise AssertionError(
            "Provided input was not valid JSON: {}".format(
                dcos_overlay_network)) from ex

    # Check the VTEP IP, VTEP MAC keys are present in the overlay
    # configuration
    assert 'vtep_subnet' in overlay_network.keys(), (
        'Missing "vtep_subnet" in overlay configuration {}'.format(
            overlay_network))
    try:
        ipaddress.ip_network(overlay_network['vtep_subnet'])
    except ValueError as ex:
        raise AssertionError(
            "Incorrect value for vtep_subnet: {}."
            " Only IPv4 values are allowed".format(
                overlay_network['vtep_subnet'])) from ex

    assert 'vtep_mac_oui' in overlay_network.keys(), (
        'Missing "vtep_mac_oui" in overlay configuration {}'.format(
            overlay_network))
    assert 'overlays' in overlay_network.keys(), (
        'Missing "overlays" in overlay configuration {}'.format(
            overlay_network))
    assert len(overlay_network['overlays']) > 0, (
        'We need at least one overlay network configuration {}'.format(
            overlay_network))

    for overlay in overlay_network['overlays']:
        assert (len(overlay['name']) <= 13), (
            "Overlay name cannot exceed 13 characters:{}".format(
                overlay['name']))
        try:
            ipaddress.ip_network(overlay['subnet'])
        except ValueError as ex:
            # BUG FIX: this message used to claim "vtep_subnet" although it
            # reports the per-overlay subnet.
            raise AssertionError(
                "Incorrect value for overlay subnet {}."
                " Only IPv4 values are allowed".format(
                    overlay['subnet'])) from ex
def assert_ip_address(self, section, key, test_default=False):
    """
    Test that key, of type 'ip address', is properly handled by the object.
    """
    # Test default value, if specified.
    if test_default:
        self.assert_default(section, key)

    # Valid inputs: integers, strings and ip_address objects, both IPv4
    # and IPv6, each paired with the expected parsed value.
    valid_cases = (
        (3221225985, ipaddress.ip_address(3221225985)),
        ("192.0.2.1", ipaddress.ip_address("192.0.2.1")),
        (ipaddress.ip_address("192.0.2.1"),
         ipaddress.ip_address("192.0.2.1")),
        (42540766411282592856903984951653826561,
         ipaddress.ip_address(42540766411282592856903984951653826561)),
        ("2001:db8::1", ipaddress.ip_address("2001:db8::1")),
        (ipaddress.ip_address("2001:db8::1"),
         ipaddress.ip_address("2001:db8::1")),
    )
    for value, expected in valid_cases:
        self.assert_success(section, key, value, expected_value=expected)

    # Invalid inputs (empty, negative, random text, networks) must raise.
    invalid_values = (
        "",
        -1,
        util.random_string(32),
        ipaddress.ip_network("198.51.100.0/24"),
        ipaddress.ip_network("2001:db8::/32"),
    )
    for value in invalid_values:
        self.assert_fail(section, key, value, util.GetError)
def process(self):
    """Process one queued event: drop blacklisted IPs, then forward the
    event when either an ASN or an IP-range filter matches.

    The event is always acknowledged exactly once; it is sent onwards
    only on an ASN or IP-range match.
    """
    event = self.receive_message()
    srcdest = self.parameters.srcdest
    # Only consider the IP / ASN fields that the event actually carries.
    ipfields = [field for field in self.ipfields[srcdest]
                if event.contains(field)]
    asnfields = [field for field in self.asnfields[srcdest]
                 if event.contains(field)]

    # Skip blacklisted IPs or ranges
    blacklisted = self._first_ip_in_ranges(event, ipfields,
                                           self.parameters.ipexcept)
    if blacklisted is not None:
        self.logger.debug("Blacklisted IP %s, ignoring", blacklisted)
        self.acknowledge_message()
        return

    # ASNs
    for asn in self.parameters.asns:
        for asnf in (event.value(asnfield) for asnfield in asnfields):
            if str(asnf) == asn:
                self.logger.debug("ASN %s matched", asnf)
                self.send_message(event)
                self.acknowledge_message()
                return

    # IPs
    matched = self._first_ip_in_ranges(event, ipfields,
                                       self.parameters.ipranges)
    if matched is not None:
        self.logger.debug("IP %s matched", matched)
        self.send_message(event)
        self.acknowledge_message()
        return

    self.acknowledge_message()

def _first_ip_in_ranges(self, event, ipfields, ranges):
    """Return the first event IP (from *ipfields*) contained in any of
    *ranges*, or None when nothing matches."""
    for iprange in ranges:
        for ipf in (event.value(ipfield) for ipfield in ipfields):
            if ip_address(ipf) in ip_network(iprange):
                return ipf
    return None
def cluster_network(self):
    """
    All storage nodes must have the same cluster network.  The cluster
    network must be valid.

    Records problems in ``self.errors['cluster_network']`` and marks
    ``self.passed['cluster_network']`` as "valid" when none were found.
    """
    same_network = {}
    for node in self.data.keys():
        # Only storage nodes are required to share the cluster network.
        if ('roles' in self.data[node] and
                'storage' in self.data[node]['roles']):
            cluster_network = self.data[node]['cluster_network']
            log.debug("cluster_network: {} {}".format(node, cluster_network))
            # Used as an ordered set of distinct network values.
            same_network[cluster_network] = ""
            try:
                ipaddress.ip_network(u'{}'.format(cluster_network))
            except ValueError:
                msg = "{} on {} is not valid".format(cluster_network, node)
                self.errors.setdefault('cluster_network', []).append(msg)

    if len(same_network) > 1:
        msg = "Different cluster networks {}".format(same_network.keys())
        self.errors.setdefault('cluster_network', []).append(msg)

    if 'cluster_network' not in self.errors:
        self.passed['cluster_network'] = "valid"
def is_valid_network(network):
    """Return True when *network* parses as an IPv4/IPv6 network.

    ``strict=False`` allows host bits to be set (e.g. "10.0.0.1/8").
    The value is formatted to text first, so non-string input is
    stringified before parsing.
    """
    try:
        # u'{}'.format keeps this working on Python 2 as well, matching
        # the idiom used elsewhere in this codebase (was the py2-only
        # builtin `unicode`).
        ipaddress.ip_network(u'{}'.format(network), strict=False)
    except ValueError:
        # Only a parse failure means "invalid" -- was a bare `except:`,
        # which also hid programming errors.
        return False
    return True
def __init__(self, instance_number):
    """
    :param instance_number: the controller instance number
    :param net: the subnet allocated for the fibbing nodes
    """
    self.leader = False
    self.instance = instance_number
    self.name = 'c%s' % instance_number
    self.nodes = {}
    self.bridge = Bridge('br0', self.name)
    self.root = None
    # Carve this controller's subnet out of the configured base network:
    # the instance number is shifted into the host bits of the base
    # address, giving each controller a disjoint /controller_prefix.
    net = ip_network(CFG.get(DEFAULTSECT, 'base_net'))
    controller_prefix = CFG.getint(DEFAULTSECT, 'controller_prefixlen')
    host_prefix = net.max_prefixlen - controller_prefix
    controller_base = (int(net.network_address) +
                       (instance_number << host_prefix))
    controller_net = ip_address(controller_base)
    self.net = ip_network('%s/%s' % (controller_net, controller_prefix))
    # Background thread that builds the network graph.
    self.graph_thread = Thread(target=self.infer_graph,
                               name="Graph inference thread")
    # JSON proxy server through which fake nodes connect back to us.
    self.json_proxy = SJMPServer(hostname=CFG.get(DEFAULTSECT,
                                                  'json_hostname'),
                                 port=CFG.getint(DEFAULTSECT, 'json_port'),
                                 invoke=self.proxy_connected,
                                 target=FakeNodeProxyImplem(self))
    self.json_thread = Thread(target=self.json_proxy.communicate)
    # Used to assign unique router-id to each node
    self.next_id = 1
    self.links = []
    # The fibbing routes
    self.routes = {}
    self.route_mappings = {}
def public_network(self):
    """
    All nodes must have the same public network.  The public network
    must be valid.

    Records problems in ``self.errors['public_network']`` and marks
    ``self.passed['public_network']`` as "valid" when none were found.
    """
    same_network = {}
    for node in self.data.keys():
        public_network = self.data[node]['public_network']
        log.debug("public_network: {} {}".format(node, public_network))
        # Used as an ordered set of distinct network values.
        same_network[public_network] = ""
        try:
            ipaddress.ip_network(u'{}'.format(public_network))
        except ValueError:
            msg = "{} on {} is not valid".format(public_network, node)
            self.errors.setdefault('public_network', []).append(msg)

    if len(same_network) > 1:
        msg = "Different public networks {}".format(same_network.keys())
        self.errors.setdefault('public_network', []).append(msg)

    if 'public_network' not in self.errors:
        self.passed['public_network'] = "valid"
def test_ip_autologin_with_ambiguous_addresses(self):
    """IP autologin must refuse to choose between two users that share
    the same address; cookie auth applies only when autologin is off."""
    # If two users have the same IP address neither of them can autologin.
    self.contest.ip_autologin = True
    self.contest.allow_password_authentication = False
    self.participation.ip = [ipaddress.ip_network("10.0.0.1/32")]
    other_user = self.add_user()
    other_participation = self.add_participation(
        contest=self.contest,
        user=other_user,
        ip=[ipaddress.ip_network("10.0.0.1/32")])
    self.assertFailure()
    # In fact, they don't even fall back to cookie-based authentication.
    self.contest.allow_password_authentication = True
    self.assertFailure()
    # But if IP autologin is disabled altogether, ambiguous IP addresses
    # are disregarded and cookie-based authentication kicks in.
    self.contest.ip_autologin = False
    self.assertSuccessAndCookieRefreshed()
    # Ambiguous IP addresses are allowed if only one of them is non-hidden
    # (and hidden users are barred from logging in).
    self.contest.ip_autologin = True
    self.contest.block_hidden_participations = True
    other_participation.hidden = True
    self.assertSuccessAndCookieCleared()
    # But not if hidden users aren't blocked.
    self.contest.block_hidden_participations = False
    self.assertFailure()
def test_routes_by_ipv_both(self):
    """Tests the routes_by_ipv() and route_count_by_ipv() methods with
    both IPv4 and IPv6 routes"""
    # (destination, gateway) pairs; first three are IPv4, rest IPv6.
    route_pairs = [
        ('10.99.99.0/24', '10.0.0.1'),
        ('10.99.98.0/24', '10.0.0.99'),
        ('10.99.97.0/24', '10.0.0.99'),
        ('fc00::10:0/112', 'fc00::1:1'),
        ('fc00::20:0/112', 'fc00::1:99'),
    ]
    vlan_config = {
        'routes': [
            {'route': {'ip_dst': dst, 'ip_gw': gw}}
            for dst, gw in route_pairs
        ],
    }
    vlan = VLAN(1, 1, vlan_config)
    expected_v4 = {ip_network(dst): ip_address(gw)
                   for dst, gw in route_pairs[:3]}
    expected_v6 = {ip_network(dst): ip_address(gw)
                   for dst, gw in route_pairs[3:]}
    self.assertEqual(vlan.routes_by_ipv(4), expected_v4)
    self.assertEqual(vlan.routes_by_ipv(6), expected_v6)
    self.assertEqual(vlan.route_count_by_ipv(4), 3)
    self.assertEqual(vlan.route_count_by_ipv(6), 2)
def _create_subnet(self, subnet_name, tenant_id, network_id):
    """Create a subnet on *network_id*, using the first free CIDR.

    Candidate CIDRs are carved out of the configured project network;
    creation is retried on "overlaps with another subnet" errors until
    one candidate succeeds.

    :returns: the created subnet body (dict)
    :raises Exception: when no candidate CIDR was available
    """
    base_cidr = netaddr.IPNetwork(self.project_network_cidr)
    mask_bits = self.project_network_mask_bits
    for subnet_cidr in base_cidr.subnet(mask_bits):
        try:
            if self.network_resources:
                # Honour the explicitly requested DHCP setting.
                resp_body = self.subnets_admin_client.\
                    create_subnet(
                        network_id=network_id,
                        cidr=str(subnet_cidr),
                        name=subnet_name,
                        tenant_id=tenant_id,
                        enable_dhcp=self.network_resources['dhcp'],
                        ip_version=(ipaddress.ip_network(
                            six.text_type(subnet_cidr)).version))
            else:
                resp_body = self.subnets_admin_client.\
                    create_subnet(network_id=network_id,
                                  cidr=str(subnet_cidr),
                                  name=subnet_name,
                                  tenant_id=tenant_id,
                                  ip_version=(ipaddress.ip_network(
                                      six.text_type(subnet_cidr)).version))
            break
        except lib_exc.BadRequest as e:
            # This CIDR is already taken -- try the next candidate; any
            # other BadRequest is a real error.
            if 'overlaps with another subnet' not in str(e):
                raise
    else:
        # Loop exhausted without a successful create.
        message = 'Available CIDR for subnet creation could not be found'
        raise Exception(message)
    return resp_body['subnet']
def discover_present_prefixes(iface):
    """List all prefixes seen by the machine.

    Calls rdisc6 to aquire a list a prefixes and routes announced on the
    network.

    Args:
        iface: name of the interface to ask for routes

    Returns:
        A tuple (prefixes, routes) of IPv6 networks reachable by this node

    Raises:
        DiscoveryFailed: when rdisc6 exits with a status other than 0
            or 2 (2 is tolerated -- matches the original behaviour;
            presumably "no answer received").
    """
    prefixes = []
    routes = []
    with subprocess.Popen(["rdisc6", iface],
                          stdout=subprocess.PIPE) as proc:
        # Wrap the raw byte pipe so we can iterate over text lines.
        tio = io.TextIOWrapper(proc.stdout)
        for line in tio:
            # Lines of interest look like "  Prefix : 2001:db8::/64".
            parts = re.match(r"^ (Prefix|Route)\s*:\s*([^\s]*)", line)
            if parts is not None:
                if parts.group(1) == "Prefix":
                    prefixes.append(ipaddress.ip_network(parts.group(2)))
                else:
                    routes.append(ipaddress.ip_network(parts.group(2)))
        proc.wait()
        if proc.returncode != 0 and proc.returncode != 2:
            raise DiscoveryFailed
    return (prefixes, routes)
def test_next_network_bug_issues_247(site):
    """
    Test for a bug where Network.get_next-network() returns an address
    that is in the assigned state, but not any that are in the allocated
    state

    Ref: https://github.com/dropbox/nsot/issues/247
    """
    objects = load_json('model_tests/data/networks.json')
    [models.Network.objects.create(site=site, **n) for n in objects]

    parent = models.Network.objects.get_by_address('10.20.0.0/16')

    # Create some /32s to push our expected /32 up
    for i in range(1, 5):
        models.Network.objects.create(
            site=site, cidr='10.20.0.{}/32'.format(i),
            state=models.Network.ASSIGNED
        )

    # .1 through .4 are ASSIGNED above, so the first free /32 is .5.
    expected_32 = [ipaddress.ip_network('10.20.0.5/32')]
    next_32 = parent.get_next_network(prefix_length=32)
    assert next_32 == expected_32

    # The next /31 must not overlap any of the assigned /32s either.
    expected_31 = [ipaddress.ip_network('10.20.0.6/31')]
    next_31 = parent.get_next_network(prefix_length=31)
    assert next_31 == expected_31

    # /24 expectation comes from the networks.json fixture contents.
    expected_24 = [ipaddress.ip_network('10.20.30.0/24')]
    next_24 = parent.get_next_network(prefix_length=24)
    assert next_24 == expected_24
def calculate_cidr(start_address, end_address):
    """
    The function to calculate a CIDR range(s) from a start and end IP
    address.

    Args:
        start_address: The starting IP address in string format.
        end_address: The ending IP address in string format.

    Returns:
        List: A list of calculated CIDR ranges.
    """
    tmp_addrs = []
    try:
        tmp_addrs.extend(summarize_address_range(
            ip_address(start_address),
            ip_address(end_address)))
    except (KeyError, ValueError, TypeError):  # pragma: no cover
        try:
            # Fall back to treating the inputs as networks and summarize
            # between their network addresses.
            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).network_address,
                ip_network(end_address).network_address))
        except AttributeError:  # pragma: no cover
            # Last resort for objects exposing `.ip` instead of
            # `.network_address` (presumably the legacy `ipaddr`
            # backport -- confirm before removing).
            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).ip,
                ip_network(end_address).ip))
    # str(i) is the idiomatic spelling of i.__str__().
    return [str(i) for i in collapse_addresses(tmp_addrs)]
def _derive_net_column(df, addr_col, net_col, prefix):
    """Map *addr_col* to *net_col* as /prefix networks in place; do
    nothing when the address column is absent."""
    try:
        df[net_col] = df[addr_col].map(
            lambda x: ip_network(str(x) + "/" + str(prefix), strict=False))
    except KeyError:
        # Column not present in this dataframe -- skip it.
        pass


def derive_nets(df, v4_prefix=16, v6_prefix=64):
    """
    add columns to the given dataframe for uniform networks given a prefix.
    allows aggregation by network. modifies the dataframe in place and
    returns it.
    """
    _derive_net_column(df, "sourceIPv4Address",
                       "sourceIPv4Network", v4_prefix)
    _derive_net_column(df, "destinationIPv4Address",
                       "destinationIPv4Network", v4_prefix)
    _derive_net_column(df, "sourceIPv6Address",
                       "sourceIPv6Network", v6_prefix)
    _derive_net_column(df, "destinationIPv6Address",
                       "destinationIPv6Network", v6_prefix)
    return df
def test_next_network_bug_issues_224(site):
    """
    Test for bug where Network.get_next_network() returns the wrong result.

    Ref: https://github.com/dropbox/nsot/issues/224
    """
    objects = load_json('model_tests/data/networks.json')
    [models.Network.objects.create(site=site, **n) for n in objects]

    # Get the parent
    parent = models.Network.objects.get_by_address('10.20.0.0/16')

    # We're expecting that the next /31 is going to be first_cidr
    first_cidr = '10.20.0.0/31'
    expected = [ipaddress.ip_network(first_cidr)]
    first_31 = parent.get_next_network(prefix_length=31)
    assert first_31 == expected

    # Create the first /31 and get the next one.
    models.Network.objects.create(cidr=first_cidr, site=site)

    # With 10.20.0.0/31 taken, the next free /31 must be 10.20.0.2/31.
    next_cidr = '10.20.0.2/31'
    expected = [ipaddress.ip_network(next_cidr)]
    next_31 = parent.get_next_network(prefix_length=31)
    assert next_31 == expected
def validate_ip(value, required=True):
    """Validate *value* as an IP address or network and return it.

    Empty/falsy values are accepted (returning None) when *required* is
    False; otherwise an invalid value raises ValueError.
    """
    if not value and not required:
        return None
    # will raise a ValueError
    ipaddress.ip_network(six.text_type(value), strict=False)
    return value
def get_iptables(self):
    """
    Get the list of iptables commands to use (iptables / ip6tables).

    The rule's explicit family wins; otherwise the source then the
    destination address is used as a hint.  With no information at all,
    rules are generated for both IPv4 and IPv6.
    """
    # Check if the rule explicitly specifies the family.
    if self.family == "ipv4":
        return ["iptables"]
    elif self.family == "ipv6":
        return ["ip6tables"]

    # Use the source / destination address (in that order) as a hint:
    # an IPv4 value means iptables, an IPv6 value ip6tables.
    for hint in (self.src_ip, self.dest_ip):
        if hint is None:
            continue
        addr = ipaddress.ip_network(str(hint), strict=False)
        if addr.version == 4:
            return ["iptables"]
        elif addr.version == 6:
            return ["ip6tables"]

    # Default is to generate rules for both IPv4 and IPv6.
    return ["iptables", "ip6tables"]
def to_ip_network(destination_address, destination_mask=None):
    """
    Transform various input patterns to a consistent output pattern.

    Accepts one of:

    (IPv4Network, None)
    (IPv6Network, None)
    ("1.2.3.4", N)
    ("1.2.3.4/N", None)
    ("1.2.3.4", "255.255.255.0")
    ("1.2.3.4/255.255.255.0", None)

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: when the address is neither a string nor a network.
    """
    if destination_mask is None:
        if isinstance(destination_address, str):
            assert '/' in destination_address
            return ip_network(destination_address, strict=False)
        elif isinstance(destination_address, _BaseNetwork):
            # Already a network object -- pass it through unchanged.
            return destination_address
        else:
            raise ValueError(
                'Expected IPv4Network or IPv6Network or string, got %s'
                % type(destination_address))
    else:
        network = ip_network(
            u'%s/%s' % (destination_address, destination_mask),
            strict=False)
        # BUG FIX: was `count(str(network), '/')`, calling an undefined /
        # ambiguous helper; str.count is what is meant here.
        assert str(network).count('/') == 1
        return network
def process(self, data, whitelist=None):
    """Filter *data*, dropping whitelisted or invalid indicators.

    Builds a prefix tree from the permanent whitelist plus the given
    whitelist entries, then keeps only entries whose (normalized)
    indicator parses as an IP network and is not whitelisted.

    :param data: iterable of dicts with 'indicator' and 'tags' keys
    :param whitelist: optional extra whitelist entries (same shape)
    :returns: list of surviving entries
    """
    # BUG FIX: was `whitelist=[]` -- a shared mutable default argument.
    if whitelist is None:
        whitelist = []
    wl = pytricia.PyTricia()
    for x in PERM_WHITELIST:
        wl[x] = True

    for y in whitelist:
        y = str(_normalize(y['indicator']))
        if '/' not in y:
            # weird bug work-around it'll insert 172.16.1.60 with a /0 at the end??
            y = '{}/32'.format(y)
        wl[y] = True

    # this could be done with generators...
    rv = []
    for y in data:
        if tag_contains_whitelist(y['tags']):
            continue
        y['indicator'] = _normalize(y['indicator'])
        try:
            # Parse to weed out anything that is not an address/network.
            if sys.version_info.major < 3:
                ipaddress.ip_network(unicode(y['indicator']))
            else:
                ipaddress.ip_network(y['indicator'])
            if str(y['indicator']) not in wl:
                rv.append(y)
        except ValueError as e:
            print(e)
            print('skipping invalid address: %s' % y['indicator'])
    return rv
def full11br():
    """Print a VPP MAP "domain add" command for every address of a /16.

    Emits one 1:1 mapping (IPv4 /32 -> IPv6 /128, psid-len 0, i.e. no
    port sharing) per address in 20.0.0.0/16.
    """
    ip4_pfx = ipaddress.ip_network('20.0.0.0/16')
    ip6_dst = ipaddress.ip_network('bbbb::/32')
    # (Removed unused local `psid_len`; the value is hard-coded in the
    # emitted command below.)
    for i in range(ip4_pfx.num_addresses):
        print("map add domain ip4-pfx " + str(ip4_pfx[i]) +
              "/32 ip6-pfx " + str(ip6_dst[i]) +
              "/128 ip6-shared-src cccc:bbbb::1",
              "ea-bits-len 0 psid-offset 0 psid-len 0")
def setUp(self):
    """Provision one role per auth method and matching pg_hba entries."""
    super().setUp()

    # (auth_method, password) pairs to provision; trust/reject need no
    # password.
    methods = [
        ('trust', None),
        ('reject', None),
        ('md5', 'correctpassword'),
        ('password', 'correctpassword'),
    ]

    self.cluster.reset_hba()

    create_script = []
    for method, password in methods:
        create_script.append(
            'CREATE ROLE {}_user WITH LOGIN{};'.format(
                method,
                ' PASSWORD {!r}'.format(password) if password else ''
            )
        )
        # Register the method for both local (socket) and host (TCP)
        # connections.
        # NOTE(review): an address is passed even for type='local';
        # pg_hba local entries normally carry no address, so presumably
        # the cluster helper ignores it -- confirm.
        self.cluster.add_hba_entry(
            type='local',
            address=ipaddress.ip_network('127.0.0.0/24'),
            database='postgres', user='******'.format(method),
            auth_method=method)
        self.cluster.add_hba_entry(
            type='host', address=ipaddress.ip_network('127.0.0.0/24'),
            database='postgres', user='******'.format(method),
            auth_method=method)

    # Put hba changes into effect
    self.cluster.reload()

    create_script = '\n'.join(create_script)
    self.loop.run_until_complete(self.con.execute(create_script))
def test_run_instance_mapped_public_ipv4():
    """Instances in a subnet with MapPublicIpOnLaunch=True must get a
    public IP address and DNS name."""
    client = boto3.client('ec2', region_name='eu-central-1')

    vpc_cidr = ipaddress.ip_network('192.168.42.0/24')
    subnet_cidr = ipaddress.ip_network('192.168.42.0/25')

    resp = client.create_vpc(
        CidrBlock=str(vpc_cidr),
        AmazonProvidedIpv6CidrBlock=False,
        DryRun=False,
        InstanceTenancy='default'
    )
    vpc_id = resp['Vpc']['VpcId']

    resp = client.create_subnet(
        CidrBlock=str(subnet_cidr),
        VpcId=vpc_id
    )
    subnet_id = resp['Subnet']['SubnetId']

    # Ask EC2 to auto-assign public IPs to instances in this subnet.
    client.modify_subnet_attribute(
        SubnetId=subnet_id,
        MapPublicIpOnLaunch={'Value': True}
    )

    resp = client.run_instances(
        ImageId='ami-1234abcd',
        MaxCount=1,
        MinCount=1,
        SubnetId=subnet_id
    )
    instance = resp['Instances'][0]

    # A public DNS name and address must be present and non-empty.
    instance.should.contain('PublicDnsName')
    instance.should.contain('PublicIpAddress')
    len(instance['PublicDnsName']).should.be.greater_than(0)
    len(instance['PublicIpAddress']).should.be.greater_than(0)
def run(self, interactive):
    """Build and start the demo topology, then hand control to mininet.

    Segments: a data-centre distribution router (r0), a branch router
    (r1) behind a slow WAN link, a DMZ behind a firewall, and an
    "external" scanner host reachable via NAT.  A scanner is attached
    to each segment, plus a console host in the DC.

    :param interactive: drop into the interactive CLI instead of
        blocking until interrupted.
    """
    net = self._net
    # Splitting the /16 with subnets(4) yields /20s; the first one is
    # the local DC LAN.
    dc_subnets = tuple(ip_network(u'10.1.0.0/16').subnets(4))
    dc_subnet = dc_subnets[0]
    dmz_subnet = ip_network(u'203.0.113.0/24')
    branch_subnet = ip_network(u'10.2.0.0/24')
    dc_dist = self.add_router('r0', *dc_subnets)
    dmz_fw = self.add_router('dmz', dmz_subnet)
    branch_dist = self.add_router('r1', branch_subnet)
    # External scanner lives outside the DC and talks to its own broker.
    external_scanner = net.addHost(
        'external', cls=ScannerNode, broker_url=EXTERNAL_BROKER_URL,
        ip='198.51.100.2/30')
    # DC <-> branch over a rate-limited, delayed link (50000 us, 1.544
    # Mbit/s -- T1-like characteristics).
    dc_to_branch = TCLink(
        dc_dist, branch_dist, intfName1='dc-to-branch',
        intfName2='branch-to-dc', delay=50000, bw=1.544)
    dc_to_branch.intf1.setIP('192.168.0.1/30')
    dc_dist.cmd('ip route add {} via 192.168.0.2'.format(str(branch_subnet)))
    dc_to_branch.intf2.setIP('192.168.0.2/30')
    branch_dist.setDefaultRoute('via 192.168.0.1')
    # DC <-> external link; the DC's default route points at it.
    dc_to_external = Link(
        dc_dist, external_scanner, intfName1='dc-to-external',
        intfName2='external-to-dc')
    dc_to_external.intf1.setIP(INTERNET_IP, prefixLen=30)
    dc_dist.setDefaultRoute('dc-to-external')
    dc_to_external.intf2.setIP('198.51.100.2', prefixLen=30)
    external_scanner.setDefaultRoute('external-to-dc')
    # DNAT port 5672 from the internet-facing interface to 10.1.0.10 so
    # the external host can reach the internal broker.
    dc_dist.cmd('iptables -t nat -A PREROUTING -i dc-to-external -p tcp -m tcp --dport 5672 -j DNAT --to 10.1.0.10')
    # DC <-> DMZ, routed through the firewall.
    dc_to_dmz = Link(
        dc_dist, dmz_fw, intfName1='dc-to-dmz', intfName2='dmz-to-dc')
    dc_to_dmz.intf1.setIP('192.168.0.5/30')
    dc_dist.cmd('ip route add {} via 192.168.0.6'.format(str(dmz_subnet)))
    dc_to_dmz.intf2.setIP('192.168.0.6/30')
    dmz_fw.setDefaultRoute('via 192.168.0.5')
    # Firewall policy: allow established flows and new connections to
    # 10.1.0.10's amqp port; drop everything else.
    dmz_fw.cmd('iptables -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT')
    dmz_fw.cmd('iptables -A FORWARD -d 10.1.0.10 -m tcp -p tcp --dport amqp --syn -j ACCEPT')
    dmz_fw.cmd('iptables -A FORWARD -j DROP')
    # One scanner per network segment.
    self.add_scanner('scanner1', '10.1.0.254/20')
    self.add_scanner('scanner2', '10.2.0.254/24')
    self.add_scanner('dmzscanner', '203.0.113.254/24')
    console = net.addHost(
        'console', ip=CONSOLE_IP, defaultRoute='via 10.1.0.1',
        inNamespace=False)
    net.addLink(console, self._switches[dc_subnet])
    # Either hand over to the interactive CLI or run until interrupted.
    if interactive:
        net.interact()
    else:
        net.run(_block_indefinitely)
def test_ipv4_public(self):
    """ipv4_public() must return valid, non-private dotted-quad
    addresses, and CIDR networks when called with network=True."""
    from faker.providers.internet import Provider
    provider = Provider(self.generator)
    for _ in range(99):
        address = provider.ipv4_public()
        address = six.text_type(address)
        # Dotted quads are 7 ("0.0.0.0") to 15 characters long.
        assert len(address) >= 7
        assert len(address) <= 15
        assert not ip_address(address).is_private, address
        assert (
            re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))

    for _ in range(99):
        address = provider.ipv4_public(network=True)
        address = six.text_type(address)
        # "0.0.0.0/0" (9 chars) up to "255.255.255.255/32" (18 chars).
        assert len(address) >= 9
        assert len(address) <= 18
        # Hack around ipaddress module
        # As 192.0.0.0 is net addr of many 192.0.0.0/* nets
        # ipaddress considers them as private
        if ip_network(address).network_address != ip_address('192.0.0.0'):
            assert not ip_network(address)[0].is_private, address
        assert (
            re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$').search(address))
def test_run_instance_with_specified_private_ipv4():
    """Launching with an explicit PrivateIpAddress must honour it."""
    ec2 = boto3.client('ec2', region_name='eu-central-1')

    vpc_net = ipaddress.ip_network('192.168.42.0/24')
    subnet_net = ipaddress.ip_network('192.168.42.0/25')

    vpc_resp = ec2.create_vpc(
        CidrBlock=str(vpc_net),
        AmazonProvidedIpv6CidrBlock=False,
        DryRun=False,
        InstanceTenancy='default',
    )
    subnet_resp = ec2.create_subnet(
        CidrBlock=str(subnet_net),
        VpcId=vpc_resp['Vpc']['VpcId'],
    )
    subnet_id = subnet_resp['Subnet']['SubnetId']

    run_resp = ec2.run_instances(
        ImageId='ami-1234abcd',
        MaxCount=1,
        MinCount=1,
        SubnetId=subnet_id,
        PrivateIpAddress='192.168.42.5',
    )
    instance = run_resp['Instances'][0]
    instance['SubnetId'].should.equal(subnet_id)
    instance['PrivateIpAddress'].should.equal('192.168.42.5')
def query(ip):
    """Look up *ip* via Cymru, returning the most specific result.

    Among the parsed results, keeps the one whose 'network' covers the
    fewest addresses (longest prefix), then merges in the details of
    its ASN.  Returns None when the lookup produced nothing.
    """
    raw_result = Cymru.__ip_query(ip)
    results = map(Cymru.__ip_query_parse, raw_result)
    result = None
    for res in results:
        if result is None:
            result = res
        elif 'network' not in res:
            # A candidate with no network can never beat the current one.
            continue
        elif 'network' not in result:
            result = res
        else:
            # Prefer the smaller (more specific) network.
            ips_a = ipaddress.ip_network(res['network']).num_addresses
            ips_b = ipaddress.ip_network(result['network']).num_addresses
            if ips_a < ips_b:
                result = res
    if not result:
        return
    if "asn" in result:
        # Enrich the winning entry with the ASN description.
        raw_result = Cymru.__asn_query(result['asn'])
        extra_info = Cymru.__asn_query_parse(raw_result)
        result.update(extra_info)
    return result
def getNumberHoneypotsAndAlerts(timeframe, clientDomain, type):
    ''' retrieves destinct number of honeypots from index'''
    # NOTE(review): the `type` parameter is immediately overwritten below
    # and therefore has no effect -- confirm before relying on it.
    type = 0  # all honeypot types
    numHoneypotDaemons = 0
    numHoneypotDaemonsOld = 0
    numHoneypots = 0
    numHoneypotsOld = 0  # pre 17.10
    internalDocker = 0
    listoutput = ""
    if clientDomain:
        # Community statistics: count T-Pot installations (hostname
        # buckets) and their honeypot daemons (peerIdent sub-buckets).
        listoutput += "\n ------ detailed community honeypot statistics ------\n\n"
        try:
            # find all 17.10 T-Pots
            res = es.search(
                index=esindex,
                body={
                    "query": {
                        "bool": {
                            "must": [{
                                "term": {
                                    "clientDomain": clientDomain
                                }
                            }, {
                                "exists": {
                                    "field": "hostname.keyword"
                                }
                            }],
                            "must_not": [{
                                "term": {
                                    "hostname.keyword": "undefined"
                                }
                            }],
                            "filter": {
                                "range": {
                                    "recievedTime": {
                                        "gte": "now-" + str(timeframe) + "m"
                                    }
                                }
                            }
                        }
                    },
                    "size": 0,
                    "aggs": {
                        "hostnames": {
                            "terms": {
                                "field": "hostname.keyword",
                                "size": 100000
                            },
                            "aggs": {
                                "peerIdents": {
                                    "terms": {
                                        "field": "peerIdent.keyword"
                                    }
                                }
                            }
                        }
                    }
                })
            # One hostname bucket per installation; each peerIdent below
            # it is one honeypot daemon.
            for i in range(len(res['aggregations']['hostnames']['buckets'])):
                #print(res['aggregations']['hostnames']['buckets'][i]['key'] + str(res['aggregations']['hostnames']['buckets'][i]['peerIdents']['buckets']))
                for j in res['aggregations']['hostnames']['buckets'][i][
                        'peerIdents']['buckets']:
                    listoutput += (
                        "[" +
                        res['aggregations']['hostnames']['buckets'][i]['key']
                        + "]" + "[" + j['key'] + "] : " +
                        str(j['doc_count']) + "\n")
                numHoneypotDaemons += len(
                    res['aggregations']['hostnames']['buckets'][i]
                    ['peerIdents']['buckets'])
            numHoneypots = len(res['aggregations']['hostnames']['buckets'])
            print("COMMUNITY >= 17.10 --> " + str(numHoneypots) +
                  " T-Pot installations with a total of " +
                  str(numHoneypotDaemons) +
                  " honeypot daemons, accounting for " +
                  str(res['hits']['total']) + " alerts.")

            # Find older Honeypots via dest_ip
            res2 = es.search(
                index=esindex,
                body={
                    "query": {
                        "bool": {
                            "must": [{
                                "term": {
                                    "clientDomain": clientDomain
                                }
                            }, {
                                "exists": {
                                    "field": "hostname.keyword"
                                }
                            }, {
                                "term": {
                                    "hostname.keyword": "undefined"
                                }
                            }],
                            "must_not": [{
                                "term": {
                                    "hostname.keyword": ""
                                }
                            }],
                            "filter": {
                                "range": {
                                    "recievedTime": {
                                        "gte": "now-" + str(timeframe) + "m"
                                    }
                                }
                            }
                        }
                    },
                    "size": 0,
                    "aggs": {
                        "hostnames": {
                            "terms": {
                                "field": "targetEntryIp",
                                "size": 100000
                            },
                            "aggs": {
                                "peerIdents": {
                                    "terms": {
                                        "field": "peerIdent.keyword"
                                    }
                                }
                            }
                        }
                    }
                })
            for i in range(len(res2['aggregations']['hostnames']['buckets'])):
                for j in res2['aggregations']['hostnames']['buckets'][i][
                        'peerIdents']['buckets']:
                    listoutput += (
                        "[" +
                        res2['aggregations']['hostnames']['buckets'][i]['key']
                        + "]" + "[" + j['key'] + "] : " +
                        str(j['doc_count']) + "\n")
                # Buckets keyed by a docker-internal target IP
                # (172.16.0.0/12) are tracked separately, as several
                # installations may share such an address.
                if ipaddress.ip_address(
                        res2['aggregations']['hostnames']['buckets'][i]
                        ['key']) in ipaddress.ip_network('172.16.0.0/12'):
                    #print("interne docker ip addresse : " + res2['aggregations']['hostnames']['buckets'][i]['key']+ " ---> " + str(res2['aggregations']['hostnames']['buckets'][i]['peerIdents']['buckets']))
                    internalDocker += len(
                        res2['aggregations']['hostnames']['buckets'][i]
                        ['peerIdents']['buckets'])
                    numHoneypotDaemonsOld += len(
                        res2['aggregations']['hostnames']['buckets'][i]
                        ['peerIdents']['buckets'])
                else:
                    #print(res2['aggregations']['hostnames']['buckets'][i]['key'] + " ---> " + str(res2['aggregations']['hostnames']['buckets'][i]['peerIdents']['buckets']))
                    numHoneypotDaemonsOld += len(
                        res2['aggregations']['hostnames']['buckets'][i]
                        ['peerIdents']['buckets'])
            numHoneypotsOld = len(res2['aggregations']['hostnames']['buckets'])
            print(
                "COMMUNITY < 17.10 --> " + str(numHoneypotsOld) +
                " T-Pot installations with a total of " +
                str(numHoneypotDaemonsOld) +
                " honeypot daemons, accounting for " +
                str(res2['hits']['total']) + " alerts - including " +
                str(internalDocker) +
                " hosts with internal docker ip (might be counted only once)")
            # total sum
            print("COMMUNITY TOTAL : " + str(numHoneypots + numHoneypotsOld) +
                  " T-Pot installations, with a total of " +
                  str(numHoneypotDaemons + numHoneypotDaemonsOld) +
                  " honeypot daemons, accounting for " +
                  str(res['hits']['total'] + res2['hits']['total']) +
                  " alerts.")
            if args.verbose:
                print(listoutput)
            return numHoneypots + numHoneypotsOld, numHoneypotDaemons + numHoneypotDaemonsOld, res[
                'hits']['total'] + res2['hits']['total']
        except ElasticsearchException as err:
            print('ElasticSearch error: %s' % err)
    else:
        # DTAG statistics: only daemon counts are available here, the
        # number of installations is reported as "unknown".
        listoutput += "\n ------ detailed DTAG honeypot statistics ------\n\n"
        try:
            res = es.search(index=esindex,
                            body={
                                "query": {
                                    "range": {
                                        "recievedTime": {
                                            "gte": "now-" + str(timeframe) + "m"
                                        }
                                    }
                                },
                                "aggs": {
                                    "communityfilter": {
                                        "filter": {
                                            "term": {
                                                "clientDomain": clientDomain
                                            }
                                        },
                                        "aggs": {
                                            "hostnames": {
                                                "terms": {
                                                    "field": "peerIdent.keyword",
                                                    "size": 100000
                                                }
                                            }
                                        }
                                    }
                                },
                                "size": 0
                            })
            print("DTAG TOTAL --> " + str(
                len(res['aggregations']['communityfilter']['hostnames']
                    ['buckets'])) + " honeypot daemons, accounting for " +
                  str(res['aggregations']['communityfilter']['doc_count']) +
                  " alerts.")
            for i in res['aggregations']['communityfilter']['hostnames'][
                    'buckets']:
                listoutput += "[DTAG][" + i['key'] + "] : " + str(
                    i['doc_count']) + "\n"
            if args.verbose:
                print(listoutput)
            return "unknown", len(
                res['aggregations']['communityfilter']['hostnames']['buckets']
            ), res['aggregations']['communityfilter']['doc_count']
        except ElasticsearchException as err:
            print('ElasticSearch error: %s' % err)
    # Reached only when the Elasticsearch query above failed.
    return False
g += "// skip point-to-point; if numbered, same edge is a transit network. #provemewrong\n" elif link_type == 2: # transit network g += "\"Router {0}\" -> \"DR {1}\" [label=\"{2}\"];\n".format( link_state_id, link_id, metric) elif link_type == 3: # stub network g += "\"Router {0}\" -> \"{1}/{2}\" [label=\"{3}\"];\n".format( link_state_id, link_id, link_data, metric) else: assert False # this should never happen elif header[2] == 2: # network LSA # https://tools.ietf.org/html/rfc2328#page-210 num_attached = (len(lsa) - 24) // 4 assert(num_attached > 0) netmask = ipaddress.IPv4Address( struct.unpack_from("!I", lsa, position)[0]) position += 4 g += "\"DR {0}\" [label=\"{1}\" shape=rect];\n".format( link_state_id, ipaddress.ip_network(str(link_state_id) + "/" + str(netmask), False)) for i in range(num_attached): attached_router = ipaddress.IPv4Address( struct.unpack_from("!I", lsa, position)[0]) position += 4 g += "\"DR {0}\" -> \"Router {1}\";\n".format( link_state_id, attached_router) g += "}" print(g)
def scan(self, scan_name, address_range, target_port, version, request_hexdump, packets_per_second, cron_str=''):
    """Run a ZMap UDP scan over *address_range* and aggregate amplifier responses.

    Launches the external ZMap binary (ZMAP_COMMAND) with a hex-encoded UDP
    probe, parses its CSV output, and groups responses per responding host,
    computing each host's bandwidth amplification factor (BAF).

    Args:
        scan_name: Human-readable label reported in task state and results.
        address_range: Iterable of network strings (CIDR) to scan.
        target_port: UDP destination port for the probe.
        version: IP version; must be 4 or 6.
        request_hexdump: Probe payload as a hex string (two chars per byte).
        packets_per_second: ZMap send rate (-r).
        cron_str: Unused in this method — presumably carried for scheduled
            scans; TODO confirm against callers.

    Returns:
        dict with scan metadata plus the amplifiers whose BAF exceeds 1.

    Raises:
        ValueError: If version is not 4 or 6.
        Exception: If IPV6_SRC_ADDR is unset for a v6 scan, or ZMap exits
            non-zero (stderr is propagated as the message).
    """
    # set task status started (Celery task-state update)
    self.update_state(state=states.STARTED, meta={"task_name": self.name, "scan_name": scan_name})
    logger.debug('Request: {0!r}'.format(self.request))
    logger.info('Scan "%s" has started\n' % scan_name)
    logger.info(('Scan Name: {0}\n'
                 'Address Range: {1}\n'
                 'UDP Port: {2}\n'
                 'IP Version:{3}\n'
                 'Hex Dump: {4}\n').format(scan_name, address_range, target_port,
                                           version, request_hexdump))
    if version not in [4, 6]:
        raise ValueError("Invalid IP Address Version %s specified " % version)
    # Two hex characters encode one payload byte.
    request_size = len(request_hexdump)/2.0;
    if request_size == 0:
        # avoid division by zero in BAF calculation
        request_size = 1
    zmap_udp_probe = "udp" if version == 4 else "ipv6_udp"
    amps = dict()
    # CSV rows come out in the -f field order; only "success" rows pass the
    # output filter.
    cmd = ('{7} '
           '-M {0} '
           '-p {1} '
           '--probe-args=hex:{2} '
           '-f {3} '
           '-r {4} '
           '--output-module={5} '
           '--output-filter={6} ').format(zmap_udp_probe, str(target_port), request_hexdump,
                                          'saddr,daddr,ipid,ttl,sport,dport,udp_pkt_size,data,classification',
                                          str(packets_per_second), 'csv', '"success = 1"',
                                          ZMAP_COMMAND)
    if version == 4:
        # v4 targets go straight onto the command line.
        addresses = ' '.join(address_range)
        cmd += addresses
    else:
        if IPV6_SRC_ADDR is None:
            raise Exception("IPV6_SRC_ADDR is not set in configuration")
        # v6 targets are streamed to ZMap on stdin, one address per line.
        cmd += '--ipv6-source-ip=%s ' % IPV6_SRC_ADDR
        cmd += '--ipv6-target-file=- '
    process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    logger.info("ZMap Command: %s" % cmd)
    stdout = ''
    stderr = ''
    if version == 4:
        stdout, stderr = process.communicate()
    else:
        # Expand every network into individual host addresses for stdin.
        addresses = ""
        for net in address_range:
            for host in ip_network(net):
                addresses += "%s\n" % str(host)
        stdout, stderr = process.communicate(input=addresses.encode())
    if process.returncode != 0:
        raise Exception(stderr.decode())
    stdout = stdout.decode()
    logger.info(stdout)
    logger.info(stderr.decode())
    # Sorted integer form of every scanned host, used to flag responses from
    # addresses that were never probed (unsolicited responses).
    scanned_addresses_list = [int(host) for net in address_range for host in ip_network(net).hosts()]
    scanned_addresses_list.sort()
    stdout = stdout.split('\n')
    # First row is the CSV header; skip it.
    for row in stdout[1:]:
        if not row:
            continue
        amplifier, daddr, ipid, ttl, sport, dport, response_size, response_data, classification = row.split(',')
        if classification != "udp":
            continue
        if amplifier not in amps:
            # First response from this host: initialize its record.
            amps[amplifier] = dict()
            amps[amplifier]["responses"] = list()
            amps[amplifier]["total_response_size"] = 0
            amps[amplifier]["destination_address"] = daddr
            amps[amplifier]["private_address"] = ip_address(amplifier).is_private
            amps[amplifier]["unsolicited_response"] = is_unsolicited_response(scanned_addresses_list, int(ip_address(amplifier)))
        # subtract udp header size (8 bytes)
        response_size = int(response_size) - 8
        amps[amplifier]["total_response_size"] += response_size
        # BAF = total payload bytes received / probe payload bytes sent.
        amps[amplifier]["amplification_factor"] = round(amps[amplifier]["total_response_size"]/request_size, 2)
        response = dict()
        response["response_ipid"] = int(ipid)
        response["response_ttl"] = int(ttl)
        response["response_sport"] = int(sport)
        response["response_dport"] = int(dport)
        response["response_hex_data"] = response_data
        response["response_size"] = response_size
        amps[amplifier]["responses"].append(response)
    # filters the amps dict for hosts with BAF greater than 1
    amplifiers = {
        k: v
        for k, v in amps.items() if v["amplification_factor"] > 1
    }
    result = dict()
    result["scan_name"] = scan_name
    result["request_size"] = request_size
    result["active_amplifiers_count"] = len(amplifiers)
    result["amplifiers"] = amplifiers
    return result
def check_gcp(self, ip_addr):
    """Check whether *ip_addr* belongs to a published GCP address range.

    Resolves Google's SPF TXT records via the external nslookup command to
    collect GCP network prefixes, then tests ip_addr against each prefix.

    Args:
        ip_addr: IP address string to test.

    Returns:
        True if ip_addr falls inside any discovered GCP prefix; False
        otherwise (also False when the initial nslookup fails).
    """
    self.utility.print_message(NOTE, 'Check GCP IP range.')
    self.utility.write_log(20, '[In] Check GCP IP range [{}].'.format(self.file_name))

    # Get Domain in SPF record using nslookup command.
    raw_domains = ''
    nslookup_cmd = self.gcp_nslookup_cmd + ' ' + self.gcp_content_srv + ' ' + self.gcp_content_ip
    try:
        self.utility.write_log(20, 'Execute : {}'.format(nslookup_cmd))
        raw_domains = subprocess.check_output(nslookup_cmd, shell=True)
    except Exception as e:
        # Without the seed SPF record there is nothing to check; bail out.
        msg = 'Executing {} is failure.'.format(nslookup_cmd)
        self.utility.print_exception(e, msg)
        self.utility.write_log(30, msg)
        self.utility.write_log(20, '[Out] Check GCP IP range [{}].'.format(self.file_name))
        return False

    # Set character code.
    # nslookup console output encoding differs by platform; Windows is
    # assumed Shift-JIS here — TODO confirm for non-Japanese locales.
    char_code = ''
    if os.name == 'nt':
        char_code = 'shift-jis'
    else:
        char_code = 'utf-8'

    # Get Network addresses from each domain.
    gcp_domain_list = re.findall(self.gcp_get_domain_regex, raw_domains.decode(char_code))
    gcp_nw_addres = []
    for gcp_domain in gcp_domain_list:
        nslookup_cmd = self.gcp_nslookup_cmd + ' ' + gcp_domain + ' ' + self.gcp_content_ip
        try:
            self.utility.write_log(20, 'Execute : {}'.format(nslookup_cmd))
            raw_ip = subprocess.check_output(nslookup_cmd, shell=True)
        except Exception as e:
            # A single failed lookup is non-fatal; move on to the next domain.
            msg = 'Executing {} is failure.'.format(nslookup_cmd)
            self.utility.print_exception(e, msg)
            self.utility.write_log(30, msg)
            continue
        gcp_nwaddres_from_one_domain = re.findall(
            self.gcp_get_nwaddr_regex, raw_ip.decode(char_code))
        for nwaddr in gcp_nwaddres_from_one_domain:
            gcp_nw_addres.append(nwaddr)

    # Check all gcp ip_address.
    target_ip = ipaddress.ip_address(ip_addr)
    for gcp_nw_addr in gcp_nw_addres:
        if target_ip in ipaddress.ip_network(gcp_nw_addr):
            msg = 'Detect : service=GCP target={} prefix={}'.format(
                target_ip, gcp_nw_addr)
            self.utility.print_message(OK, msg)
            self.utility.write_log(20, msg)
            self.utility.write_log(
                20, '[Out] Check GCP IP range [{}].'.format(self.file_name))
            return True
        else:
            self.utility.print_message(
                FAIL,
                'Not include : service=GCP target={} prefix={}'.format(
                    target_ip, gcp_nw_addr))
    self.utility.write_log(
        20, '[Out] Check GCP IP range [{}].'.format(self.file_name))
    return False
def unique_addresses(data=None, file_path=None):
    """
    The function to search an input string and/or file, extracting and
    counting IPv4/IPv6 addresses/networks. Summarizes ports with sub-counts.
    If both a string and file_path are provided, it will process them both.

    Args:
        data (:obj:`str`): The data to process.
        file_path (:obj:`str`): An optional file path to process.

    Returns:
        dict: The addresses/networks mapped to ports and counts:

        ::

            {
                '1.2.3.4' (dict) - Each address or network found is a
                dictionary:
                    {
                        'count' (int) - Total number of times seen.
                        'ports' (dict) - Mapping of port numbers as keys and
                            the number of times seen for this ip as values.
                    }
            }

    Raises:
        ValueError: Arguments provided are invalid.
    """
    if not data and not file_path:
        raise ValueError('No data or file path provided.')

    ret = {}
    base = {'count': 0, 'ports': {}}

    file_data = None
    if file_path:
        log.debug('Opening file for unique address analysis: {0}'.format(
            str(file_path)))
        # Context manager guarantees the handle is closed (the previous
        # implementation leaked the open file).
        with open(str(file_path), 'r') as f:
            # Read the file.
            file_data = f.read()

    pattern = re.compile(str(IP_REGEX), re.DOTALL)

    # Check if there is data.
    log.debug('Analyzing input/file data')
    for input_data in [data, file_data]:
        if not input_data:
            continue

        # Search for IPs.
        for match in pattern.finditer(input_data):
            is_net = False
            port = None
            try:
                found = match.group('ip')

                if '.' in found and ':' in found:
                    # IPv4 with a port, e.g. "1.2.3.4:80".
                    parts = found.split(':')
                    ip_or_net = parts[0]
                    port = parts[1]
                elif '[' in found:
                    # Bracketed IPv6 with a port, e.g. "[::1]:80".
                    parts = found.split(']:')
                    ip_or_net = parts[0][1:]
                    port = parts[1]
                elif '/' in found:
                    # CIDR network notation.
                    is_net = True
                    ip_or_net = found
                else:
                    ip_or_net = found

                # Let the stdlib validate and normalize the candidate;
                # invalid matches raise ValueError and are skipped below.
                if is_net:
                    ip_obj = ip_network(ip_or_net)
                else:
                    ip_obj = ip_address(ip_or_net)

                obj_str = str(ip_obj)
                if obj_str not in ret:
                    ret[obj_str] = copy.deepcopy(base)

                ret[obj_str]['count'] += 1

                if port:
                    try:
                        ret[obj_str]['ports'][str(port)] += 1
                    except KeyError:
                        ret[obj_str]['ports'][str(port)] = 1
            except (KeyError, ValueError):
                continue

    return ret
#!/usr/bin/env python3
"""Enumerate every address of a sample IPv6 /125 network, one per line."""
import ipaddress

net6 = ipaddress.ip_network('12:3456:78:90ab:cd:ef01:23:30/125')
# Unpack the network's addresses directly into a single newline-joined print.
print(*net6, sep='\n')
def is_ip_address_in_network(addr: str, ip_network) -> bool:
    """Return True if *addr* falls inside any of the given networks.

    Args:
        addr: IP address string (IPv4 or IPv6).
        ip_network: Iterable of network strings in CIDR notation.

    Returns:
        True if addr is contained in at least one network, else False
        (including for an empty iterable).

    Raises:
        ValueError: If addr or any examined network string is invalid.
    """
    # Parse the address once instead of once per network (the original
    # re-parsed it on every loop iteration).
    ip = ipaddress.ip_address(addr)
    return any(ip in ipaddress.ip_network(net) for net in ip_network)
print('Error in command') return result except: print('>>>netmiko_return_error', device['ip']) try: vendor = sys.argv[1] ipaddr = sys.argv[2] command = '''enable lldp\n\r config lldp ports 9-10 admin_status tx_and_rx\n\r config lldp ports 9-10 basic_tlvs port_description system_name system_description enable\n\r save\n\r y\r''' print('Vendor:', vendor, '\nipaddr', ipaddress.ip_network(ipaddr), '\nCommand:', command) except: print('''неверные аргументы - первый аргумент Vendor : cisco_like - второй аргумент диапазон адресов, если нужен один адрес , то указываем /32 маску - третий аргумент 'show version' / 'show switch' ''') subnet = ipaddress.ip_network(ipaddr) #подсеть для изучения #default_param={'device_type':'cisco_ios_telnet','username':'******','password':'******'}#параметры для подключения к оборудованию default_param = { 'device_type': 'cisco_ios_telnet', 'username': '******', 'password': '******' } #параметры для подключения к оборудованию
if __name__ == "__main__": try: # argment parser used to handle the inputs given to the script parser = argparse.ArgumentParser( description= "Prints a table with network information for the given IP address or network. Compatible with IPv4 & IPv6, with or with out CIDR notation." ) parser.add_argument( "ip_str", help="IPv4/IPv6 Address or Network. Can be in CIDR notation.") args = parser.parse_args() network = ipaddress.ip_network(args.ip_str, strict=False) net_version = str(network.version) network_ip = str(network.network_address) broadcast = str(network.broadcast_address) netmask = str(network.netmask) hostmask = str(network.hostmask) hosts = network.hosts() total_hosts = 0 for host in hosts: total_hosts += 1 table = texttable.Texttable() column_header = [ "Network IP", "Broadcast IP", "Subnet Mask", "Hostmask",
def is_secure_origin(self, location):
    # type: (Link) -> bool
    """Return True if *location* comes from a trusted/secure origin.

    An origin is accepted when its (protocol, host, port) triple matches one
    of the secure origins from self.iter_secure_origins(); "*" acts as a
    wildcard, and hosts are matched either as IP networks or (on parse
    failure) case-insensitively as hostnames.
    """
    # Determine if this url used a secure transport mechanism
    parsed = urllib.parse.urlparse(str(location))
    origin_protocol, origin_host, origin_port = (
        parsed.scheme, parsed.hostname, parsed.port,
    )

    # The protocol to use to see if the protocol matches.
    # Don't count the repository type as part of the protocol: in
    # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
    # the last scheme.)
    origin_protocol = origin_protocol.rsplit('+', 1)[-1]

    # Determine if our origin is a secure origin by looking through our
    # hardcoded list of secure origins, as well as any additional ones
    # configured on this PackageFinder instance.
    for secure_origin in self.iter_secure_origins():
        secure_protocol, secure_host, secure_port = secure_origin
        if origin_protocol != secure_protocol and secure_protocol != "*":
            continue

        try:
            addr = ipaddress.ip_address(
                None if origin_host is None else six.ensure_text(origin_host)
            )
            network = ipaddress.ip_network(
                six.ensure_text(secure_host)
            )
        except ValueError:
            # We don't have both a valid address or a valid network, so
            # we'll check this origin against hostnames.
            if (
                origin_host and
                origin_host.lower() != secure_host.lower() and
                secure_host != "*"
            ):
                continue
        else:
            # We have a valid address and network, so see if the address
            # is contained within the network.
            if addr not in network:
                continue

        # Check to see if the port matches.
        if (
            origin_port != secure_port and
            secure_port != "*" and
            secure_port is not None
        ):
            continue

        # If we've gotten here, then this origin matches the current
        # secure origin and we should return True
        return True

    # If we've gotten to this point, then the origin isn't secure and we
    # will not accept it as a valid location to search. We will however
    # log a warning that we are ignoring it.
    logger.warning(
        "The repository located at %s is not a trusted or secure host and "
        "is being ignored. If this repository is available via HTTPS we "
        "recommend you use HTTPS instead, otherwise you may silence "
        "this warning and allow it anyway with '--trusted-host %s'.",
        origin_host,
        origin_host,
    )
    return False
def run(self, func, **kwargs):
    """Dispatch a network-management command against the networks table.

    Args:
        func: Command name: "list", "remove_network", "add_hop_network",
            "add_network", "get_network_switch", "get_network_by_switch",
            "get_ip_switch", "get_ip_network", "is_hop_network_by_switch".
        **kwargs: Command-specific parameters, validated against
            self.__FUNCS__.

    Returns:
        (error, result) tuple: error is None on success, otherwise a
        human-readable message; result is command-specific.
    """
    dbc = self.mm.db.cursor()
    if func == "list":
        dbc.execute("SELECT * FROM networks;")
        results = dbc.fetchall()
        return None, {
            "rows": results,
            "columns": ["ID", "Range", "Description", "Switch", 'Is Hop Network']
        }
    elif func == "remove_network":
        perror, _ = self.validate_params(self.__FUNCS__[func], kwargs)
        if perror is not None:
            return perror, None

        net_id = kwargs['id']
        dbc.execute("SELECT * FROM networks WHERE net_id=?", (net_id, ))
        result = dbc.fetchone()
        if result is None:
            return "Network does not exist", None

        dbc.execute("DELETE FROM networks WHERE net_id=?", (net_id, ))
        self.mm.db.commit()

        switch = result[3]
        if switch != "":
            # Ensure the switch exists before trying to delete it.
            try:
                subprocess.check_output([
                    "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "br-exists", switch
                ])
            except subprocess.CalledProcessError:
                # BUGFIX: message previously read "does not exit".
                return "Switch {} does not exist".format(switch), None

            try:
                subprocess.check_output([
                    "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "del-br", switch
                ])
            except subprocess.CalledProcessError:
                return "Could not delete switch {}".format(switch), None

        return None, True
    elif func == "add_hop_network":
        perror, _ = self.validate_params(self.__FUNCS__[func], kwargs)
        if perror is not None:
            return perror, None

        new_network = kwargs['net_addr']
        switch = kwargs['switch']
        description = "HOP NETWORK: " + kwargs['description']

        # Hop networks require a switch name.
        # BUGFIX: this was previously checked only *after* the row had been
        # inserted and committed, leaving an orphaned network row behind.
        if switch == "":
            return "Switch name is blank", None

        dbc.execute("SELECT * FROM networks WHERE switch_name=?", (switch, ))
        if dbc.fetchone():
            return "Switch of that name already exists", None

        if not validate.is_ipnetwork(new_network):
            # BUGFIX: the original branch lacked `return`, so an invalid
            # network silently fell through and returned None.
            return "Invalid network", None

        # Check if the network overlaps one that already exists.
        dbc.execute("SELECT * FROM networks;")
        results = dbc.fetchall()
        new_network_obj = ipaddress.ip_network(new_network)
        for network in results:
            network_obj = ipaddress.ip_network(network[1])
            if new_network_obj.overlaps(network_obj):
                # BUGFIX: report the *existing* network we collided with
                # (previously formatted the new network twice).
                return "{} network is already part of network {}".format(
                    new_network, str(network_obj)), None

        # Insert our new network.
        dbc.execute(
            'INSERT INTO networks (net_address, net_desc, switch_name, is_hop_network) VALUES (?, ?, ?, ?)',
            (new_network, description, switch, 1))
        self.mm.db.commit()

        # Ensure the switch exists, creating it if necessary.
        try:
            subprocess.check_output([
                "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "br-exists", switch
            ])
        except subprocess.CalledProcessError:
            try:
                subprocess.check_output([
                    "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "add-br", switch
                ])
            except subprocess.CalledProcessError:
                return "Failed to create OVS bridge", None

        try:
            subprocess.check_output([
                "/usr/bin/sudo", "/sbin/ip", 'link', 'set', switch, 'up'
            ])
        except subprocess.CalledProcessError:
            return "Failed to set hop switch to up", None

        return None, True
    elif func == "add_network":
        perror, _ = self.validate_params(self.__FUNCS__['add_network'], kwargs)
        if perror is not None:
            return perror, None

        new_network = kwargs['net_addr']
        switch = kwargs['switch']
        description = kwargs['description']

        # A blank switch means we don't want one.
        if switch != "":
            dbc.execute("SELECT * FROM networks WHERE switch_name=?", (switch, ))
            if dbc.fetchone():
                return "Switch of that name already exists", None

        if not validate.is_ipnetwork(new_network):
            return "Invalid network address", None

        # Check if the network overlaps one that already exists.
        dbc.execute("SELECT * FROM networks;")
        results = dbc.fetchall()
        new_network_obj = ipaddress.ip_network(new_network)
        for network in results:
            network_obj = ipaddress.ip_network(network[1])
            if new_network_obj.overlaps(network_obj):
                # BUGFIX: report the *existing* network we collided with.
                return "{} network is already part of network {}".format(
                    new_network, str(network_obj)), None

        # Insert our new network.
        dbc.execute(
            'INSERT INTO networks (net_address, net_desc, switch_name, is_hop_network) VALUES (?, ?, ?, ?)',
            (new_network, description, switch, 0))
        self.mm.db.commit()
        network_id = dbc.lastrowid

        if switch != "":
            # Ensure the switch exists, creating it if necessary.
            try:
                subprocess.check_output([
                    "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "br-exists", switch
                ])
            except subprocess.CalledProcessError:
                try:
                    subprocess.check_output([
                        "/usr/bin/sudo", "/usr/bin/ovs-vsctl", "add-br", switch
                    ])
                except subprocess.CalledProcessError:
                    return "Failed to create OVS bridge", None

            # Give the switch the network's first host address.
            self._set_switch_ip(
                switch,
                str(list(new_network_obj.hosts())[0]) + "/" +
                str(new_network_obj.prefixlen))

        return None, network_id
    elif func == "get_network_switch":
        pass
    elif func == "get_network_by_switch":
        perror, _ = self.validate_params(self.__FUNCS__[func], kwargs)
        if perror is not None:
            return perror, None

        switch = kwargs['switch']
        dbc.execute(
            "SELECT net_id, net_address, net_desc, switch_name, is_hop_network FROM networks WHERE switch_name=?",
            (switch, ))
        result = dbc.fetchone()
        if not result:
            return "Switch does not exist", None
        return None, {
            "net_id": result[0],
            "net_address": result[1],
            "net_desc": result[2],
            # BUGFIX: is_hop_network is column index 4 in the SELECT above;
            # result[3] is switch_name and was always != 1.
            "is_hop": result[4] == 1
        }
    elif func == "get_ip_switch":
        perror, _ = self.validate_params(self.__FUNCS__['get_ip_switch'], kwargs)
        if perror is not None:
            return perror, None

        ip = kwargs['ip_addr']
        dbc.execute("SELECT * FROM networks")
        results = dbc.fetchall()
        for network in results:
            if validate.is_ip_in_network(ip, network[1]):
                return None, network[3]
        return "Could not find network", None
    elif func == "get_ip_network":
        perror, _ = self.validate_params(self.__FUNCS__['get_ip_network'], kwargs)
        if perror is not None:
            return perror, None

        ip = kwargs['ip_addr']
        dbc.execute("SELECT * FROM networks")
        results = dbc.fetchall()
        for network in results:
            if validate.is_ip_in_network(ip, network[1]):
                return None, ipaddress.ip_network(network[1])
        # NOTE(review): returns success with True when no network matched;
        # kept as-is since callers may rely on it — confirm intent.
        return None, True
    elif func == "is_hop_network_by_switch":
        perror, _ = self.validate_params(
            self.__FUNCS__['is_hop_network_by_switch'], kwargs)
        if perror is not None:
            return perror, None

        switch = kwargs['switch']
        dbc.execute(
            "SELECT is_hop_network FROM networks WHERE switch_name=?",
            (switch, ))
        result = dbc.fetchone()
        if not result:
            return "Switch does not exist", None
        return None, result[0] == 1
    else:
        return "Invalid function '{}.{}'".format(self.__SHORTNAME__, func), None
def create_multiregional_vpcs(self, cidr_block="10.0.0.0/16"):
    """Create one internet-connected VPC per region in self.list_of_regions.

    Each region gets a /24 slice of *cidr_block* and is provisioned with a
    VPC (DNS support/hostnames enabled), an internet gateway, a public route
    table with a default route, a security group with the configured ingress
    permissions, and one public subnet with auto-assigned public IPs.

    Args:
        cidr_block: Supernet carved into successive per-region /24 subnets.

    Returns:
        dict keyed by region with the created resource ids, plus
        "cidr_block" and "experiment_id" entries; also stored on
        self.vpcs_data.

    Raises:
        PermissionError: If VPCs for this experiment were already created.
    """
    if self.vpcs_data is not None:
        raise PermissionError(
            "the experiment vpc is already created: {}".format(
                self.vpcs_data))
    for region in self.list_of_regions:
        # check if the resource are available in all the regions before starting the experiment
        self.cloud_utils.check_if_it_is_possible_to_create_a_new_vpc_in_the_region(
            region=region, vpc_needed=1)
        self.cloud_utils.check_if_maximum_it_possible_to_run_instances_in_the_region(
            region=region, instances_needed=1)
    # Generator lazily yielding successive /24 blocks out of the supernet.
    subnetwork_pool_generator = ipaddress.ip_network(cidr_block).subnets(
        new_prefix=24)
    experiment_id = self.cloud_utils.generate_experiment_id()
    vpcs_data = {region: dict() for region in self.list_of_regions}
    vpcs_data["cidr_block"] = cidr_block
    vpcs_data["experiment_id"] = experiment_id
    for region in self.list_of_regions:
        # Each region consumes the next /24 from the pool.
        subnet_pool = str(next(subnetwork_pool_generator))
        vpc_id = self.cloud_utils.create_vpc(vpc_name=experiment_id,
                                             region=region,
                                             cidr_block=subnet_pool)
        self.cloud_utils.modify_EnableDnsSupport(vpc_id=vpc_id,
                                                 region=region,
                                                 value=True)
        self.cloud_utils.modify_EnableDnsHostnames(vpc_id=vpc_id,
                                                   region=region,
                                                   value=True)
        internet_gateway_id = self.cloud_utils.create_internet_gateway(
            region=region)
        self.cloud_utils.attach_internet_gateway_to_vpc(
            vpc_id=vpc_id,
            region=region,
            internet_gateway_id=internet_gateway_id)
        public_route_table_id = self.cloud_utils.create_route_table(
            vpc_id=vpc_id, region=region, table_name=experiment_id)
        security_group_id = self.cloud_utils.create_security_group(
            vpc_id=vpc_id,
            region=region,
            security_group_name=experiment_id,
            description=experiment_id)
        self.cloud_utils.authorize_security_group_traffic(
            region=region,
            security_group_id=security_group_id,
            ip_permissions=IP_PERMISSION,
            directions=["ingress"])
        # Default route sends all traffic through the internet gateway.
        self.cloud_utils.add_route(region=region,
                                   route_table_id=public_route_table_id,
                                   gateway_id=internet_gateway_id,
                                   destination_cidr_block='0.0.0.0/0')
        az = self.az_mapping[region]
        public_subnet_id = self.cloud_utils.create_subnet(
            vpc_id=vpc_id,
            region=region,
            az=az,
            subnet_name="Public Subnet",
            cidr_block=subnet_pool,
            route_table_id=public_route_table_id)
        self.cloud_utils.modify_MapPublicIpOnLaunch(
            subnet_id=public_subnet_id, region=region, value=True)
        vpcs_data[region] = [{
            "vpc_id": vpc_id,
            "internet_gateway_id": internet_gateway_id,
            "public_route_table_id": public_route_table_id,
            "security_group_id": security_group_id,
            "availability_zone": [az],
            "public_subnet": [public_subnet_id]
        }]
    if self.network_optimized:
        self.enable_network_optimized()
    self.vpcs_data = vpcs_data
    return vpcs_data
def parse_dns_response(ip_packet, ts):
    """Process one DNS response packet against the NetAssay simulation tables.

    Filters by client IP (allowed/banned network lists), then records
    A-record answers for known domains three ways: timeout-limited tables
    (one per timeout t in 0..300 step 10), an unlimited reference table, and
    hash-stage tables emulating a register layout with STAGES stages of
    2**MEM / STAGES slots each (one set per timeout u in 0..500 step 50).

    Args:
        ip_packet: dpkt IP packet carrying a DNS response; ip_packet.dst is
            the client address the answer is delivered to.
        ts: Packet timestamp (seconds), used for hash-entry expiry.

    Side effects:
        Mutates the module-level tables: knownlistDicts_timeout,
        netassayTables_timeout, unlimitedKnownDict, unlimitedNetTable,
        knownlistDicts_stages, netassayTables_stages, usedHashes.
    """
    # Check if it is in the allowed or banned IP lists
    clientIP = socket.inet_ntoa(ip_packet.dst)
    # Single address wrapped as a network so it can be tested for
    # subnet membership against the allow/ban lists.
    cip_object = ipaddress.ip_network(clientIP)
    allowed = False
    for ip in allowed_ips:
        if is_subnet_of(cip_object, ip):
            allowed = True
            break
    if (not allowed):
        return
    for ip in banned_ips:
        if is_subnet_of(cip_object, ip):
            return

    # Malformed DNS payloads are silently dropped.
    try:
        dns = dpkt.dns.DNS(ip_packet.data.data)
    except:
        return
    answers = dns.an
    if len(answers) <= 0:
        return
    domain = answers[0].name
    domain_name = domain.split('.')

    # --- Timeout-limited simulation: one table per timeout value t ---
    for t in range(0, 310, 10):
        # Parser limitations
        # (at most 4 labels, each at most 15 characters)
        parser_test = True
        if (len(domain_name) > 4):
            parser_test = False
            continue
        for part in domain_name:
            if (len(part) > 15):
                parser_test = False
                break
        if (parser_test == False):
            continue
        for d in known_domains:
            if (matchDomain(d, domain)):
                for rr in answers:
                    if (rr.type != 1):
                        continue
                    if (rr.type == 1):  #DNS.A
                        entry = knownlistDicts_timeout[t][d]
                        knownlistDicts_timeout[t][d][
                            0] = knownlistDicts_timeout[t][d][0] + 1
                        serverIP = socket.inet_ntoa(rr.rdata)
                        # (client, server) pair keys the mapping table.
                        key = clientIP + serverIP
                        netassayTables_timeout[t][key] = [d, ts]
                        break
                break

    # --- Unlimited reference table (no memory constraints) ---
    # Parser limitations
    if (len(domain_name) > 4):
        return
    for part in domain_name:
        if (len(part) > 15):
            return

    for d in known_domains:
        if (matchDomain(d, domain)):
            for rr in answers:
                if (rr.type != 1):
                    continue
                if (rr.type == 1):  #DNS.A
                    entry = unlimitedKnownDict[d]
                    unlimitedKnownDict[d][0] = unlimitedKnownDict[d][0] + 1
                    serverIP = socket.inet_ntoa(rr.rdata)
                    key = clientIP + serverIP
                    unlimitedNetTable[key] = d
                    break
            break

    # --- Hash-stage simulation: one table set per timeout value u ---
    for u in range(0, 550, 50):
        # Slots available per hash stage.
        modulo = int((2**MEM) / STAGES)
        for d in known_domains:
            if (matchDomain(d, domain)):
                for rr in answers:
                    if (rr.type != 1):
                        continue
                    if (rr.type == 1):  #DNS.A
                        entry = knownlistDicts_stages[u][d]
                        knownlistDicts_stages[u][d][
                            0] = knownlistDicts_stages[u][d][0] + 1
                        serverIP = socket.inet_ntoa(rr.rdata)
                        # 32-bit integer forms of both addresses feed the
                        # salted CRC32 stage hashes below.
                        serverIP32 = np.uint64(
                            int.from_bytes(socket.inet_aton(serverIP),
                                           byteorder='big'))
                        clientIP32 = np.uint64(
                            int.from_bytes(socket.inet_aton(clientIP),
                                           byteorder='big'))
                        #serverIP32 = int.from_bytes(socket.inet_aton(serverIP), byteorder='big')
                        #clientIP32 = int.from_bytes(socket.inet_aton(clientIP), byteorder='big')
                        # One salt per potential hash stage.
                        salts = [
                            np.uint64(134140211),
                            np.uint64(187182238),
                            np.uint64(187238),
                            np.uint64(1853238),
                            np.uint64(1828),
                            np.uint64(12238),
                            np.uint64(72134),
                            np.uint64(152428),
                            np.uint64(164314534),
                            np.uint64(223823)
                        ]
                        key = clientIP + serverIP
                        # Try each stage in order until a slot accepts the
                        # entry or the configured stage count is exhausted.
                        for z in range(0, 8):
                            if modulo > 0:
                                hashz = (zlib.crc32(
                                    np.uint64(serverIP32 + clientIP32 +
                                              salts[z])) & 0xffffffff) % modulo
                                #hashz = hash_function(serverIP32, clientIP32, salts[z]) % modulo
                            else:
                                hashz = 0
                            if (not hashz in usedHashes[u][z]):
                                # Free slot: claim it.
                                usedHashes[u][z][hashz] = [ts, key, domain]
                            elif (ts - usedHashes[u][z][hashz][0] > u):
                                # timestamp expires
                                netassayTables_stages[u][z].pop(
                                    usedHashes[u][z][hashz][1])
                                usedHashes[u][z][hashz] = [ts, key, domain]
                            elif (usedHashes[u][z][hashz][1] == key
                                  ):  # update timestamp for existing entry
                                usedHashes[u][z][hashz] = [ts, key, domain]
                            elif (STAGES < z + 2):
                                # Out of stages: count the miss in slot 3.
                                knownlistDicts_stages[u][d][
                                    3] = knownlistDicts_stages[u][d][3] + 1
                                break
                            else:
                                # Collision: fall through to the next stage.
                                continue
                            netassayTables_stages[u][z][key] = d
                            break
                        break
                break
from ipify import get_ip from troposphere import ( Base64, ec2, GetAtt, Join, Output, Parameter, Ref, Template, ) ApplicationPort = "3000" PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_resource( ec2.SecurityGroup( "SecurityGroup",
def cast_network(s, cur=None):
    """Typecaster: convert a database string value into an ip_network.

    NULL (None) values pass straight through, matching the usual
    database-adapter typecaster contract.
    """
    return None if s is None else ipaddress.ip_network(str(s))
def udp_sender():
    """Spray UDP datagrams carrying MESSAGE at port 65212 of every host in SUBNET."""
    # Encode the payload once rather than per datagram.
    payload = bytes(MESSAGE, 'utf8')
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sender:
        for host in ipaddress.ip_network(SUBNET).hosts():
            sender.sendto(payload, (str(host), 65212))
try: IP_class = ipaddress.ip_address(IP_str) except: continue # una vez que tenemos un objeto de la clase IP, nos quedamos con las IPv4, que no sean PIPA ni loopback if (IP_class.version == 4) and (not IP_class.is_link_local) and (not IP_class.is_loopback) : # vamos a preparar una lista net [ip, mascara, bitmask, cdir] para cada una de las ips validas net.append(info_red[i][ip_so][ip_ip]) # añadimos la ip net.append(info_red[i][ip_so][ip_mk]) # añadimos la mascara ip_prefix_str = '0.0.0.0/'+ info_red[i][ip_so][ip_mk] ip_prefix = ipaddress.IPv4Network(ip_prefix_str).prefixlen # buscar longitud de la mascara net.append(ip_prefix) # añadimos logitud de la mascara IP2= net[0] + '/' + str(net[1]) cdir = ipaddress.ip_network (IP2, strict=False) net.append(cdir) # añadimos CDIR # Ejemplo de un valor de 'net' # ['172.25.32.1', '255.255.240.0', 20, IPv4Network('172.25.32.0/20')] # host mask cdir network ips.append(net) # Ejemplo de un valor de ips # [['172.25.32.1', '255.255.240.0', 20, IPv4Network('172.25.32.0/20')], # ['172.22.224.1', '255.255.240.0', 20, IPv4Network('172.22.224.0/20')], # ['172.20.176.1', '255.255.240.0', 20, IPv4Network('172.20.176.0/20')], # ['192.168.1.120', '255.255.255.0', 24, IPv4Network('192.168.1.0/24')], # ['172.22.16.1', '255.255.240.0', 20, IPv4Network('172.22.16.0/20')]] print ("jose angel - @jabaselga") print ("Mi Sistema Operativo: {}".format (SO))
# parse the command line argument and open the file specified if __name__ == '__main__': if len(argv) != 5: print( 'usage: python netassay_python3_p4sim.py knownlist.txt allowed_dns_dst.txt banned_dns_dst.txt' ) exit(-1) # Parse allowed IP and banned IP files allowed_ip_file = open(argv[3], 'r') allowed_ip_list = allowed_ip_file.read().split() allowed_ip_file.close() for ip in allowed_ip_list: allowed_ips.append(ipaddress.ip_network(ip)) banned_ip_file = open(argv[4], 'r') banned_ip_list = banned_ip_file.read().split() banned_ip_file.close() for ip in banned_ip_list: banned_ips.append(ipaddress.ip_network(ip)) # Create knownlist knownlist = open(argv[2], 'r') known_domains = knownlist.read().split() knownlist.close() for d in known_domains: unlimitedKnownDict[d] = [0, 0, 0, 0, 0, 0]
def netmask(self):
    """Return the dotted-quad netmask string derived from self.ip/self.mask."""
    network = ipaddress.ip_network('{}/{}'.format(self.ip, self.mask),
                                   strict=False)
    return str(network.netmask)
CSS_FILES = CSS_FILES(_DEFAULT_CSS_FILES) if SIDEBAR_PANELS is None: SIDEBAR_PANELS = _DEFAULT_SIDEBAR_PANELS elif callable(SIDEBAR_PANELS): SIDEBAR_PANELS = SIDEBAR_PANELS(_DEFAULT_SIDEBAR_PANELS) if ACCOUNT_EXPIRES_DAYS is not None: if ACCOUNT_EXPIRES_NOTIFICATION_DAYS is None: ACCOUNT_EXPIRES_NOTIFICATION_DAYS = ACCOUNT_EXPIRES_DAYS - 7 ACCOUNT_EXPIRES_DAYS = timedelta(days=ACCOUNT_EXPIRES_DAYS) ACCOUNT_EXPIRES_NOTIFICATION_DAYS = timedelta( days=ACCOUNT_EXPIRES_NOTIFICATION_DAYS) SPAM_BLACKLIST = set([ipaddress.ip_network(addr) for addr in SPAM_BLACKLIST]) # set social media text defaults for key, value in _DEFAULT_SOCIAL_MEDIA_TEXTS.items(): if key in SOCIAL_MEDIA_TEXTS: SOCIAL_MEDIA_TEXTS[key].update(value) else: SOCIAL_MEDIA_TEXTS[key] = value # set empty values by default, otherwise we might get VariableLookup issues SOCIAL_MEDIA_TEXTS[key].setdefault('title', '') SOCIAL_MEDIA_TEXTS[key].setdefault('meta_desc', '') # Make sure GPG home directories exist for backend, config in GPG_BACKENDS.items(): if config.get('HOME') and not os.path.exists(config['HOME']):
def main():
    """Parse CLI arguments, validate the scan profile and targets, then scan
    all targets concurrently with a process pool.

    NOTE(review): relies on module-level helpers/state defined elsewhere in
    this file (_init, error, fail, info, Target, scan_host,
    calculate_elapsed_time, port_scan_profiles_config, playsound).
    """
    global single_target
    global only_scans_dir
    global port_scan_profile
    global heartbeat_interval
    global nmap
    global srvname
    global verbose

    _init()

    parser = argparse.ArgumentParser(description='Network reconnaissance tool to port scan and automatically enumerate services found on multiple targets.')
    parser.add_argument('targets', action='store', help='IP addresses (e.g. 10.0.0.1), CIDR notation (e.g. 10.0.0.1/24), or resolvable hostnames (e.g. foo.bar) to scan.', nargs="*")
    parser.add_argument('-t', '--targets', action='store', type=str, default='', dest='target_file', help='Read targets from file.')
    parser.add_argument('-ct', '--concurrent-targets', action='store', metavar='<number>', type=int, default=5, help='The maximum number of target hosts to scan concurrently. Default: %(default)s')
    parser.add_argument('-cs', '--concurrent-scans', action='store', metavar='<number>', type=int, default=10, help='The maximum number of scans to perform per target host. Default: %(default)s')
    parser.add_argument('--profile', action='store', default='default', dest='profile_name', help='The port scanning profile to use (defined in port-scan-profiles.toml). Default: %(default)s')
    parser.add_argument('-o', '--output', action='store', default='results', dest='output_dir', help='The output directory for results. Default: %(default)s')
    parser.add_argument('--single-target', action='store_true', default=False, help='Only scan a single target. A directory named after the target will not be created. Instead, the directory structure will be created within the output directory. Default: false')
    parser.add_argument('--only-scans-dir', action='store_true', default=False, help='Only create the "scans" directory for results. Other directories (e.g. exploit, loot, report) will not be created. Default: false')
    parser.add_argument('--heartbeat', action='store', type=int, default=60, help='Specifies the heartbeat interval (in seconds) for task status messages. Default: %(default)s')
    nmap_group = parser.add_mutually_exclusive_group()
    nmap_group.add_argument('--nmap', action='store', default='-vv --reason -Pn', help='Override the {nmap_extra} variable in scans. Default: %(default)s')
    nmap_group.add_argument('--nmap-append', action='store', default='', help='Append to the default {nmap_extra} variable in scans.')
    parser.add_argument('-v', '--verbose', action='count', default=0, help='Enable verbose output. Repeat for more verbosity.')
    parser.add_argument('--disable-sanity-checks', action='store_true', default=False, help='Disable sanity checks that would otherwise prevent the scans from running. Default: false')
    parser.add_argument('--song', action='store_true', default=False, help='Be a 1337 h4x0r with this one simple trick!')
    # Replace argparse's default exit-with-usage behaviour with the
    # project's fail() helper (capitalising the message first).
    parser.error = lambda s: fail(s[0].upper() + s[1:])
    args = parser.parse_args()

    single_target = args.single_target
    only_scans_dir = args.only_scans_dir
    errors = False

    if args.song == True:
        threading.Thread(target=playsound, args=('NORADNuclearMissleTones.mp3',), daemon=True).start() #https://github.com/TaylorSMarks/playsound/issues/38 I need to thread the song in the background due to this error.

    # NOTE(review): message says -ch but the actual flag is -ct.
    if args.concurrent_targets <= 0:
        error('Argument -ch/--concurrent-targets: must be at least 1.')
        errors = True

    concurrent_scans = args.concurrent_scans

    # NOTE(review): message says -ct but the actual flag is -cs.
    if concurrent_scans <= 0:
        error('Argument -ct/--concurrent-scans: must be at least 1.')
        errors = True

    port_scan_profile = args.profile_name
    found_scan_profile = False
    # Validate the selected profile: every scan needs a service-detection
    # section (command + pattern with port/protocol/service groups), and an
    # optional port-scan section needs a command and a port-group pattern.
    # NOTE(review): the {profile}/{scan} placeholders are passed literally;
    # presumably error() formats them from the caller's locals — confirm.
    for profile in port_scan_profiles_config:
        if profile == port_scan_profile:
            found_scan_profile = True
            for scan in port_scan_profiles_config[profile]:
                if 'service-detection' not in port_scan_profiles_config[profile][scan]:
                    error('The {profile}.{scan} scan does not have a defined service-detection section. Every scan must at least have a service-detection section defined with a command and a corresponding pattern that extracts the protocol (TCP/UDP), port, and service from the result.')
                    errors = True
                else:
                    if 'command' not in port_scan_profiles_config[profile][scan]['service-detection']:
                        error('The {profile}.{scan}.service-detection section does not have a command defined. Every service-detection section must have a command and a corresponding pattern that extracts the protocol (TCP/UDP), port, and service from the results.')
                        errors = True
                    else:
                        if '{ports}' in port_scan_profiles_config[profile][scan]['service-detection']['command'] and 'port-scan' not in port_scan_profiles_config[profile][scan]:
                            error('The {profile}.{scan}.service-detection command appears to reference a port list but there is no port-scan section defined in {profile}.{scan}. Define a port-scan section with a command and corresponding pattern that extracts port numbers from the result, or replace the reference with a static list of ports.')
                            errors = True

                    if 'pattern' not in port_scan_profiles_config[profile][scan]['service-detection']:
                        error('The {profile}.{scan}.service-detection section does not have a pattern defined. Every service-detection section must have a command and a corresponding pattern that extracts the protocol (TCP/UDP), port, and service from the results.')
                        errors = True
                    else:
                        if not all(x in port_scan_profiles_config[profile][scan]['service-detection']['pattern'] for x in ['(?P<port>', '(?P<protocol>', '(?P<service>']):
                            error('The {profile}.{scan}.service-detection pattern does not contain one or more of the following matching groups: port, protocol, service. Ensure that all three of these matching groups are defined and capture the relevant data, e.g. (?P<port>\d+)')
                            errors = True

                if 'port-scan' in port_scan_profiles_config[profile][scan]:
                    if 'command' not in port_scan_profiles_config[profile][scan]['port-scan']:
                        error('The {profile}.{scan}.port-scan section does not have a command defined. Every port-scan section must have a command and a corresponding pattern that extracts the port from the results.')
                        errors = True

                    if 'pattern' not in port_scan_profiles_config[profile][scan]['port-scan']:
                        error('The {profile}.{scan}.port-scan section does not have a pattern defined. Every port-scan section must have a command and a corresponding pattern that extracts the port from the results.')
                        errors = True
                    else:
                        if '(?P<port>' not in port_scan_profiles_config[profile][scan]['port-scan']['pattern']:
                            error('The {profile}.{scan}.port-scan pattern does not contain a port matching group. Ensure that the port matching group is defined and captures the relevant data, e.g. (?P<port>\d+)')
                            errors = True
            break

    if not found_scan_profile:
        error('Argument --profile: must reference a port scan profile defined in {port_scan_profiles_config_file}. No such profile found: {port_scan_profile}')
        errors = True

    heartbeat_interval = args.heartbeat

    nmap = args.nmap
    if args.nmap_append:
        nmap += " " + args.nmap_append

    outdir = args.output_dir
    srvname = ''
    verbose = args.verbose

    raw_targets = args.targets
    targets = []

    # Optionally merge targets from a file (skipping blanks and # comments).
    if len(args.target_file) > 0:
        if not os.path.isfile(args.target_file):
            error('The target file {args.target_file} was not found.')
            sys.exit(1)
        try:
            with open(args.target_file, 'r') as f:
                lines = f.read()
                for line in lines.splitlines():
                    line = line.strip()
                    if line.startswith('#') or len(line) == 0:
                        continue
                    if line not in raw_targets:
                        raw_targets.append(line)
        except OSError:
            error('The target file {args.target_file} could not be read.')
            sys.exit(1)

    # Expand each raw target: plain IP -> itself; CIDR range -> each host
    # (size-capped unless sanity checks are disabled); otherwise try DNS
    # resolution and keep the hostname itself as the target.
    for target in raw_targets:
        try:
            ip = str(ipaddress.ip_address(target))

            if ip not in targets:
                targets.append(ip)
        except ValueError:

            try:
                target_range = ipaddress.ip_network(target, strict=False)
                if not args.disable_sanity_checks and target_range.num_addresses > 256:
                    error(target + ' contains ' + str(target_range.num_addresses) + ' addresses. Check that your CIDR notation is correct. If it is, re-run with the --disable-sanity-checks option to suppress this check.')
                    errors = True
                else:
                    for ip in target_range.hosts():
                        ip = str(ip)
                        if ip not in targets:
                            targets.append(ip)
            except ValueError:

                try:
                    # Resolution is only a reachability check; the hostname
                    # (not the resolved IP) is what gets scanned.
                    ip = socket.gethostbyname(target)

                    if target not in targets:
                        targets.append(target)
                except socket.gaierror:
                    error(target + ' does not appear to be a valid IP address, IP range, or resolvable hostname.')
                    errors = True

    if len(targets) == 0:
        error('You must specify at least one target to scan!')
        errors = True

    if single_target and len(targets) != 1:
        error('You cannot provide more than one target when scanning in single-target mode.')
        sys.exit(1)

    if not args.disable_sanity_checks and len(targets) > 256:
        error('A total of ' + str(len(targets)) + ' targets would be scanned. If this is correct, re-run with the --disable-sanity-checks option to suppress this check.')
        errors = True

    if errors:
        sys.exit(1)

    # Scan hosts in parallel; each worker process handles one target.
    with ProcessPoolExecutor(max_workers=args.concurrent_targets) as executor:
        start_time = time.time()
        futures = []

        for address in targets:
            target = Target(address)
            futures.append(executor.submit(scan_host, target, concurrent_scans, outdir))

        try:
            for future in as_completed(futures):
                future.result()
        except KeyboardInterrupt:
            # Best-effort teardown on Ctrl-C: cancel what has not started
            # and abandon the pool without waiting.
            for future in futures:
                future.cancel()
            executor.shutdown(wait=False)
            sys.exit(1)

        elapsed_time = calculate_elapsed_time(start_time)
        info('{bgreen}Finished scanning all targets in {elapsed_time}!{rst}')
def validatePreferedIpNetworks(self, mngIpNetwork, hostIpNetwork):
    """Return True when the management and host IP networks overlap."""
    return ipaddress.ip_network(mngIpNetwork).overlaps(
        ipaddress.ip_network(hostIpNetwork))
# Persisted Supervisor state files (all under SUPERVISOR_DATA, defined elsewhere).
FILE_OPPIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_OPPIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_OPPIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_OPPIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
FILE_OPPIO_OPENPEERPOWER = Path(SUPERVISOR_DATA, "openpeerpower.json")
FILE_OPPIO_INGRESS = Path(SUPERVISOR_DATA, "ingress.json")
FILE_OPPIO_SERVICES = Path(SUPERVISOR_DATA, "services.json")
FILE_OPPIO_UPDATER = Path(SUPERVISOR_DATA, "updater.json")

# Well-known host filesystem paths.
MACHINE_ID = Path("/etc/machine-id")
SOCKET_DBUS = Path("/run/dbus/system_bus_socket")
SOCKET_DOCKER = Path("/run/docker.sock")
RUN_SUPERVISOR_STATE = Path("/run/supervisor")

# Docker network configuration; DOCKER_NETWORK_RANGE (172.30.33.0/24) is a
# slice inside DOCKER_NETWORK_MASK (172.30.32.0/23).
DOCKER_NETWORK = "oppio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")

# Internal DNS suffix.
DNS_SUFFIX = "local.opp.io"

# Docker labels used to tag managed containers.
LABEL_ARCH = "io.opp.arch"
LABEL_MACHINE = "io.opp.machine"
LABEL_TYPE = "io.opp.type"
LABEL_VERSION = "io.opp.version"

# Values for the LABEL_TYPE container label.
META_ADDON = "addon"
META_OPENPEERPOWER = "openpeerpower"
META_SUPERVISOR = "supervisor"

# Common JSON payload keys.
JSON_DATA = "data"
JSON_MESSAGE = "message"
def getLastIP(subnet, mask):
    """Return the last usable host address of ``subnet``/``mask``.

    Equivalent to ``list(ip_network(...).hosts())[-1]`` but O(1): the
    original materialized the entire host list, which is enormous for short
    prefixes (e.g. ~16M entries for a /8). ``mask`` may be a prefix length
    ("24") or a dotted netmask ("255.255.255.0").

    Returns an ``IPv4Address``/``IPv6Address``; raises ``ValueError`` for an
    invalid subnet or mask (same as before).
    """
    net = ip_network(subnet + "/" + mask, strict=False)
    # For /31 and /32 (or /127 and /128) hosts() yields every address in the
    # network, so the last host is simply the last address.
    if net.prefixlen >= net.max_prefixlen - 1:
        return net[-1]
    # Otherwise hosts() excludes the network and broadcast addresses, so the
    # last host is the address just below the broadcast address.
    return net.broadcast_address - 1
def finalize(env, provider_conf, gateway, servers, keyfnc, extra_ips=None):
    """Distribute servers onto roles and describe the provider network.

    Returns a ``(roles, networks)`` pair: a role-name -> list-of-Host
    mapping, and a single-element list describing the subnet.
    """

    def _assign_machines(resources, env, machine_pool, keyfnc):
        # Map each role to the nodes picked for every machine description.
        assignment = {}
        pools = mk_pools(machine_pool, keyfnc)
        for desc in resources["machines"]:
            picked = pick_things(pools, desc["flavor"], desc["number"])
            for role in get_roles_as_list(desc):
                assignment.setdefault(role, []).extend(picked)
        return assignment

    # Distribute the machines according to the resource/topology
    # specifications
    os_roles = _assign_machines(provider_conf["resources"], env, servers, keyfnc)

    network_name = provider_conf['network']['name']

    # Common per-host ansible extras; routed through the gateway when one
    # is configured.
    extra = {}
    if provider_conf['gateway']:
        floating = [
            addr for addr in gateway.addresses[network_name]
            if addr['OS-EXT-IPS:type'] == 'floating'
        ][0]['addr']
        user = provider_conf.get('user')
        extra.update({
            'gateway': floating,
            'gateway_user': provider_conf.get('gateway_user', user),
            'forward_agent': True
        })
    extra.update({'ansible_become': 'yes'})

    # build the enos roles
    roles = {}
    for role_name, role_servers in os_roles.items():
        for server in role_servers:
            # NOTE(msimonin): the alias is used by ansible and thus
            # must be an ascii hostname
            host = Host(
                server.addresses[network_name][0]['addr'],
                alias=str(server.name),
                user=provider_conf['user'],
                extra=extra)
            roles.setdefault(role_name, []).append(host)

    # build the network description
    net = ipaddress.ip_network(env['subnet']['cidr'])
    network = {
        'cidr': env['subnet']["cidr"],
        'start': str(net[100]),
        'end': str(net[-3]),
        'extra_ips': extra_ips or [],
        'gateway': env['subnet']["gateway_ip"],
        'dns': '8.8.8.8',
        'roles': provider_conf["resources"].get(
            "networks", ["default_network"]
        )
    }
    return roles, [network]
def in_subnet(validator, value, instance, schema):
    """Schema check: yield a ValidationError unless *instance* is an IP
    address contained in the *value* network."""
    address = ipaddress.ip_address(instance)
    subnet = ipaddress.ip_network(value)
    if address not in subnet:
        yield ValidationError("{0} not in subnet {1}".format(instance, value))
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2016 Doug Hellmann. All rights reserved.
# Written for https://pymotw.com
#
"""Print each example network and its first three host addresses.
"""

#end_pymotw_header
import ipaddress

NETWORKS = [
    '10.9.0.0/24',
    'fdfd:87b5:b475:5e3e::/64',
]

for cidr in NETWORKS:
    network = ipaddress.ip_network(cidr)
    print(f'{network!r}')
    # zip with range(3) caps the output at three hosts without exhausting
    # the (potentially huge) hosts() iterator.
    for _, host in zip(range(3), network.hosts()):
        print(host)
    print()
def _cidr_notation(value): try: ipaddress.ip_network(value) except ValueError as e: assert False, str(e) return True
def networkscanner(self):
    """Ping-sweep the local network, then for every discovered host collect
    MAC address, NetBIOS name, vendor, OS guess (via ICMP TTL), hostname and
    open TCP ports, writing one CSV row per host to Network.csv.

    NOTE(review): depends on third-party modules bound elsewhere in the file
    (nmap, getmac, NetBIOS, IPNetwork, scapy's sr1/IP/ICMP, requests) plus
    stdlib socket/threading/csv/time/Queue.
    """
    # Choose the network to sweep; default to 192.168.43.1/24.
    if len(self.ip) == 0:
        network = '192.168.43.1/24'
    else:
        # BUG?: this concatenates '24' directly onto the IP (e.g.
        # '10.0.0.124'); a '/' separator ('/24') is almost certainly
        # intended — confirm before relying on the else branch.
        network = self.ip + '24'
    print("scanning plz wait...")
    nm = nmap.PortScanner()
    # -sP: ping scan (host discovery only, no port scan).
    nm.scan(hosts=network, arguments='-sP')
    host_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
    hosts = []
    for host, status in host_list:
        print("Host\t{}".format(host))
        hosts.append(host)
        time.sleep(15)
    time.sleep(5)
    # Starts at 1: hosts[0] is assumed to be the router and is only used
    # for the "Router ip" CSV column — TODO confirm ordering guarantees.
    i = 1
    with open("Network.csv", "w", newline="") as file:
        writer = csv.writer(file)
        writer.writerow([
            "Router ip", "HOST IP", "HOST NAME", "MAC Address", "NetBiosname",
            "Manufacturer", "OS", "net mask", "subnet", "Ports open"
        ])
        while i < len(hosts):
            try:
                print(
                    'MAC of', hosts[i], "is ",
                    getmac.get_mac_address(ip=hosts[i], network_request=True))
                time.sleep(20)
                n = NetBIOS()
                nbname = n.queryIPForName(hosts[i])
                print(nbname)
                ip = IPNetwork(str(hosts[i]))
                print(hosts[i], " has net mask ", ip.netmask)
                # NOTE(review): hard-coded subnet regardless of the host
                # being examined — presumably a placeholder; verify.
                net = ipaddress.ip_network('192.168.43.1/255.255.255.0',
                                           strict=False)
                print(net.network_address)
                time.sleep(20)
                # Rough OS fingerprint from the ICMP reply TTL.
                ttl_values = {
                    32: "Windows",
                    60: "MAC OS",
                    64: "MAC OS / Linux",
                    128: "Windows",
                    255: "Linux 2.4 Kernal"
                }
                ans = sr1(IP(dst=str(hosts[i])) / ICMP(), timeout=2, verbose=0)
                if ans:
                    if ans.ttl in ttl_values:
                        print("Host", hosts[i], "has ", ttl_values.get(ans.ttl))
                    else:
                        print("TBC")
                else:
                    print("TBC")
                # Vendor lookup from the MAC's OUI via macvendors.com.
                for addr in [
                        getmac.get_mac_address(ip=hosts[i], network_request=True)
                ]:
                    vendor = requests.get('http://api.macvendors.com/' +
                                          addr).text
                    print(hosts[i], "vendor is", vendor)
                host_name = socket.gethostbyaddr(hosts[i])
                print("host ip is", hosts[i], "host name is", host_name[0])
                time.sleep(10)
                ##port scan
                socket.setdefaulttimeout(0.25)
                print_lock = threading.Lock()
                Total_ports_open = []
                target = str(hosts[i])
                t_IP = socket.gethostbyname(target)
                print('Starting scan on host: ', t_IP)

                def portscan(port):
                    # Try a TCP connect; success means the port is open.
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    try:
                        con = s.connect((t_IP, port))
                        with print_lock:
                            print(port, 'is open')
                            Total_ports_open.append(port)
                        # BUG?: socket.connect() returns None, so con.close()
                        # raises AttributeError (swallowed by the bare
                        # except); s.close() is almost certainly intended.
                        con.close()
                    except:
                        pass

                def threader():
                    # Daemon worker: pull port numbers off the queue forever.
                    while True:
                        worker = q.get()
                        portscan(worker)
                        q.task_done()

                q = Queue()
                startTime = time.time()
                # 100 daemon threads scanning ports 1-4999.
                for x in range(100):
                    t = threading.Thread(target=threader)
                    t.daemon = True
                    t.start()
                for worker in range(1, 5000):
                    q.put(worker)
                q.join()
                print('Time taken:', time.time() - startTime)
                print(Total_ports_open)
                writer.writerow([
                    hosts[0], hosts[i], host_name[0],
                    getmac.get_mac_address(ip=hosts[i], network_request=True),
                    n.queryIPForName(hosts[i]), vendor,
                    ttl_values.get(ans.ttl), ip.netmask, net.network_address,
                    Total_ports_open
                ])
            except:
                # NOTE(review): bare except drops the host's row entirely on
                # any error (DNS failure, no ICMP reply, HTTP error) with no
                # logging — consider narrowing and reporting.
                pass
            i += 1
#!/usr/local/bin/python
"""Output the SVI (switched virtual interface) config for a network/VLAN."""

__author__ = '*****@*****.**'

import argparse
import ipaddress

parser = argparse.ArgumentParser(description='This script will output required SVI interface config')
parser.add_argument('-n', '--network', help='Network to be used, i.e. 192.168.0.1/24', required=True)
parser.add_argument('-v', '--vlan', help='VLAN index to be used, i.e. 201', type=int, required=True)
parser.add_argument('-vg', '--vrrp', help='VRRP group to be used, i.e. 41', type=int, required=True)
# FIX: a description is free text, not a number; the original `type=int`
# rejected every normal description string. Accepting plain strings is
# backward compatible (numeric values still work).
parser.add_argument('-d', '--description', help='Description', required=True)
args = parser.parse_args()

print('Network is: %s' % args.network)
print('Vlan is: %s' % args.vlan)
print('VRRP group is: %s\n\n' % args.vrrp)

# NOTE(review): ip_network() raises ValueError when host bits are set, so
# the example in the -n help text ('192.168.0.1/24') would actually be
# rejected here — confirm whether strict=False is wanted.
net4 = ipaddress.ip_network(args.network)
vlan = args.vlan
vrrp = args.vrrp

print("# Primary router:")
print("vlan %s" % str(vlan))
print("\tname %s" % str(net4))


def print_int():
    # FIX: the original body was a SyntaxError (a string literal juxtaposed
    # with the % expression). Print the interface header and a description
    # line from the CLI argument.
    print("\ninterface vlan%s" % str(vlan))
    print("\tdescription %s" % args.description)