def __init__(self, ip='', port=22, user='******', password='', private_key_file='', debug=False):
    """Constructor.

    Args:
        ip (str): target IP address.
        port (int): ssh port (default 22).
        user (str): ssh login user.
        password (str): ssh password.
        private_key_file (str): path to an ssh private key; when set it is
            passed to Ssh alongside the password.
        debug (bool): when truthy, raise the logging level to DEBUG.
    """
    # Configure the log format/destination BEFORE touching the level:
    # logging.basicConfig is a no-op once the root logger has handlers, so the
    # original order (level first, then format/file) silently discarded the
    # format and debug.log settings whenever debug was True.
    log.basicConfig(
        format='%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
        datefmt='%Y%m%d:%H:%M:%S',
        filename='debug.log',
        level=log.NOTSET)
    if debug:
        log.getLogger().setLevel('DEBUG')

    log.debug("Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, debug={}".
              format(ip, port, user, password, private_key_file, debug))

    # public class attributes
    self.ip = ip
    self.port = port
    self.user = user
    self.password = password
    self.private_key_file = private_key_file
    self.timeout = 3
    # Always defined (the original only assigned it when debug was truthy,
    # leaving the attribute missing otherwise).
    self.debug = bool(debug)
    self.moke_exception = ''  # NOTE(review): 'moke' looks like a typo for 'mock' — kept for compatibility
    self.moke_context = ''
    self.ssh = Ssh(ip=ip, port=port, user=user, password=password,
                   private_key_file=private_key_file, debug=debug)
def __init__(self, host_type='Linux', hypervisor_type='kvm', ip='', port=22,
             user='******', password='******', private_key_file='', mock=False,
             debug=0):
    """Constructor.

    Args:
        host_type (str): 'Linux' (default) or 'ESX' — selects the commands
            used for statistics gathering.
        hypervisor_type (str): 'kvm' (default) or 'esx'.
        ip (str): target IP address.
        port (int): ssh port (default 22).
        user (str): ssh login user.
        password (str): ssh password.
        private_key_file (str): ssh private key path, preferred over password.
        mock (bool): reserved for mocked sessions — not read here,
            presumably consumed elsewhere; TODO confirm with callers.
        debug (int|bool): when truthy, raise the logging level to DEBUG.
    """
    # Configure format/destination first: logging.basicConfig is a no-op once
    # the root logger has handlers, so the level must be raised explicitly.
    log.basicConfig(
        format='%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
        datefmt='%Y%m%d:%H:%M:%S',
        filename='debug.log',
        level=log.NOTSET)
    # Always defined (the original only assigned self.debug when debug was
    # truthy, leaving the attribute missing otherwise).
    self.debug = bool(debug)
    if debug:
        log.getLogger().setLevel('DEBUG')
    log.debug(
        "Enter with host_type={} hypervisor_type={} ip={}, port={}, user={}, password={}, private_key_file={}, debug={}"
        .format(host_type, hypervisor_type, ip, port, user, password,
                private_key_file, debug))

    # public class attributes
    self.host_type = host_type
    self.hypervisor_type = hypervisor_type
    self.ip = ip
    self.port = port
    self.user = user
    self.password = password
    self.private_key_file = private_key_file
    self.mock_context = ''
    self.ssh = Ssh(ip=ip, port=port, user=user, password=password,
                   private_key_file=private_key_file, debug=debug)

    # private class attributes
    self._statistics = {}      # internal representation of statistics
    self._vms = []             # internal representation of each VM
    self._vms_total = {}       # total VM statistics
    self._vms_disks = []       # internal representation of each VM disks info
    self._vms_system = []      # internal representation of each VM system info
    self._vms_disks_dict = {}  # temp object
    self._vms_esx_id_map = {}  # world-id to vm_name mapping
    self._vms_esx_cpu = {}     # dict of vm_id containing nb_cpu
    self._vms_esx_memory = {}  # dict of vm_id containing memory size
    self._vms_esx_disks = {}   # dict of vm_id containing disk size
def __init__(self, ip='', port=22, user='******', password='******',
             private_key_file='', traffic_policy='WAN', mock=False, debug=False):
    """Constructor.

    Args:
        ip (str): target IP address.
        port (int): ssh port (default 22).
        user (str): ssh login user.
        password (str): ssh password.
        private_key_file (str): ssh private key path, passed to Ssh.
        traffic_policy (str): traffic policy name (default 'WAN').
        mock (bool): reserved for mocked sessions — not read here,
            presumably consumed elsewhere; TODO confirm with callers.
        debug (bool): when truthy, raise the logging level to DEBUG.
    """
    # Configure format/destination first: logging.basicConfig is a no-op once
    # the root logger has handlers, so the level must be raised explicitly.
    log.basicConfig(
        format='%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
        datefmt='%Y%m%d:%H:%M:%S',
        filename='debug.log',
        level=log.NOTSET)
    if debug:
        log.getLogger().setLevel('DEBUG')
    log.debug(
        "Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, traffic_policy={}, debug={}"
        .format(ip, port, user, password, private_key_file, traffic_policy,
                debug))

    # public attributes
    self.ip = ip
    self.port = port
    self.user = user
    self.password = password
    # Fix: the original received private_key_file (and forwarded it to Ssh)
    # but never stored it, unlike every sibling constructor in this file.
    self.private_key_file = private_key_file
    self.traffic_policy = traffic_policy
    self.moke_exception = ''  # NOTE(review): 'moke' looks like a typo for 'mock' — kept for compatibility
    self.moke_context = ''
    self.debug = debug
    self.ssh = Ssh(ip=ip, port=port, user=user, password=password,
                   private_key_file=private_key_file, debug=debug)

    # private attributes
    self._config = {}  # internal representation of config
class Fpoc(object):
    """Drive a FortiPoc over ssh: bring device links up/down and report
    per-device link status."""

    def __init__(self, ip='', port=22, user='******', password='',
                 private_key_file='', debug=False):
        """Constructor.

        Args:
            ip (str): FortiPoc IP address.
            port (int): ssh port (default 22).
            user (str): ssh login user.
            password (str): ssh password.
            private_key_file (str): ssh private key path, passed to Ssh.
            debug (bool): when truthy, raise the logging level to DEBUG.
        """
        # Configure format/destination first: logging.basicConfig is a no-op
        # once the root logger has handlers, so the original order (level
        # first) silently discarded the format and debug.log settings.
        log.basicConfig(
            format='%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
            datefmt='%Y%m%d:%H:%M:%S',
            filename='debug.log',
            level=log.NOTSET)
        if debug:
            log.getLogger().setLevel('DEBUG')

        log.debug("Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, debug={}".
                  format(ip, port, user, password, private_key_file, debug))

        # public class attributes
        self.ip = ip
        self.port = port
        self.user = user
        self.password = password
        self.private_key_file = private_key_file
        self.timeout = 3
        # Always defined (the original only assigned it when debug was truthy).
        self.debug = bool(debug)
        self.moke_exception = ''  # NOTE(review): 'moke' looks like a typo for 'mock' — kept for compatibility
        self.moke_context = ''
        self.ssh = Ssh(ip=ip, port=port, user=user, password=password,
                       private_key_file=private_key_file, debug=debug)

    def connect(self):
        """Open the ssh connection."""
        self.ssh.connect()

    def close(self):
        """Close the ssh connection if one exists."""
        if self.ssh:
            self.ssh.close()

    # Tracing wrappers on ssh

    def trace_open(self, filename="tracefile.log"):
        """Open a trace file through the ssh layer."""
        self.ssh.trace_open(filename=filename)

    def trace_write(self, line):
        """Write a line to the trace file."""
        self.ssh.trace_write(line)

    def trace_mark(self, mark):
        """Write a marker to the trace file."""
        self.ssh.trace_mark(mark)

    def set_poc_link_status(self, device='', link='', status=''):
        """Set a FortiPoc link 'up' or 'down' for the given device and link.

        Args:
            device (str): device name in FortiPoc (like 'FGT-1').
            link (str): port name for the device in FortiPoc.
            status (str): 'up' or 'down'.

        Returns:
            str: the ssh command output, or an "ERROR: ..." string on bad
            arguments.
        """
        log.debug("Enter with device={} link={} status={}".
                  format(device, link, status))

        # sanity checks
        if status not in ('up', 'down'):
            print("status values can only be 'up' or 'down'")
            return ("ERROR: status values can only be 'up' or 'down'")
        if device == '':
            print("device is missing")
            return ("ERROR: device missing")
        if link == '':
            print("link is required")
            return ("ERROR: link missing")

        # Send command to FortiPoc
        if not self.ssh.connected:
            self.ssh.connect()
        cmd = "poc link " + status + " " + device + " " + link
        log.debug("cmd={}".format(cmd))
        self.ssh.commands([cmd])
        return (self.ssh.output)

    def get_poc_link_status(self, device=''):
        """Return a json object of link status for the given device.

        Keys are the device port names, values are 'UP' or 'DOWN':
            { "port1": "UP", "port2": "DOWN", ... }

        Uses FPOC command 'poc link list', whose output groups ports under
        each device name, e.g.:
            Clients:
             eth0 (prt0209720C0104): 02:09:72:0C:01:04 (192.168.0.11/... STA): ['UP']
            Controller:
             eth0 (prt0209720C010B): ...
        """
        log.debug("Enter with device={}".format(device))

        if not self.ssh.connected:
            self.ssh.connect()
        self.ssh.commands(['poc link list'])
        log.debug("output:{}".format(self.ssh.output))

        # dictionary to return port status; key is the port name
        return_dic = {}

        flag_device = False
        for line in self.ssh.output.splitlines():
            # When inside our device section, a line that does not start with
            # a space marks the next device: leave the section.
            # Fix: the original tested line[0] which raised IndexError on an
            # empty line; startswith() is safe for "" as well.
            if flag_device and not line.startswith(" "):
                log.debug("end of our device port list - line={}".format(line))
                flag_device = False

            # Raise the flag when our device name heads a section
            if not flag_device and re.search("^(" + device + "):", line):
                log.debug("found device {} in FPOC returned list".format(device))
                flag_device = True

            # Inside our section, extract port name and status
            if flag_device:
                log.debug("line:" + line)
                match_port = re.search(r"^(?:\s+)(?P<port>.\S+)(?:\s\()", line)
                if match_port:
                    port = match_port.group('port')
                    match_status = re.search(r"(?:\[')(?P<status>UP|DOWN)(?:'\])$",
                                             line)
                    if match_status:
                        status = match_status.group('status')
                        log.debug("extracted port={} status={}".
                                  format(port, status))
                        return_dic[port] = status

        # Return our dictionary as a json object
        return (json.dumps(return_dic, indent=4, sort_keys=True))
class Fortiswitch(object):
    """Drive a FortiSwitch over ssh: set physical-port status and report it."""

    def __init__(self, ip='', port=22, user='******', password='',
                 private_key_file='', debug=False):
        """Constructor.

        Args:
            ip (str): FortiSwitch IP address.
            port (int): ssh port (default 22).
            user (str): ssh login user.
            password (str): ssh password.
            private_key_file (str): ssh private key path, passed to Ssh.
            debug (bool): when truthy, raise the logging level to DEBUG.
        """
        # Configure format/destination first: logging.basicConfig is a no-op
        # once the root logger has handlers, so the original order (level
        # first) silently discarded the format and debug.log settings.
        log.basicConfig(
            format='%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
            datefmt='%Y%m%d:%H:%M:%S',
            filename='debug.log',
            level=log.NOTSET)
        if debug:
            log.getLogger().setLevel('DEBUG')

        log.debug(
            "Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, debug={}"
            .format(ip, port, user, password, private_key_file, debug))

        self.ip = ip
        self.port = port
        self.user = user
        self.password = password
        self.private_key_file = private_key_file
        self.timeout = 3
        # Always defined (the original only assigned it when debug was truthy).
        self.debug = bool(debug)
        self.moke_exception = ''  # NOTE(review): 'moke' looks like a typo for 'mock' — kept for compatibility
        self.moke_context = ''
        self.ssh = Ssh(ip=ip, port=port, user=user, password=password,
                       private_key_file=private_key_file, debug=debug)

    def connect(self):
        """Open the ssh connection."""
        self.ssh.connect()

    def close(self):
        """Close the ssh connection if one exists."""
        if self.ssh:
            self.ssh.close()

    # Tracing wrappers on ssh

    def trace_open(self, filename="tracefile.log"):
        """Open a trace file through the ssh layer."""
        self.ssh.trace_open(filename=filename)

    def trace_write(self, line):
        """Write a line to the trace file."""
        self.ssh.trace_write(line)

    def trace_mark(self, mark):
        """Write a marker to the trace file."""
        self.ssh.trace_mark(mark)

    def set_port_status(self, port='', status=''):
        """Set the given fortiswitch port UP or DOWN.

        Using:
            config switch physical-port
                edit <port>
                set status <status>
            next
            end

        Args:
            port (str): port name (like 'port21').
            status (str): 'up' or 'down'.

        Returns:
            str on bad arguments ("ERROR: ..."), otherwise None.
        """
        log.debug("Enter with port={} status={}".format(port, status))

        # sanity checks
        if status not in ('up', 'down'):
            print("status values can only be 'up' or 'down'")
            return ("ERROR: status values can only be 'up' or 'down'")
        if port == '':
            print("port is missing")
            return ("ERROR: port missing")

        if not self.ssh.connected:
            self.ssh.connect()

        cmds = (
            "config switch physical-port\n",
            "edit " + port + "\n",
            "set status " + status + "\n",
            "next\n",
            "end\n",
        )
        for cmd in cmds:
            log.debug("send: {}".format(cmd))
            self.run_op_mode_command(cmd)

    def get_port_status(self, port=''):
        """Return the status for the given port: 'up' or 'down'.

        Uses 'diagnose switch physical-ports summary <port>'; a sample line:
            port21     down   8100 1021  full   10G   , ,       none

        Returns None if the port is not found in the command output.
        """
        log.debug("Enter with port={}".format(port))

        if port == '':
            print("port is missing")
            return ("ERROR: port missing")

        if not self.ssh.connected:
            self.ssh.connect()

        cmd = "diagnose switch physical-ports summary " + port + "\n"
        self.run_op_mode_command(cmd)
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))
            match = re.search(
                r'^(?:\s+)(?P<port>port\d+)(?:\s+)(?P<status>\S+)(?:\s+)', line)
            if match:
                m_port = match.group('port')
                if port == m_port:
                    m_status = match.group('status')
                    log.debug("found expected port={}, status={}".format(
                        m_port, m_status))
                    return m_status
        # Port not present in the output: explicit None (the original fell
        # through and returned None implicitly).
        log.warning("port={} not found in output".format(port))
        return None

    def run_op_mode_command(self, cmd):
        """Send a command through the netcontrol shell and return its output."""
        log.debug("Enter with cmd={}".format(cmd))
        self.ssh.shell_send([cmd])
        return (self.ssh.output)
class Vm(object): ''' Using logger for debugging, log file named Vm.log' Default user : root Default password : fortinet Default ssh port : 22 If given, the ssh key is prefered over password host_type : Linux (default) or KVM hypervisor_type : kvm (default) or esx ''' def __init__(self, host_type='Linux', hypervisor_type='kvm', ip='', port=22, user='******', password='******', private_key_file='', mock=False, debug=0): ''' Constructor ''' # logger log.basicConfig( format= '%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s', datefmt='%Y%m%d:%H:%M:%S', filename='debug.log', level=log.NOTSET) # Set debug level first if debug: self.debug = True log.basicConfig(level='DEBUG') log.debug( "Enter with host_type={} hypervisor_type={} ip={}, port={}, user={}, password={}, private_key_file={}, debug={}" .format(host_type, hypervisor_type, ip, port, user, password, private_key_file, debug)) # public class attributs self.host_type = host_type self.hypervisor_type = hypervisor_type self.ip = ip self.port = port self.user = user self.password = password self.private_key_file = private_key_file self.mock_context = '' self.ssh = Ssh(ip=ip, port=port, user=user, password=password, private_key_file=private_key_file, debug=debug) # private class attributes self._statistics = {} # Internal representation of statistics self._vms = [] # Internal representation of each VMs self._vms_total = {} # Total VMs statistics self._vms_disks = [] # Internal representation of each VM disks info self._vms_system = [] # Internal representation of each VM systen info self._vms_disks_dict = {} # temp object self._vms_esx_id_map = {} # wid to vm_name mapping self._vms_esx_cpu = {} # dict of vm_id containing nb_cpu self._vms_esx_memory = {} # dict of vm_id containing memory size self._vms_esx_disks = {} # dict of vm_id containing disk size in def connect(self): self.ssh.connect() # Tracing wrapper on ssh def trace_open(self, filename="tracefile.log"): 
self.ssh.trace_open(filename="tracefile.log") def trace_write(self, line): self.ssh.trace_write(line) def trace_mark(self, mark): self.ssh.trace_mark(mark) def close(self): self.ssh.close() def get_statistics(self): """ Get server CPU, MEMORY and DISK usage Commands to run depends on host_type Return: json """ log.debug('Enter') self._get_nbcpu() self._get_loadavg() if self.host_type == 'Linux': self._get_memory_kvm() self._get_disk_kvm() elif self.host_type == 'ESX': self._get_memory_esx() self._get_disk_esx() return (json.dumps(self._statistics)) def get_vms_statistics(self): """ Get server VMS related statistics Return: json """ log.debug('Enter') if self.hypervisor_type == 'kvm': self._get_processes_kvm() self._get_vms_system_kvm() self._get_vms_disk_kvm() elif self.hypervisor_type == 'esx': self._build_vms_esx_cpu() self._build_vms_esx_memory() self._build_vms_esx_disk() self._get_processes_esx() result = {} result['vms'] = self._vms result['vms_total'] = self._vms_total result['vms_disks'] = self._vms_disks result['vms_system'] = self._vms_system return (json.dumps(result)) def _get_nbcpu(self): """ Fills self._statistics with the number of CPU on the server Same command used for for Linux and ESX system """ log.debug("Enter") self.ssh.shell_send(["cat /proc/cpuinfo | grep processor | wc -l\n"]) log.debug("output={}".format(self.ssh.output)) # This is the first line with a single number in the line nb_cpu_match = re.search("(\d+)\n", str(self.ssh.output)) if nb_cpu_match: nb_cpu = int(nb_cpu_match.groups(0)[0]) log.debug("nb_cpu={}".format(nb_cpu)) self._statistics['nb_cpu'] = nb_cpu def _get_loadavg(self): """ Fills self._statistics with cpu load average information Using key 'load', average load is given in 1m, 5m and 15mn interval Ex : 'load': { '1mn': ... '5mn': ... '15mn': ... 
} Different commands for Linux system (cat /proc/loadavg) and ESX (uptime) """ log.debug("Enter") load_1mn = "" load_5mn = "" load_15mn = "" log.debug("host_type={}".format(self.host_type)) if self.host_type == 'Linux': cmd = "cat /proc/loadavg\n" # load average (1mn 5mn 15mn) typical output : # 13.14 13.65 13.96 20/1711 38763 elif self.host_type == 'ESX': cmd = "uptime\n" # 9:43:24 up 141 days, 03:33:10, load average: 0.06, 0.07, 0.07 self.ssh.shell_send([cmd]) load_match = re.search( "(\d+\.?\d?\d?)\,?\s+(\d+\.?\d?\d?)\,?\s+(\d+\.?\d?\d?)", str(self.ssh.output)) if load_match: load_1mn = load_match.groups(0)[0] load_5mn = load_match.groups(0)[1] load_15mn = load_match.groups(0)[2] self._statistics['load'] = {} self._statistics['load']['1mn'] = load_1mn self._statistics['load']['5mn'] = load_5mn self._statistics['load']['15mn'] = load_15mn log.debug("load_1mn={} load_5mn={} load_15mn={}".format( load_1mn, load_5mn, load_15mn)) else: log.error("Could not extract system load for type={}".format( self.host_type)) def _get_memory_kvm(self): """ Fills self._statistics with memory load information Using cat /proc/meminfo Using first key 'memory' and subkeys 'total', 'free' and available Note : better to use available than free because of caches Unit : KB Ex: 'memory': { 'total': ... 'free' : ... 'available': ... 
} """ log.debug("Enter") # Memory load typical output (skip unecessary lines): # MemTotal: 264097732 kB # MemFree: 5160488 kB # MemAvailable: 108789520 kB # Note: MemAvailable considers the swap that we don't want to use so use MemTotal-MemFree for used self._statistics['memory'] = {} self.ssh.shell_send(["cat /proc/meminfo\n"]) memory_total = 0 memory_free = 0 memory_available = 0 mem_total_match = re.search("MemTotal:\s+(\d+) kB", str(self.ssh.output)) if mem_total_match: memory_total = int(mem_total_match.groups(0)[0]) self._statistics['memory']['total'] = memory_total mem_free_match = re.search("MemFree:\s+(\d+) kB", str(self.ssh.output)) if mem_free_match: memory_free = int(mem_free_match.groups(0)[0]) self._statistics['memory']['free'] = memory_free mem_available_match = re.search("MemAvailable:\s+(\d+) kB", str(self.ssh.output)) if mem_available_match: memory_available = int(mem_available_match.groups(0)[0]) self._statistics['memory']['available'] = memory_available log.debug( "memory_total={}, memory_free={}, memory_available={}".format( memory_total, memory_free, memory_available)) def _get_memory_esx(self): """ Fills self._statistics with memory load information Using memstats -r comp-stats Unit : KB Ex: 'memory': { 'total': ... 'free' : ... 'available': ... } 211202: format changed in vers. 6.7 compare to 6.0 ('critical' removed) """ log.debug("Enter") # command has several values : total, minFree, free and some others. 
# use 'total', 'free' and consider available as free (we don't use it anyway) # this is how % is shown in vcenter for free so it matches self._statistics['memory'] = {} self.ssh.shell_send(["memstats -r comp-stats\n"]) memory_total = 0 memory_free = 0 memory_available = 0 # Memory regexp for version 6.0.0 and 6.7.0 mem_re_v60 = "(?P<total>\d+)\s+"\ + "(?P<discarded>\d+)\s+"\ + "(?P<managedByMemMap>\d+)\s+"\ + "(?P<reliableMem>\d+)\s+"\ + "(?P<kernelCode>\d+)\s+"\ + "(?P<critical>\d+)\s+"\ + "(?P<dataAndHeap>\d+)\s+"\ + "(?P<buddyOvhd>\d+)\s+"\ + "(?P<rsvdLow>\d+)\s+"\ + "(?P<managedByMemSched>\d+)\s+"\ + "(?P<minFree>\d+)\s+"\ + "(?P<vmkClientConsumed>\d+)\s+"\ + "(?P<otherConsumed>\d+)\s+"\ + "(?P<free>\d+)\s+" mem_re_v67 = "(?P<total>\d+)\s+"\ + "(?P<discarded>\d+)\s+"\ + "(?P<managedByMemMap>\d+)\s+"\ + "(?P<reliableMem>\d+)\s+"\ + "(?P<kernelCode>\d+)\s+"\ + "(?P<dataAndHeap>\d+)\s+"\ + "(?P<buddyOvhd>\d+)\s+"\ + "(?P<rsvdLow>\d+)\s+"\ + "(?P<managedByMemSched>\d+)\s+"\ + "(?P<minFree>\d+)\s+"\ + "(?P<vmkClientConsumed>\d+)\s+"\ + "(?P<otherConsumed>\d+)\s+"\ + "(?P<free>\d+)\s+" esx_vers = '6.7' for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) # 'critical' is the marker of the 6.0 version match_version = re.search("critical", line) if match_version: esx_vers = '6.0' log.debug("found 6.0 like version") if esx_vers == '6.7': match_memory = re.search(mem_re_v67, line) else: match_memory = re.search(mem_re_v60, line) if match_memory: memory_total = int(match_memory.group('total')) memory_free = int(match_memory.group('free')) memory_available = memory_free log.debug("memory_total={} memory_free={} computed memory_available={}"\ .format(memory_total, memory_free, memory_available)) self._statistics['memory']['total'] = memory_total self._statistics['memory']['free'] = memory_free self._statistics['memory']['available'] = memory_available def _get_disk_kvm(self): """ Fills self._statistics with disk usage information The goal is to get the 
remaining free space on the /home Using first key 'disk', subkeys 'home', subkeys 'used' 'available' 'used_percent' Unit is in MB Ex : 'disk': { <mount>': { # <mount> could be /home or others 'dev' : xxx # dev is the Filesystem 'used': xxx (in G) 'used_percent' : xxx (in percent) 'available': xxx (in G) } } """ log.debug("Enter") # Typical output (skip unessary lines): # Filesystem 1G-blocks Used Available Use% Mounted on # udev 126G 0G 126G 0% /dev # tmpfs 26G 1G 26G 1% /run # /dev/sda1 10G 4G 5G 45% / # /dev/sda6 1751G 1167G 496G 71% /home self.ssh.shell_send(["df -BG\n"]) self._statistics['disk'] = {} for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) home_re = "(?P<dev>[A-Za-z0-9\/]+)(?:\s+)(\d+)G\s+(?P<used>\d+)G\s+"\ + "(?P<available>\d+)G\s+(?P<used_percent>\d+)%\s+"\ + "(?P<mounted>[A-Za-z0-9\/]+)" home_match = re.search(home_re, line) if home_match: dev = home_match.group('dev') used = home_match.group('used') available = home_match.group('available') used_percent = home_match.group('used_percent') mounted = home_match.group('mounted') log.debug( "dev={} used={} available={} used_percent={} mounted={}". 
format(dev, used, available, used_percent, mounted)) self._statistics['disk'][mounted] = {} self._statistics['disk'][mounted]['dev'] = dev self._statistics['disk'][mounted]['used'] = used self._statistics['disk'][mounted]['available'] = available self._statistics['disk'][mounted][ 'used_percent'] = used_percent self._statistics['disk'][mounted]['type'] = 'KVM' def _get_disk_esx(self): """ Fills self._statistics with disk usage information The goal is to get the remaining free space on the /home Using first key 'disk', subkeys 'home', subkeys 'used' 'available' 'used_percent' Unit is in MB Ex : 'disk': { <mount>': { # <mount> could be /home or others 'dev' : xxx # dev is the Filesystem 'used': xxx (in G) 'used_percent' : xxx (in percent) 'available': xxx (in G) } } """ log.debug("Enter") # Typical output (skip unessary lines) # not esx does not support -BG options # [root@uranium:~] df -m # Filesystem 1M-blocks Used Available Use% Mounted on # NFS41 28032 8744 19287 31% /vmfs/volumes/Farm2-nfs # VMFS-5 1899008 1880057 18951 99% /vmfs/volumes/datastore-Uranium # vfat 249 174 75 70% /vmfs/volumes/69f4af7a-8fef67ee-19a8-73d14778d37d # vfat 4094 32 4061 1% /vmfs/volumes/58f72d54-99c8b477-1ff9-d4ae52e8199a # vfat 249 175 74 70% /vmfs/volumes/42497372-e8f357aa-1697-4021215e5aa2 # vfat 285 262 23 92% /vmfs/volumes/58f72d4b-3d836623-207e-d4ae52e8199a self.ssh.shell_send(["df -m\n"]) self._statistics['disk'] = {} for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) datastore_re = "(?P<dev>[A-Za-z0-9\/-]+)(?:\s+)(\d+)\s+(?P<used>\d+)\s+"\ + "(?P<available>\d+)\s+(?P<used_percent>\d+)%\s+"\ + "(?P<mounted>[A-Za-z0-9\/]+)" datastore_match = re.search(datastore_re, line) if datastore_match: dev = datastore_match.group('dev') used = datastore_match.group('used') available = datastore_match.group('available') used_percent = datastore_match.group('used_percent') mounted = datastore_match.group('mounted') log.debug( "dev={} used={} available={} used_percent={} 
mounted={}". format(dev, used, available, used_percent, mounted)) self._statistics['disk'][mounted] = {} self._statistics['disk'][mounted]['dev'] = dev self._statistics['disk'][mounted]['used'] = used self._statistics['disk'][mounted]['available'] = available self._statistics['disk'][mounted][ 'used_percent'] = used_percent self._statistics['disk'][mounted]['type'] = 'ESXI' def _get_processes_esx(self): """ Retrieve esxi process from 'esxcli process list' to fill _vms and _vms_total attributs _vms_system is also filled but in ESX template is the system Sample of 1 process: root@uranium:~] esxcli vm process list uranium-esx36 [knagaraju] FGT_VM64_ESXI World ID: 2557323 Process ID: 0 VMX Cartel ID: 2557322 <<< to store UUID: cc cf 7f af 0f 3d 45 23-8a 69 1f 43 4e c2 97 56 Display Name: uranium-esx36 [knagaraju] FGT_VM64_ESXI Config File: /vmfs/volumes/58f72d53-3d2f86ee-e2b8-d4ae52e8199a/machines/uranium-esx36 [knagaraju] FGT_VM64_ESXI/uranium-esx36 [knagaraju] FGT_VM64_ESXI.vmx Note: the vmid used in other commands is the "World ID" (need to be extracted) We use Display name as VM id """ log.debug("Enter") self.ssh.shell_send(["esxcli vm process list\n"]) self._vms = [] self._vms_total = {} self._vms_total['cpu'] = 0 self._vms_total['memory'] = 0 self._vms_total['number'] = 0 self._vms_total['disk'] = 0 self._vms_esx_id_map = {} esx_start = False esx_end = False esx_line = 0 vm_name = "" vm_memory = 0 ret = True for line in self.ssh.output.splitlines(): if esx_start: esx_line = esx_line + 1 log.debug("esx_line={} line={}".format(esx_line, line)) if esx_line == 1: match_name = re.search( "(?P<vm_name>\S+)\s\[(?P<create_user>\S+)\]\s(?P<system>\S+)", line) if match_name: vm_name = match_name.group('vm_name') create_user = match_name.group('create_user') system = match_name.group('system') self._vms_total['number'] += 1 log.debug( "Found new vm_name={} create_user={} system={} total_number={}" .format(vm_name, create_user, system, self._vms_total['number'])) if 
esx_start: match_esxid = re.search("VMX\sCartel\sID:\s(?P<vm_esxid>\d+)", line) if match_esxid: vm_esxid = match_esxid.group('vm_esxid') log.debug("Found {} vm_esxid={}".format(vm_name, vm_esxid)) self._vms_esx_id_map[vm_esxid] = vm_name if vm_esxid in self._vms_esx_memory: vm_memory = int( int(self._vms_esx_memory[vm_esxid]) / 1024) self._vms_total['memory'] += int(vm_memory) else: log.error( "Could not find vm memory for vm_esxi={}".format( vm_esxid)) ret = False if (not esx_start and (line.find('esxcli vm process list') != -1) or (line == "")): log.debug("Found esx process line start") esx_start = True esx_end = False esx_line = 0 if (esx_start and (line.find('Config File') != -1)): log.debug("Found esx process line end") esx_end = True esx_start = False vm = {} instance = self._get_vm_instance_from_name(name=vm_name) if re.search('\d+', instance): vm['id'] = instance vm['cpu'] = 0 vm['memory'] = vm_memory vm['template'] = system if system: system = system.replace('_ESXI', '') else: log.warning("No _ESXI in system") vm['system'] = system self._vms_system.append({ 'id': instance, 'system': system, 'type': 'ESXI' }) if vm_name in self._vms_esx_cpu: vm['cpu'] = self._vms_esx_cpu[vm_name] self._vms_total['cpu'] += vm['cpu'] log.debug( "vm_name={} instance={} vms_total_cpu={}".format( vm_name, instance, self._vms_total['cpu'])) else: log.error( "Could not find nb of cpu for vm_name={}".format( vm_name)) ret = False if vm_name in self._vms_esx_disks: vm['disk'] = self._vms_esx_disks[vm_name] vm['type'] = 'ESXI' self._vms_total['disk'] += vm['disk'] log.debug( "vm_name={} instance={} vms_total_disk={}".format( vm_name, instance, self._vms_total['disk'])) else: log.error( "Could not find disk size for vm_name={}".format( vm_name)) ret = False self._vms.append(vm) else: log.warning( "got unexpected instance format instance={}".format( instance)) ret = False return ret def _get_vm_instance_from_name(self, name=""): """ VM instance is like 001, 011, 122 and so on. 
It should be extracted from server name (ex: uranium-tam-esx42) """ log.debug("Enter with name={}".format(name)) match_inst = re.search("(?P<inst>\d+)$", name) if match_inst: inst = match_inst.group('inst') result = str(inst).zfill(3) log.debug("formatted instance result={}".format(result)) else: log.warning( "Could not extract formatted instance from name={}".format( name)) return result def _build_vms_esx_cpu(self): """ To be run before _get_process_cpu_esx Update self._vms_esx_cpu with number of CPUs by VM using 'ps -u | grep vcpu' One line per cpu by vm so keep the last line and add +1 sample: 7446209 7446202 vmx-vcpu-0:neutron-esx04 [spathak] FGT_VM64_ESXI <- 1 cpu 7872663 7872656 vmx-vcpu-0:neutron-esx06 [atsakiridis] FGT_VM64_ESXI <- 1 cpu 5087640 5087630 vmx-vcpu-0:neutron-esx01 [apasta] FMG_VM64_ESXI 5087641 5087630 vmx-vcpu-1:neutron-esx01 [apasta] FMG_VM64_ESXI 5087642 5087630 vmx-vcpu-2:neutron-esx01 [apasta] FMG_VM64_ESXI 5087643 5087630 vmx-vcpu-3:neutron-esx01 [apasta] FMG_VM64_ESXI <- 4 cpus """ log.debug("Enter") self._vms_esx_cpu = {} self.ssh.shell_send(["ps -u\n"]) for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) match = re.search( "\d+\s+\d+\s+vmx-vcpu-(?P<cpu>\d+):(?P<vm_id>\S+)", line) if match: cpu = match.group('cpu') vm_id = match.group('vm_id') log.debug("found vm_id={} cpu={}".format(vm_id, cpu)) self._vms_esx_cpu[vm_id] = int(cpu) + 1 def _build_vms_esx_memory(self): """ To be run before _get_process_cpu_esx Update self._vms_esx_memory with VM memory using 'memstats -r vm-stats' sample: name b schedGrp parSchedGroup worldGrp memSizeLimit memSize min max minLimit shares ovhdResv ovhd allocTgt consumed balloonTgt ballooned swapTgt swapped mapped active zipped zipSaved shared zero sharedSaved useReliableMem swapScope 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ vm.48361 n 125647 4 48361 2097152 2097152 0 -1 -1 -3 48072 40392 2084412 2053692 0 0 0 0 2066432 20968 0 0 14076 10204 12740 n 1 using field 5 (worldGrp) and 6 (memSizeLimit) """ log.debug("Enter") self._vms_esx_memory = {} self.ssh.shell_send(["memstats -r vm-stats\n"]) for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) match = re.search( "vm\.\d+\s+\S+\s+\d+\s+\d+\s+(?P<esxid>\d+)\s+(?P<memory>\d+)\s", line) if match: esxid = match.group('esxid') memory = match.group('memory') log.debug("found esxid={} memory={}".format(esxid, memory)) self._vms_esx_memory[esxid] = memory def _get_processes_kvm(self): """ Retrieve qemu processes from KVM server Fills _vms and _vms_total attributs Workaround 200824 : it has been seen that sometimes the output buffer splits a line in 2 chunks so it is not possible to get all VM attributes from the same line. To workaround this, we need to make sure that each KVM line start contains the starting token and the ending one (timestamp=). 
If the ending one is not there, lines need to be concatenated in one before it is tokenized """ log.debug("Enter") self.ssh.shell_send(["ps -xww | grep qemu-system\n"]) self._vms_total = {} self._vms_total['cpu'] = 0 self._vms_total['memory'] = 0 self._vms_total['number'] = 0 full_line = "" kvm_start = False kvm_end = False for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) need_tokenize = False # Looking for kvm process starting line (qemu-system-x86_64) if line.find('qemu-system-x86_64') != -1: log.debug("Found kvm process line start") kvm_start = True kvm_end = False # Looking for kvm process ending line (\stimestamp=on) if line.find('timestamp=on') != -1: log.debug("Found kvm process line end") kvm_start = False kvm_end = True # Dealing with all possibilities if kvm_start: if kvm_end: log.debug("Full line seen") full_line = line kvm_start = False kvm_end = False need_tokenize = True else: log.debug( "Start without end, line is split, first fragment seen" ) full_line = line else: if kvm_end: log.debug( "End without start, last chunk of multiline seen, tokenize" ) full_line = full_line + line kvm_start = False kvm_end = False need_tokenize = True else: log.debug("No start, no end, do nothing") if need_tokenize: result = self._tokenize(full_line) full_line = "" log.debug("Recording result") self._vms.append(result) # Record total for all VMs if 'cpu' in result: # Count number of VMs based on the cpu token self._vms_total['number'] += 1 self._vms_total['cpu'] += int(result['cpu']) log.debug("vms_total_cpu={}".format( self._vms_total['cpu'])) if 'memory' in result: self._vms_total['memory'] += int(result['memory']) log.debug("vms_total_memory={}".format( self._vms_total['memory'])) def _get_vms_system_kvm(self): """ Retrieve the running system code from virsh list --title _KVM is removed so system is the same with esx Fills _vms_system ex: root@radon-trn:~# virsh list --title --all Id Name State Title 
-------------------------------------------------------- 2 006 running 006 [vbharat] LinuxMint18_KVM 3 004 running 004 [vbharat] Windows7_KVM 4 009 running 009 [tgirard] FPOC-17_VM64_KVM - 006 shut off 006 [sindreschulstad] Windows10_KVM - 008 shut off 008 [sharmap] FGT_VM64_KVM - 010 shut off 010 [spathak] Windows10_KVM Notes: - can't use 'virsh list --title --all' because the combination is not supported by all servers ion does not output anything at all. - match with ESX behavior for which we don't extract OS on shutdown systems (based on process) """ log.debug("Enter") self.ssh.shell_send(["virsh list --title\n"]) for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) system_match = re.search( "\s+\S+\s+(?P<id>\S+)\s+(?:running|idle|paused|in\sshutdown|shut\soff|crashed|pmsuspended)\s+\S+\s+\S+\s+(?P<system>\S+)", line) if system_match: id = system_match.group('id') system = system_match.group('system') if system: system = system.replace('_KVM', '') else: log.warning("No _KVM in system") log.debug("Found id={} system={}".format(id, system)) self._vms_system.append({ 'id': id, 'system': system, 'type': 'KVM' }) def _tokenize(self, line): """ Tokenise ps lines has run into a dictionnary where the key is the option (the -xxxx). Only tokenize tokens we are interested in 210316 : manually started vm need to be exculded (the id does not match guest=\d+), seen on radon return: dictionary like { 'id': ... 'cpu' ... 'memory': ... 'template': ... 
} """ log.debug("Enter with line={}".format(line)) vm_id = None cpu = None memory = None template = None # VM id only digit if launched from labsetup # other if launched manually from a user id_match = re.search("\sguest=([A-Za-z0-9_\-\.\/\s]+)(?:,|\s)", line) if id_match: vm_id = id_match.groups(0)[0] log.debug("id={}".format(vm_id)) # Number of CPU assigned to the VM cpu_match = re.search("\s-smp\s(\d+)(?:,|\s)", line) if cpu_match: cpu = int(cpu_match.groups(0)[0]) log.debug("cpu={}".format(cpu)) # Allocated memory in Mb memory_match = re.search("\s-m\s(\d+)(?:,|\s)", line) if memory_match: memory = int(memory_match.groups(0)[0]) log.debug("memory={}".format(memory)) # Running template template_match = re.search("\s-drive\sfile=([A-Za-z0-9_\-\.\/\s]+)", line) if template_match: template = template_match.groups(0)[0] log.debug("template={}".format(template)) vm = {} if template_match and memory_match and cpu_match and id_match: log.debug( "tokenize succesful : id={} cpu={} memory={} template={}". format(vm_id, cpu, memory, template)) vm['id'] = vm_id vm['cpu'] = cpu vm['memory'] = memory vm['template'] = template return vm elif memory_match and cpu_match and id_match: # This case was seen with windows VM created without disk (stay in # boot failure) log.debug( "tokenize succesful without template : id={} cpu={} memory={}". format(vm_id, cpu, memory)) vm['id'] = vm_id vm['cpu'] = cpu vm['memory'] = memory return vm else: log.warning( "tokenize failed (maybe a manually started VM) vm_id={} cpu={} memory={}" .format(vm_id, cpu, memory)) # Need to return an empty dictionnary return {} def _get_vms_disk_kvm(self, vmpath='/home/virtualMachines'): """ Retrieve VM disk usage. Retrieve all VMs disk usage located in vmpath. Make sure to retrieve the provisioned disk size and not the current usage of a qcow For this 'ls' or 'du' can't be used, however 'file' can do the job. 
Ex: # for i in `virsh list | awk '{ print $2}'`; do file /home/virtualMachines/$i/*; done /home/virtualMachines/Name/*: cannot open `/home/virtualMachines/Name/*' (No such file or directory) /home/virtualMachines/045/boot.qcow2: QEMU QCOW Image (v3), 1073741824 bytes /home/virtualMachines/045/datadrive.qcow2: QEMU QCOW Image (v3), 64424509440 bytes /home/virtualMachines/008/win10.qcow2: QEMU QCOW Image (v2), has backing file (path /home/templates/Windows10/20201014/sop.qcow2), 85899345920 bytes /home/virtualMachines/097/fmg.qcow2: QEMU QCOW Image (v2), 2147483648 bytes /home/virtualMachines/097/storage.qcow2: QEMU QCOW Image (v2), 85899345920 bytes /home/virtualMachines/002/fortios.qcow2: QEMU QCOW Image (v3), 2147483648 bytes ../.. Need to addition for each VM the size of each disks in bytes """ log.debug('Enter with vmpath={}'.format(vmpath)) cmd = "for i in `virsh list --all | awk '{print $2}'`; do file " + vmpath + "/$i/* ; done" self.ssh.shell_send([cmd + "\n"]) for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) self._extract_vms_disk(vmpath, line) for id in self._vms_disks_dict: size = self._vms_disks_dict[id] self._vms_disks.append({'id': id, 'size': size, 'type': 'KVM'}) def _build_vms_esx_disk(self): """ Parse datastore, retrieve disk usage for each VM. 
build self._vms_esx_disks using vm name as a key du -h /vmfs/volumes/datastore-Uranium/ | grep esx may be in different locations /vmfs/volumes/datastore-Uranium/machines/ or /vmfs/volumes/datastore-Uranium/ as seen on uranium 16.1G /vmfs/volumes/datastore-Uranium/machines/uranium-esx31 [iyotov] FMG_VM64_ESXI 22.9G /vmfs/volumes/datastore-Uranium/uranium-esx57 [bvata] Win2012R2_ESXI sample: [root@uranium:~] du -h /vmfs/volumes/datastore-Uranium/ | grep esx 20.7G /vmfs/volumes/datastore-Uranium/uranium-esx14 [vpalomo] FAD_VM64_ESXI 23.6G /vmfs/volumes/datastore-Uranium/uranium-esx10 [birendrakumar] Win10_ESXI 26.3G /vmfs/volumes/datastore-Uranium/uranium-esx22 [birendrakumar] Win2012R2_ESXI 19.5G /vmfs/volumes/datastore-Uranium/uranium-FSA-esx42 [vchauhan] FSA_VM64_ESXI 2.7G /vmfs/volumes/datastore-Uranium/uranium-esx18 [azaman] FGT_VM64_ESXI 29.2G /vmfs/volumes/datastore-Uranium/uranium-esx83 [grg] Win2019_ESXI 30.2G /vmfs/volumes/datastore-Uranium/uranium-esx06 [vkoodakandi] Win2016_ESXI 24.6G /vmfs/volumes/datastore-Uranium/uranium-esx11 [amarinos] Win10_ESXI 24.8G /vmfs/volumes/datastore-Uranium/uranium-esx05 [vchauhan] Win2012R2_ESXI 4.5G /vmfs/volumes/datastore-Uranium/uranium-esx08 [emouque] FGT_VM64_ESXI 42.5G /vmfs/volumes/datastore-Uranium/uranium-esx23 [azhunissov] Win2016_ESXI 16.9G /vmfs/volumes/datastore-Uranium/uranium-esx69 [tstribrny] DebianJessie_ESXI 23.8G /vmfs/volumes/datastore-Uranium/uranium-esx85 [abarushka] Win10_ESXI 34.7G /vmfs/volumes/datastore-Uranium/uranium-esx79 [spathak] Win2016_ESXI 2.9G /vmfs/volumes/datastore-Uranium/uranium-esx74 [ssener] FGT_VM64_ESXI 29.0G /vmfs/volumes/datastore-Uranium/uranium-esx53 [abarushka] Win2016_ESXI 15.5G /vmfs/volumes/datastore-Uranium/uranium-esx37 [emete] Debian9_ESXI 10.7G /vmfs/volumes/datastore-Uranium/uranium-esx86 [opetr] FWB_VM64_ESXI 3.3G /vmfs/volumes/datastore-Uranium/uranium-esx76 [flopez] FGT_VM64_ESXI 7.6G /vmfs/volumes/datastore-Uranium/uranium-esx78 [tstribrny] FAC_VM64_ESXI 77.4G 
/vmfs/volumes/datastore-Uranium/uranium-esx64 [bpozdena] FLG_VM64_ESXI 3.8G /vmfs/volumes/datastore-Uranium/machines/uranium-esx49 [vchauhan] FAC_VM64_ESXI 2.6G /vmfs/volumes/datastore-Uranium/machines/uranium-esx04 [fbegit] FGT_VM64_ESXI 2.3G /vmfs/volumes/datastore-Uranium/machines/uranium-esx60 [ncorreia] FGT_VM64_ESXI Result to be provided in MB Should be run before _get_processes_esx Note: 220223 on slower server, ouput ot the command be be seen on next one... Sending an empty line in th end seems to do the trick """ log.debug("Enter") cmd = "du -h /vmfs/volumes/*datastore*/ | grep esx | awk '// { print $1 \", \" $2}'" self.ssh.shell_send([cmd + "\n"]) for line in self.ssh.output.splitlines(): log.debug("line={}".format(line)) match_vm = re.search( "(?P<size>\d+\.?\d+?)(?P<unit>G|M|K|T),\s(?P<machine>\S+)", line) if match_vm: size = match_vm.group('size') unit = match_vm.group('unit') machine = match_vm.group('machine') log.debug("Found size={} unit={} machine={}".format( size, unit, machine)) # get machine id from full name # ex: /vmfs/volumes/datastore-Neutron/machines/neutron-esx36 or # ex: /vmfs/volumes/datastore-Uranium/uranium-esx69 (no machines) match_name = re.search("(machines)?/(?P<name>[A-Za-z0-9_-]+)$", machine) if match_name: name = match_name.group('name') log.debug("Found name={}".format(name)) if unit == 'G': value = int(float(size) * 1024) elif unit == 'M': value = int(float(size)) elif unit == 'T': value == int(float(size) * 1024 * 1024) elif unit == 'K': value == int(float(size) / 1024) else: log.error( "Unexpected disk size unit={} on esx machine={}". 
format(unit, name)) value = 0 self._vms_esx_disks[name] = value log.debug("name={} disk size={}".format(name, value)) match = re.search("esx(?P<id>\d+)", name) if match: id = match.group('id') fid = self.format_instance(id=id) size = value * 1024 * 1024 json = {'id': fid, 'size': size, 'type': 'ESXI'} log.debug("json={}".format(json)) self._vms_disks.append(json) else: log.warning("No disk for name={} fid={}".format( name, fid)) else: log.debug("Could not extract machine name from machine={}". format(machine)) # Sending an empty line in the end to temporize before next command # was needed on electron log.debug("end of processing, sending empty line") self.ssh.shell_send(["\n"]) def _extract_vms_disk(self, vmpath, line): """ Parse output to get all vms disk consumption """ log.debug("Enter with vmpath={} line={}".format(vmpath, line)) d_match = re.search(vmpath + "/(?P<id>[a-zA-Z0-9_\-\.\/s]+)/", line) if d_match: id = d_match.group("id") log.debug("id={}".format(id)) s_match = re.search("(?P<size>\d+) bytes", line) if s_match: size = s_match.group("size") if id not in self._vms_disks_dict: self._vms_disks_dict[id] = int(size) else: self._vms_disks_dict[id] = int( self._vms_disks_dict[id]) + int(size) log.debug("id={} file size={} disk total={} ".format( id, size, self._vms_disks_dict[id])) def format_instance(self, id=''): """ Common format for VM id (3 digit format, ex: 001 or 032 or 121 or 002) """ log.debug("Enter with id={}".format(id)) result = id try: result = str(id).zfill(3) except: log.debug("Could not format id={}".format(id)) return result def dump_statistics(self): """ For debugging purpose, returns a formated json of self._statistics """ log.debug('Enter') print(json.dumps(self._statistics, indent=4, sort_keys=True)) def dump_vms(self): """ For debugging purpose, returns a formated json of self._vms """ log.debug('Enter') print(json.dumps(self._vms, indent=4, sort_keys=True)) def dump_vms_total(self): """ For debugging purpose, returns a formated json 
of self._vms_total """ log.debug('Enter') print(json.dumps(self._vms_total, indent=4, sort_keys=True))
class Vyos(object): """ classdocs """ def __init__(self, ip='', port=22, user='******', password='******', private_key_file='', traffic_policy='WAN', mock=False, debug=False): ''' Constructor ''' # create logger log.basicConfig( format= '%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s', datefmt='%Y%m%d:%H:%M:%S', filename='debug.log', level=log.NOTSET) if debug: self.debug = True log.basicConfig(level='DEBUG') log.debug( "Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, traffic_policy={}, debug={}" .format(ip, port, user, password, private_key_file, traffic_policy, debug)) # public attributs self.ip = ip self.port = port self.user = user self.password = password self.traffic_policy = traffic_policy self.moke_exception = '' self.moke_context = '' self.debug = debug self.ssh = Ssh(ip=ip, port=port, user=user, password=password, private_key_file=private_key_file, debug=debug) # private attributs self._config = {} # Internal representation of config def connect(self): self.ssh.connect() # Tracing wrapper on ssh def trace_open(self, filename="tracefile.log"): self.ssh.trace_open(filename=filename) def trace_write(self, line): self.ssh.trace_write(line) def trace_mark(self, mark): self.ssh.trace_mark(mark) def close(self): self.ssh.close() def get_traffic_policy(self): """ Get network-emulator settings for the given interface Fills self._json with settings for the interfaces with keys like : 'network_delay' (in ms), 'packet_loss' (in %), 'packet-corruption (in %), 'packet_reordering' (in %) 'bandwidth in mbps (only mbps supported) -'0' means no limitation """ log.debug("Enter") # default values network_delay = 0 packet_corruption = 0 packet_loss = 0 packet_reordering = 0 bandwidth = 0 # Send command if not self.ssh.connected: self.ssh.connect() # issue command and capture output self.run_op_mode_command( "show configuration commands | grep network-emulator\n") 
log.debug("output={}".format(self.ssh.output)) # Ex of output (all or some lines may be missing if not defined # set traffic-policy network-emulator WAN burst '15k' # set traffic-policy network-emulator WAN network-delay '100' # set traffic-policy network-emulator WAN packet-loss '0' # set traffic-policy network-emulator WAN packet-reordering '0' # BW: if not in the config, it is not defined (there is no '0') # set traffic-policy network-emulator WAN bandwidth 100mbps # parse output and extract settings # delay search_delay = "(?:network-emulator\s" + self.traffic_policy + "\snetwork-delay\s')(\d+)(?:m?s?')" match_delay = re.search(search_delay, str(self.ssh.output)) if match_delay: network_delay = match_delay.group(1) log.debug("match network_delay={}".format(network_delay)) # packet-corruption search_corruption = "(?:network-emulator\s" + self.traffic_policy + "\spacket-corruption\s')(\d+)'" match_corruption = re.search(search_corruption, str(self.ssh.output)) if match_corruption: packet_corruption = match_corruption.groups(0)[0] log.debug("match packet_corruption={}".format(packet_corruption)) # packet-loss search_loss = "(?:network-emulator\s" + self.traffic_policy + "\spacket-loss\s')(\d+)'" match_loss = re.search(search_loss, str(self.ssh.output)) if match_loss: packet_loss = match_loss.groups(0)[0] log.debug("match packet_loss={}".format(packet_loss)) # packet-reordering search_reorder = "(?:network-emulator\s" + self.traffic_policy + "\spacket-reordering\s')(\d+)'" match_reorder = re.search(search_reorder, str(self.ssh.output)) if match_reorder: packet_reordering = match_reorder.groups(0)[0] log.debug("match packet_reordering={}".format(packet_reordering)) # Bandwidth search_bandwidth = "(?:network-emulator\s" + self.traffic_policy + "\sbandwidth\s')(\d+)" match_bandwidth = re.search(search_bandwidth, str(self.ssh.output)) if match_bandwidth: bandwidth = match_bandwidth.groups(0)[0] log.debug("match bandwidth={}".format(bandwidth)) # apply values 
self._config['network_delay'] = network_delay self._config['packet_corruption'] = packet_corruption self._config['packet_loss'] = packet_loss self._config['packet_reordering'] = packet_reordering self._config['bandwidth'] = bandwidth # If needed, return JSON return (json.dumps(self._config)) def set_traffic_policy(self, network_delay='', packet_loss='', packet_reordering='', packet_corruption='', bandwidth='', exit=True, save=True, commit=True, configure=True): """ Sets network-emulator settings optional arguments : - network_delay <number> in ms - packet_corruption <number> in % - packet_loss <number> in % - packet_reordering <number> in % - bandwidth <number> in mbps (only mbps supported) Following options are all enabled by default but it is made configurable to fasten processing when multiple config should be done successively on the same unit : - exit : Force a disconnection once done - save : Forces a saving of config - commit : Apply the configuration - configure : Enter configuration mode """ flag_configured = False command_list = [] log.debug( "Enter with network_delay={} packet_loss={} packet_reordering={} packet_corruption={} bandwidth={}" .format(network_delay, packet_loss, packet_reordering, packet_corruption, bandwidth)) # Process delay if (network_delay): log.debug('processing network_delay=%s' % (network_delay)) flag_configured = True # ex : set traffic-policy network-emulator WAN network-delay 80 cmd = "set traffic-policy network-emulator " + self.traffic_policy + " network-delay " + str( network_delay) + "\n" command_list.append(cmd) # Process packet_loss if (packet_loss): log.debug('processing packet_loss=%s' % (packet_loss)) flag_configured = True # set traffic-policy network-emulator WAN packet-loss 0 cmd = "set traffic-policy network-emulator " + self.traffic_policy + " packet-loss " + str( packet_loss) + "\n" command_list.append(cmd) # Process packet_corruption if (packet_corruption): log.debug('processing packet_corruption=%s' % 
(packet_corruption)) flag_configured = True # set traffic-policy network-emulator WAN packet-corruption 0 cmd = "set traffic-policy network-emulator " + self.traffic_policy + " packet-corruption " + str( packet_corruption) + "\n" command_list.append(cmd) # Process packet reordering if (packet_reordering): log.debug('processing packet_reordering=%s' % (packet_reordering)) flag_configured = True # set traffic-policy network-emulator WAN packet-reordering 2 cmd = "set traffic-policy network-emulator " + self.traffic_policy + " packet-reordering " + str( packet_reordering) + "\n" command_list.append(cmd) # Process bandwidth if (str(bandwidth)): log.debug('processing bandwidth=%s' % (bandwidth)) flag_configured = True # a value '0' means the config statement should be removed # value '0' is not supported in vyos configuration if (str(bandwidth) == '0'): log.debug('need config statement removal') cmd = "delete traffic-policy network-emulator " + self.traffic_policy + " bandwidth" + "\n" command_list.append(cmd) else: # set traffic-policy network-emulator WAN bandwidth 100mbps cmd = "set traffic-policy network-emulator " + self.traffic_policy + " bandwidth " + str( bandwidth) + "mbps" + "\n" command_list.append(cmd) # Processing commands if (flag_configured): # Enter configuration more if configure: self.ssh.shell_send(["configure\n"]) else: log.debug("configure is bypassed") # Issue our list of configuration commands self.ssh.shell_send(command_list) # Commit and save if commit: self.ssh.shell_send(["commit\n"]) else: log.debug("commit is bypassed") if save: self.ssh.shell_send(["save\n"]) else: log.debug("save is bypassed") # Exit from configuration mode if exit: self.ssh.shell_send(["exit\n"]) else: log.debug("exit is bypassed") def dump_config(self): """ For troubleshooting, dump internal representation for the configuration """ print(json.dumps(self._config, indent=4)) def run_op_mode_command(self, cmd): """ Use netcontrol shell to send commands to vyos """ 
log.debug("Enter run_op_mode_command with cmd={}".format(cmd)) self.ssh.shell_send([cmd]) return (self.ssh.output)
class Fortigate(object):
    """
    Controller for a FortiGate appliance driven through its ssh CLI.

    Provides vdom/global navigation, status/license extraction, IPsec and
    BGP inspection, SD-WAN service details and session-table queries.
    """

    def __init__(self,
                 ip='',
                 port=22,
                 user='******',
                 password='',
                 private_key_file='',
                 mock=False,
                 debug=False):
        '''
        Constructor

        Args:
            ip: FortiGate management ip
            port: ssh port (default 22)
            user / password / private_key_file: ssh credentials
            mock: accepted for API symmetry with the other classes
            debug: enable debug logging
        '''
        # create logger
        log.basicConfig(
            format=
            '%(asctime)s,%(msecs)3.3d %(levelname)-8s[%(module)-7.7s.%(funcName)-30.30s:%(lineno)5d] %(message)s',
            datefmt='%Y%m%d:%H:%M:%S',
            filename='debug.log',
            level=log.NOTSET)
        if debug:
            self.debug = True
            log.basicConfig(level='DEBUG')

        log.debug(
            "Constructor with ip={}, port={}, user={}, password={}, private_key_file={}, debug={}"
            .format(ip, port, user, password, private_key_file, debug))

        # public attributs
        self.ip = ip
        self.port = port
        self.user = user
        self.password = password
        self.private_key_file = private_key_file
        self.moke_context = ''
        self.debug = debug
        self.ssh = Ssh(ip=ip,
                       port=port,
                       user=user,
                       password=password,
                       private_key_file=private_key_file,
                       debug=debug)

        # private attributs

    def connect(self):
        self.ssh.connect()

    # Tracing wrapper on ssh

    def trace_open(self, filename="tracefile.log"):
        self.ssh.trace_open(filename=filename)

    def trace_write(self, line):
        self.ssh.trace_write(line)

    def trace_mark(self, mark):
        self.ssh.trace_mark(mark)

    def close(self):
        if self.ssh:
            self.ssh.close()

    def cli(self, commands=[]):
        """
        Sends a list of commands to FortiGate CLI, one after each other.

        ex : myFgt.cli(commands=['exec date', 'exec time'])
        ex : myFgt.cli(commands=['get system status'])
        """
        log.debug("Enter with commands={}".format(commands))

        # Send command
        if not self.ssh.connected:
            self.ssh.connect()

        # issue command and capture output
        for command in commands:
            command = command + "\n"
            self.run_op_mode_command(command)
            log.debug("command={} output={}".format(command, self.ssh.output))

    def enter_vdom(self, vdom=None):
        """
        Enters a specific vdom (end -> config vdom -> edit VDOM).

        Returns: True when the vdom prompt was confirmed in the output.
        Raises: SystemExit when no vdom name is given.
        """
        log.debug("Enter with vdom={}".format(str(vdom)))
        result = False
        if not vdom:
            log.error("please provide vdom name")
            raise SystemExit
        if not self.ssh.connected:
            self.ssh.connect()

        # Leave current vdom or global section
        self.run_op_mode_command("end\n")

        # Enter vdom
        self.run_op_mode_command("config vdom\n")
        self.run_op_mode_command("edit " + str(vdom) + "\n")
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))
            # prompt looks like : FGT-1B2-9 (customer) #
            match_vdom = re.search(r"\s\((?P<vd>\S+)\)\s", line)
            if match_vdom:
                vd = match_vdom.group('vd')
                log.debug("Found vd={} in line={}".format(str(vd), line))
                if vd == vdom:
                    log.debug("Confirmed vdom prompt")
                    result = True
        return result

    def enter_global(self):
        """
        Enters global section (end -> config global).

        Returns: True when the (global) prompt was confirmed in the output.
        """
        log.debug("Enter")
        result = False
        if not self.ssh.connected:
            self.ssh.connect()

        # Leave current vdom or global section
        self.run_op_mode_command("end\n")

        # Enter global
        self.run_op_mode_command("config global\n")
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))
            match_global = re.search(r"\s\(global\)\s", line)
            if match_global:
                log.debug("Confirmed global prompt")
                result = True
        return result

    def get_status(self):
        """
        Returns a dictionary with FortiGate version and license status.

        Uses "get system status".
        return : { 'version': 'v6.2.3,build1066,191219',
                   'license': True|False }
        """
        log.debug("Enter")
        result = {}
        result['version'] = ""
        result['license'] = ""
        found_version = False
        found_license = False
        if not self.ssh.connected:
            self.ssh.connect()

        self.run_op_mode_command(
            "get sys status | grep '^Version\|License St'\n")
        # Ex of output:
        # Version: FortiGate-VM64-KVM v6.2.3,build8348,200304 (GA)
        # License Status: Valid
        match_version = re.search(
            r"(?:Version:\s[A-Za-z0-9-]+)\s(?P<version>\S+)", self.ssh.output)
        if match_version:
            found_version = True
            result['version'] = match_version.group('version')
            log.debug("found version={}".format(result['version']))

        match_license = re.search(r"(?:License\sStatus:\s)(?P<license>\S+)",
                                  self.ssh.output)
        if match_license:
            found_license = True
            result['license'] = False
            license = match_license.group('license')
            log.debug("found license={}".format(license))
            if license == 'Valid':
                result['license'] = True

        if not found_version:
            log.error("Could not extract version")
        if not found_license:
            log.error("Could not extract license status")

        log.debug("result={}".format(result))
        return result

    def get_ike_and_ipsec_sa_number(self):
        """
        Returns a dictionary with the number of 'created' and 'established'
        ike and ipsec SA, from "diagnose vpn ike status".

        ex : { 'ike': {'created': 3, 'established': 3},
               'ipsec': {'created': 3, 'established': 3} }
        """
        log.debug("Enter")
        result = {'ike': {}, 'ipsec': {}}
        if not self.ssh.connected:
            self.ssh.connect()
        self.run_op_mode_command("diagnose vpn ike status\n")
        # Ex of output:
        # IKE SA: created 3/348  established 3/3  times 0/2083/3220 ms
        # IPsec SA: created 3/348  established 3/3  times 0/2083/3220 ms
        match_ike_sa = re.search(
            r"(?:IKE\sSA:\screated\s)(?P<created>\d+)(?:/\d+\s+established\s)(?P<established>\d+)",
            self.ssh.output)
        if match_ike_sa:
            ike_sa_created = match_ike_sa.group('created')
            ike_sa_established = match_ike_sa.group('established')
            log.debug("IKE SA : created={} established={}".format(
                ike_sa_created, ike_sa_established))
            result['ike']['created'] = ike_sa_created
            result['ike']['established'] = ike_sa_established
        else:
            log.debug("Could not extract IKE SA numbers")

        match_ipsec_sa = re.search(
            r"(?:IPsec\sSA:\screated\s)(?P<created>\d+)(?:/\d+\s+established\s)(?P<established>\d+)",
            self.ssh.output)
        if match_ipsec_sa:
            ipsec_sa_created = match_ipsec_sa.group('created')
            ipsec_sa_established = match_ipsec_sa.group('established')
            log.debug("IPsec SA : created={} established={}".format(
                ipsec_sa_created, ipsec_sa_established))
            result['ipsec']['created'] = ipsec_sa_created
            result['ipsec']['established'] = ipsec_sa_established
        else:
            log.debug("Could not extract IPsec SA numbers")

        log.debug("result={}".format(result))
        return result

    def get_bgp_routes(self, vrf='0'):
        """
        Returns information on BGP routes for the given VRF, from
        "get router info routing-table bgp". Both plain and recursive
        routes are counted.

        ex : { 'total': 6,
               'recursive': 0,
               'subnet': ['10.0.0.0/24', '10.0.2.0/24'],
               'nexthop': ['10.255.0.253', ...],
               'interface': ['vpn_mpls', 'vpn_isp1', 'vpn_isp2'] }
        """
        log.debug("Enter with vrf={}".format(vrf))
        result = {'total': {}, 'subnet': [], 'nexthop': [], 'interface': []}
        if not self.ssh.connected:
            self.ssh.connect()
        self.run_op_mode_command("get router info routing-table bgp\n")

        # Start checking routes when seeing "VRF=xxx"
        vrf_flag = False
        nb_route = 0
        nb_recursive_route = 0
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))
            if not vrf_flag:
                match_vrf = re.search(r"Routing\stable\sfor\sVRF=" + str(vrf),
                                      line)
                if match_vrf:
                    log.debug("Found VRF={} in line={}".format(str(vrf), line))
                    vrf_flag = True
            else:
                # Look for a subnet
                match_subnet = re.search(r"^(?:B\s+)(?P<subnet>[0-9./]+)",
                                         line)
                if match_subnet:
                    subnet = match_subnet.group('subnet')
                    log.debug("found subnet={}".format(subnet))
                    result['subnet'].append(subnet)

                # Look for nexthop and interface + count number of routes
                # Track non recursive routes
                match_nexthop = re.search(
                    r"]\s+via\s+(?P<nexthop>[0-9.]+),\s+(?P<interface>\w+)",
                    line)
                if match_nexthop:
                    nexthop = match_nexthop.group('nexthop')
                    interface = match_nexthop.group('interface')
                    nb_route = nb_route + 1
                    log.debug(
                        "found nexthop={} interface={} nb_route={} nb_recursive={}"
                        .format(nexthop, interface, nb_route,
                                nb_recursive_route))
                    if nexthop not in result['nexthop']:
                        result['nexthop'].append(nexthop)
                    if interface not in result['interface']:
                        result['interface'].append(interface)

                # Track recursive routes
                match_nexthop = re.search(
                    r"]\s+via\s+(?P<nexthop>[0-9.]+)\s+\(recursive\s.+\,\s+(?P<interface>\w+)\),",
                    line)
                if match_nexthop:
                    nexthop = match_nexthop.group('nexthop')
                    interface = match_nexthop.group('interface')
                    nb_route = nb_route + 1
                    nb_recursive_route = nb_recursive_route + 1
                    log.debug(
                        "found nexthop={} interface={} nb_route={} nb_recursive={}"
                        .format(nexthop, interface, nb_route,
                                nb_recursive_route))
                    if nexthop not in result['nexthop']:
                        result['nexthop'].append(nexthop)
                    if interface not in result['interface']:
                        result['interface'].append(interface)

        result['total'] = nb_route
        result['recursive'] = nb_recursive_route
        log.debug("result={}".format(result))
        return result

    def get_sdwan_service(self, service='1', version='6.4'):
        """
        Returns a dictionary describing
        'diagnose sys sdwan service <service>' : overall mode and, per
        member, seq_num, status (alive/dead) and sla value when in sla mode.

        210623 : as of 6.4, command was changed to "diagnose sys sdwan
        service". Use version='6.2' for the old
        'diagnose sys virtual-wan-link service' command.
        """
        log.debug("Enter with service={} version={}".format(service, version))
        result = {'members': {}, 'mode': ''}
        members_flag = False
        mode = ''
        if not self.ssh.connected:
            self.ssh.connect()
        cmd = "diagnose sys sdwan service"
        if version == '6.2':
            log.debug("old 6.2 command required")
            cmd = 'diagnose sys virtual-wan-link service'
        self.run_op_mode_command("{} {}\n".format(cmd, service))
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))

            # Get mode
            match_mode = re.search(r"(?:,\sMode\()(?P<mode>\S+)(?:\))", line)
            if match_mode:
                mode = match_mode.group('mode')
                log.debug("found mode={}".format(mode))
                result['mode'] = mode

            # Get members details
            if members_flag:
                match_member = re.search(
                    r"(?:\s+)(?P<order>\d+)(?::\sSeq_num\()(?P<seq>\d+)(?:\s\S+)?(?:\),\s)(?P<status>alive|dead)",
                    line)
                if match_member:
                    order = match_member.group('order')
                    seq = match_member.group('seq')
                    status = match_member.group('status')
                    log.debug("Found order={} seq={} status={}".format(
                        order, seq, status))
                    result['members'][order] = {}
                    result['members'][order]['seq_num'] = seq
                    result['members'][order]['status'] = status

                    # If sla mode, get sla value
                    if mode == 'sla':
                        log.debug("sla mode, get sla value")
                        match_sla_value = re.search(
                            r"(?:,\ssla\()(?P<sla>0x[0-9a-z]+)(?:\),)", line)
                        if match_sla_value:
                            sla = match_sla_value.group('sla')
                            log.debug("Found sla={}".format(sla))
                            result['members'][order]['sla'] = sla
                        else:
                            log.error(
                                "Could not extract sla value from member")

            # Get members section start
            # NOTE(review): the alternation makes this match any line
            # containing ':' as well, not only '  Members(' — harmless in
            # practice because the member regex above is specific; confirm
            # before tightening
            match_member_section = re.search(r"\s\sMembers\(|:", line)
            if match_member_section:
                log.debug("found start of members section")
                members_flag = True
        return result

    def get_session(self, filter={}):
        """
        Filter and retrieve a session from the session list.

        The provided filter dictionary is based on the
        'diagnose sys session filter' keywords (vd, sintf, dintf, src, nsrc,
        dst, proto, sport, nport, dport, policy, expire, duration,
        proto-state, session-state1, session-state2, ext-src, ext-dst,
        ext-src-negate, ext-dst-negate, negate).

        Returns a dictionary with the elements of the matched session, e.g.
        { 'src', 'sport', 'dest', 'dport', 'proto', 'proto_state', 'state',
          'statistics', 'dev', 'gwy', 'duration', 'expire', 'timeout',
          'total' } — see _session_analysis.

        Raises: SystemExit on an unknown filter keyword.
        """
        log.debug("Enter with filter={}".format(filter))
        result = {}
        allowed_keys = [
            'vd', 'sintf', 'dintf', 'src', 'nsrc', 'dst', 'proto', 'sport',
            'nport', 'dport', 'policy', 'expire', 'duration', 'proto-state',
            'session-state1', 'session-state2', 'ext-src', 'ext-dst',
            'ext-src-negate', 'ext-dst-negate', 'negate'
        ]
        command_list = ["diagnose sys session filter clear\n"]
        for key in filter:
            log.debug("key={} value={}".format(key, filter[key]))
            if key not in allowed_keys:
                log.error("unknown session key={}".format(key))
                raise SystemExit
            else:
                command_list.append("diagnose sys session filter " + key +
                                    " " + str(filter[key]) + "\n")
        command_list.append("diagnose sys session list\n")
        self.ssh.shell_send(command_list)
        result = self._session_analysis()
        return (result)

    def run_op_mode_command(self, cmd):
        """
        Use netcontrol shell to send a command to the FortiGate.

        Returns: raw ssh output
        """
        log.debug("Enter with cmd={}".format(cmd))
        self.ssh.shell_send([cmd])
        return (self.ssh.output)

    def _session_analysis(self):
        """
        Returns a dict reflecting the session printed in self.ssh.output
        (output of 'diagnose sys session list').
        """
        log.debug("Enter")
        result = {}

        # Parse and build session json
        for line in self.ssh.output.splitlines():
            log.debug("line={}".format(line))

            # session info: proto=6 proto_state=01 duration=375 expire=3599 timeout=3600 ...
            match_session_info = re.search(
                r"^session\sinfo:\sproto=(?P<proto>\d+)\sproto_state=(?P<proto_state>\d+)\sduration=(?P<duration>\d+)\sexpire=(?P<expire>\d+)\stimeout=(?P<timeout>\d+)",
                line)
            if match_session_info:
                proto = match_session_info.group('proto')
                proto_state = match_session_info.group('proto_state')
                duration = match_session_info.group('duration')
                expire = match_session_info.group('expire')
                timeout = match_session_info.group('timeout')
                log.debug(
                    "session-info : proto={} proto_state={} duration={} expire={} timeout={}"
                    .format(proto, proto_state, duration, expire, timeout))
                result['proto'] = proto
                result['proto_state'] = proto_state
                result['duration'] = duration
                result['expire'] = expire
                result['timeout'] = timeout

            # state=log local may_dirty
            match_state = re.search(r"^state=(?P<state>.+)", line)
            if match_state:
                states = []
                session_states = match_state.group('state')
                log.debug("states: {}".format(session_states))
                for flag in session_states.split():
                    log.debug("flag={}".format(flag))
                    states.append(flag)
                result['state'] = states

            # statistic(bytes/packets/allow_err): org=28670/369/1 reply=21275/200/1 tuples=2
            match_statistic = re.search(
                r"^statistic\(bytes/packets/allow_err\):\sorg=(?P<org_byte>\d+)/(?P<org_packet>\d+)/\d\sreply=(?P<reply_byte>\d+)/(?P<reply_packet>\d+)",
                line)
            if match_statistic:
                stats = {}
                org_byte = match_statistic.group('org_byte')
                org_packet = match_statistic.group('org_packet')
                reply_byte = match_statistic.group('reply_byte')
                reply_packet = match_statistic.group('reply_packet')
                log.debug(
                    "org_byte={} org_packet={} reply_byte={} reply_packet={}".
                    format(org_byte, org_packet, reply_byte, reply_packet))
                stats['org_byte'] = org_byte
                stats['org_packet'] = org_packet
                stats['reply_byte'] = reply_byte
                stats['reply_packet'] = reply_packet
                result['statistics'] = stats

            # orgin->sink: org pre->in, reply out->post dev=28->24/24->28 gwy=10.199.3.1/0.0.0.0
            match_dev_gw = re.search(
                r"\sdev=(?P<dev>[0-9-/>]+)\sgwy=(?P<gwy>[0-9./]+)", line)
            if match_dev_gw:
                dev = match_dev_gw.group('dev')
                gwy = match_dev_gw.group('gwy')
                log.debug("dev={} gwy={}".format(dev, gwy))
                # bugfix: dev/gwy were extracted then dropped, although the
                # get_session contract documents them in the result
                result['dev'] = dev
                result['gwy'] = gwy

            # hook=pre dir=org act=noop 10.199.3.10:36990->10.199.3.1:222(0.0.0.0:0)
            match_ip = re.search(
                r"^hook=pre\sdir=org\sact=noop\s(?P<src>[0-9.]+):(?P<sport>\d+)->(?P<dest>[0-9.]+):(?P<dport>\d+)",
                line)
            if match_ip:
                src = match_ip.group('src')
                sport = match_ip.group('sport')
                dest = match_ip.group('dest')
                dport = match_ip.group('dport')
                result['src'] = src
                result['sport'] = sport
                result['dest'] = dest
                # bugfix: dport was extracted and logged but never stored
                result['dport'] = dport
                log.debug("src={} sport={} dest={} dport={}".format(
                    src, sport, dest, dport))

            # Total session (should be 1 ideally)
            match_total_session = re.search(r"^total\ssession\s(?P<total>\d+)",
                                            line)
            if match_total_session:
                total = match_total_session.group('total')
                result['total'] = total

        log.debug("result={}".format(result))
        return result
def connect(self, type=''):
    """
    Connect to agent without sending any command.
    This opens the ssh channel for data exchange and tracefile.

    Args:
        type: agent flavor, one of 'lxc', 'vyos', 'fortipoc',
              'fortigate', 'fortiswitch'
              (name 'type' kept for backward compatibility although it
              shadows the builtin)

    Returns: True on success (always True in dryrun mode), False when the
             ssh connection failed.
    Raises: SystemExit on unknown type.
    """
    log.info("Enter with type={}".format(type))
    ip = self.agent['ip']
    port = self.agent['port']
    login = self.agent['login']
    password = self.agent['password']
    ssh_key_file = self.agent['ssh_key_file']
    log.debug("ip={} port={} login={} password={} ssh_key_file={}".format(
        ip, port, login, password, ssh_key_file))
    success = True
    if not self.dryrun:
        # Dispatch table instead of the previous if/elif chain
        agent_classes = {
            'lxc': Ssh,
            'vyos': Vyos,
            'fortipoc': Fpoc,
            'fortigate': Fortigate,
            'fortiswitch': Fortiswitch,
        }
        if type not in agent_classes:
            log.error("unknown type")
            raise SystemExit
        self._ssh = agent_classes[type](ip=ip,
                                        port=port,
                                        user=login,
                                        password=password,
                                        private_key_file=ssh_key_file,
                                        debug=self.debug)
        tracefile_name = self.get_filename(type='trace')
        self._ssh.trace_open(filename=tracefile_name)
        try:
            # bugfix: connect()'s return value was assigned to success and
            # then unconditionally overwritten; bare 'except:' narrowed
            self._ssh.connect()
            self._connected = True
            success = True
        except Exception:
            log.error("Connection to agent {} failed".format(self.name))
            success = False
    else:
        log.debug("dryrun mode")
    return success