def main():
    """CLI entry point: create or delete an ip-netns-backed VM.

    Parses command-line arguments and dispatches to ipnetns_vm to either
    create a VM attached to a neutron network (-a) or delete an existing
    VM (-d).

    :raises exceptions.ArgMismatchException: when the flag combination
        is incomplete (no net on add) or neither action flag is given.
    """
    # Parse args
    parser = argparse.ArgumentParser(
        description='Create a VM with port and attach to neutron network',
        formatter_class=argparse.RawTextHelpFormatter)
    # Fix: the help strings for -a and -d previously both read 'VM Name'
    # (copy-paste error); they now describe the action each flag selects.
    parser.add_argument('-a', '--add', action='store_true', default=False,
                        help='Create (add) a new VM', required=False)
    parser.add_argument('-d', '--delete', action='store_true', default=False,
                        help='Delete an existing VM', required=False)
    parser.add_argument('-v', '--vm-name', action='store', default=None,
                        help='VM Name', required=True)
    parser.add_argument('-n', '--net', action='store', default=None,
                        help='Net name or ID to start VM', required=False)
    parser.add_argument('-i', '--ip', action='store', default=None,
                        help='IP to assign to VM eth0', required=False)
    parser.add_argument('-p', '--port-id', action='store', default=None,
                        help='Port ID for the VM', required=False)
    args = parser.parse_args()

    # Defensive double-check; argparse already enforces required=True,
    # so this only fires if that requirement is ever relaxed.
    if not args.vm_name:
        raise exceptions.ArgMismatchException(
            "Must specify a vm name to add or delete.")

    if args.add:
        # Creating a VM needs a network to attach its port to.
        if not args.net:
            raise exceptions.ArgMismatchException(
                "Must specify net name or ID to create a VM.")
        ipnetns_vm.create_vm(args.vm_name, args.net, args.ip)
    elif args.delete:
        ipnetns_vm.delete_vm(args.vm_name, args.port_id)
    else:
        raise exceptions.ArgMismatchException(
            "Must specify action to either add (-a) or delete (-d) a VM.")
def find_port_by_name_or_id(nc, port_name):
    """Resolve a single Neutron port from a name or, failing that, an ID.

    :param nc: neutron client exposing list_ports() / show_port()
    :param port_name: port name (preferred) or port ID
    :return: the matching port dict
    :raises exceptions.ArgMismatchException: if the name matches more
        than one port, or if nothing matches by name or by ID
    """
    matches = [p for p in nc.list_ports()['ports']
               if p['name'] == port_name]

    # An ambiguous name cannot be resolved; the caller must use the ID.
    if len(matches) > 1:
        raise exceptions.ArgMismatchException(
            'Multiple ports of name found: ' + port_name + '; try with ID.')

    if matches:
        return matches[0]

    # Nothing carries that name; treat the argument as a port ID instead.
    try:
        return nc.show_port(port_name)['port']
    except nex.NotFound:
        raise exceptions.ArgMismatchException(
            'Port not found with name or ID: ' + port_name)
def find_net_by_name_or_id(nc, net_name):
    """Resolve a single Neutron network from a name or, failing that, an ID.

    :param nc: neutron client exposing list_networks() / show_network()
    :param net_name: network name (preferred) or network ID
    :return: the matching network dict
    :raises exceptions.ArgMismatchException: if the name matches more
        than one network, or if nothing matches by name or by ID
    """
    matches = [n for n in nc.list_networks()['networks']
               if n['name'] == net_name]

    # An ambiguous name cannot be resolved; the caller must use the ID.
    if len(matches) > 1:
        raise exceptions.ArgMismatchException('Multiple nets of name found: ' +
                                              net_name + '; try with ID.')

    if matches:
        return matches[0]

    # Nothing carries that name; treat the argument as a network ID instead.
    try:
        return nc.show_network(net_name)['network']
    except nex.NotFound:
        raise exceptions.ArgMismatchException(
            'Net not found by ID or name: ' + net_name)
def parse(self, parse_class_stack=None):
    """Parse the raw packet data through a stack of layer parsers.

    :param parse_class_stack: list[class] Stack of classes to parse
        packet (highest layer first in list).  When None or empty,
        parsing starts with PCAPEthernet and follows each layer's
        recommended next parser until one recommends None.
    :return: dict[str, PCAPEncapsulatedLayer] map of layer name to
        the parsed layer object (also stored on self.layer_data)
    :raises exceptions.ArgMismatchException: if a supplied parser class
        is not a PCAPEncapsulatedLayer
    :raises exceptions.PacketParsingException: re-raised when a layer
        reports a fatal parsing error
    """
    self.extra_data['parse_classes'] = []
    self.extra_data['parse_types'] = []

    # Start parsing with the whole packet (starting from Link-Layer)
    current_data = self.packet_data

    # By default, the parsing stack is None, which tells us to figure
    # it out automatically, so let's start with Ethernet_II, as it's
    # the most common link layer protocol.  If it is empty at any point,
    # that means to just use the recommended parser, which in this case
    # would mean to use Ethernet, since it's the first step.
    if parse_class_stack is None or len(parse_class_stack) == 0:
        parse_class = PCAPEthernet
    else:
        # Otherwise, let's pop the first parsing class and continue
        parse_class = parse_class_stack.pop()

    # If there are no more parsers to run in the stack, finish up
    while parse_class is not None:
        # Instantiate the parser once and reuse the instance for both
        # the type check and the actual parsing (fix: the previous code
        # instantiated every parser class twice per iteration).
        link_obj = parse_class()
        """ :type: PCAPEncapsulatedLayer"""
        if not isinstance(link_obj, PCAPEncapsulatedLayer):
            raise exceptions.ArgMismatchException(
                'Parsing classes must be of type "PCAPEncapsulatedLayer"')

        # Record bookkeeping info about the classes used to parse.
        self.extra_data['parse_classes'].append(parse_class.__name__)
        self.extra_data['parse_types'].append(parse_class.layer_name())
        self.extra_data['parse_errors.' + parse_class.layer_name()] = []

        try:
            # Parse the current packet data and set the result as the
            # new packet data for the next layer to parse.
            current_data = link_obj.parse_layer(current_data)
        except exceptions.PacketParsingException as e:
            # Record the error; only abort when the layer flags it as
            # fatal (non-fatal errors leave current_data unchanged).
            self.extra_data[
                'parse_errors.' + parse_class.layer_name()].append(e.info)
            if e.fatal is True:
                raise e

        # Set the item in the data map with the parsed object keyed to
        # the name the object itself uses to access the data
        self.layer_data[parse_class.layer_name()] = link_obj

        # Get the next parser's class.  If the stack is empty (or not
        # defined), use the parser recommended by the layer we just
        # parsed.  If the next parser is "None", that means we are
        # finished parsing and exit the loop.
        if parse_class_stack is None or len(parse_class_stack) == 0:
            parse_class = link_obj.next_parse_recommendation
        else:
            # If the stack has a next step, use that instead.  If it's
            # "None", that signals us to stop parsing and finish the loop.
            parse_class = parse_class_stack.pop()

    return self.layer_data
if arg in ('-i', 'ip'): ip_addr = value elif arg in ('-p', 'port'): port = int(value) elif arg in ('-c', 'protocol'): protocol = value elif arg in ('-t', 'timeout'): timeout = float(value) elif arg in ('-o', 'out-str'): echo_request_string = value elif arg in ('-h', 'help'): usage() exit(0) else: usage() raise exceptions.ArgMismatchException("Option not recognized: " + arg) try: req = echo_request_string + TERMINATION_STRING if protocol == 'tcp': new_socket = socket.create_connection((ip_addr, port), timeout) new_socket.sendall(req) elif protocol == 'udp': new_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) new_socket.settimeout(timeout) new_socket.sendto(req, (ip_addr, port)) else: raise exceptions.ArgMismatchException('Unsupported self.protocol: ' + protocol) new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
def read_packet(cli=LinuxCLI(), flag_set=None, interface='any', count=1,
                packet_type='', pcap_filter=None, max_size=0,
                packet_queues=None, callback=None, callback_args=None,
                save_dump_file=False, save_dump_filename=None):
    """Capture packets on `interface` via tcpdump and queue parsed packets.

    Spawns `tcpdump | tee <tmpfile>` through `cli`, waits until tcpdump
    reports 'listening on' on stderr, then tails the tee'd dump file,
    parsing each hex-dump packet into a pcap_packet.PCAPPacket, pushing
    it onto the packet queue and invoking `callback` per packet if given.

    :param cli: CLI wrapper used to launch the capture processes.
        NOTE(review): the default LinuxCLI() is evaluated once at import
        time and shared across calls -- confirm this is intended.
    :param flag_set: optional 4-sequence of Event-like objects
        (ready, error, stop, finished) for synchronizing with external
        callers; fresh threading.Event objects are created when None.
    :param interface: interface name passed to `tcpdump -i`.
    :param count: packet count for `tcpdump -c`; <= 0 means unlimited.
    :param packet_type: value for `tcpdump -T` when non-empty.
    :param pcap_filter: optional filter object; its to_str() output is
        appended to the tcpdump command line when not None.
    :param max_size: snap length for `tcpdump -s` when non-zero.
    :param packet_queues: optional pair (packet_queue, status_queue);
        fresh Queue.Queue objects are created when None.
    :param callback: optional callable invoked with each parsed packet.
    :param callback_args: extra positional arguments for `callback`.
    :param save_dump_file: when True, copy the raw dump before cleanup.
    :param save_dump_filename: destination for the saved dump; defaults
        to a timestamped 'tcp.out.*' name when None.
    :return: the queue onto which parsed PCAPPacket objects are pushed.
    :raises exceptions.SubprocessFailedException: if tcpdump exits
        before it starts listening.
    :raises exceptions.ArgMismatchException: if the dump file is missing
        its --START-- sentinel line.
    """
    # Unique temporary file that tee appends the tcpdump output into.
    tmp_dump_filename = './.tcpdump.out.' + str(time.time())
    # NOTE(review): initialized to a list but later replaced by the
    # object returned from cli.cmd(); if an exception fires before that
    # assignment, the .terminate() in the finally block would fail on
    # the bare list -- confirm against LinuxCLI.cmd's contract.
    tcp_processes = []
    try:
        # If flag set provided, use them instead, for synch with
        # external functions
        tcp_ready = threading.Event() \
            if flag_set is None else flag_set[0]
        tcp_error = threading.Event() \
            if flag_set is None else flag_set[1]
        tcp_stop = threading.Event() \
            if flag_set is None else flag_set[2]
        tcp_finished = threading.Event() \
            if flag_set is None else flag_set[3]

        # If queue set provided, use them instead for synch
        # with external functions
        packet_queue = Queue.Queue() \
            if packet_queues is None else packet_queues[0]
        status_queue = Queue.Queue() \
            if packet_queues is None else packet_queues[1]

        # -n: no name resolution; -xx: hex dump incl. link layer;
        # -l: line-buffered stdout so tee sees data promptly.
        cmd1 = ['tcpdump', '-n', '-xx', '-l']
        cmd1 += ['-c', str(count)] \
            if count > 0 else []
        cmd1 += ['-i', interface]
        cmd1 += ['-s', str(max_size)] \
            if max_size != 0 else []
        cmd1 += ['-T', packet_type] \
            if packet_type != '' else []
        cmd1 += [pcap_filter.to_str()] \
            if pcap_filter is not None else []
        # Pipe through tee so the output is both streamed and persisted
        # to the temp dump file this function tails below.
        cmd2 = ['tee', '-a', tmp_dump_filename]

        # FLAG STATE: ready[clear], stop[clear], finished[clear]
        # Seed the dump file with a sentinel so we can verify tee is
        # appending to the file we created (Python 2 open() signature).
        with open(name=tmp_dump_filename, mode='w') as f:
            f.write("--START--\n")

        tcp_processes = cli.cmd(cmd_list=[cmd1, cmd2], blocking=False)
        tcp_piped_process = tcp_processes.process
        tcp_actual_process = tcp_processes.process_array[0]

        # set current p.stderr flags to NONBLOCK
        # Note that as stderr is NOT redirected through pipes, we
        # must listen on the actual tcpdump process's stderr
        # (not the tee process!)
        flags_se = fcntl(tcp_actual_process.stderr, F_GETFL)
        fcntl(tcp_actual_process.stderr, F_SETFL, flags_se | os.O_NONBLOCK)

        # Spin until tcpdump announces it is listening (or dies).
        err_out = ''
        while not tcp_ready.is_set():
            try:
                line = os.read(tcp_actual_process.stderr.fileno(), 256)
                if line.find('listening on') != -1:
                    # TODO(micucci): Replace sleep after TCPDump s
                    # starts with a real check
                    # This is dangerous,
                    # and might not actually be enough to signal the
                    # tcpdump is actually running. Instead, let's
                    # create a Cython module that passes calls through
                    # to libpcap (there are 0 good libpcap implementations
                    # for Python that are maintained, documented,
                    # and simple).
                    time.sleep(1)
                    tcp_ready.set()
                else:
                    err_out += line
                if tcp_piped_process.poll() is not None:
                    # tcpdump exited before becoming ready: report the
                    # failure on the status queue and via the error flag.
                    out, err = tcp_piped_process.communicate()
                    status_queue.put(
                        {'error': 'tcpdump exited abnormally',
                         'returncode': tcp_piped_process.returncode,
                         'stdout': out,
                         'stderr': err_out})
                    tcp_error.set()
                    raise exceptions.SubprocessFailedException(
                        'tcpdump exited abnormally with status: ' +
                        str(tcp_piped_process.returncode) +
                        ', out: ' + out +
                        ', err: ' + err +
                        ', err_out: ' + err_out)
                time.sleep(0)
            except OSError:
                # Non-blocking stderr has no data yet; keep polling.
                pass

        # FLAG STATE: ready[set], stop[clear], finished[clear]

        # tcpdump return output format:
        # hh:mm:ss.tick L3Proto <Proto-specific fields>\n
        # \t0x<addr>: FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF\n
        # \t0x<addr>: FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF\n
        # (eight quads of hexadecimal numbers representing 16
        # bytes or 4 32-bit words)
        #
        # hh:mm:ss.tick L3Proto <Proto-specific fields>\n
        # \t0x<addr>: FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF\n
        # \t0x<addr>: FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF\n
        # (Next packet)
        packet_data = []
        timestamp = ''
        with open(tmp_dump_filename, 'r+') as f:
            # Prepare for the first packet by reading the file
            # until the first packet's lines arrive (or until
            # stopped by a stop_capture call)
            if f.readline().rstrip() != '--START--':
                status_queue.put(
                    {'error': 'Expected --START-- tag at '
                              'beginning of dumpfile',
                     'returncode': tcp_piped_process.returncode,
                     'stdout': '',
                     'stderr': ''})
                tcp_error.set()
                raise exceptions.ArgMismatchException(
                    'Expected --START-- tag at beginning of dumpfile')
            while True:
                # Read the lines and either append data,
                # start a new packet, or finish
                line = f.readline()
                if line == '':  # EOF
                    # Is the tcpdump process finished or
                    # signaled to finish?
                    if tcp_piped_process.poll() is not None \
                            or tcp_stop.is_set():
                        # If we finished with packet data buffered up,
                        # append that packet to the queue
                        if len(packet_data) > 0:
                            # Create and parse the packet and push it
                            # onto the return list, calling
                            # the callback function if one is set.
                            packet = pcap_packet.PCAPPacket(
                                packet_data, timestamp)
                            packet_queue.put(packet)
                            if callback is not None:
                                callback(packet,
                                         *(callback_args
                                           if callback_args is not None
                                           else ()))
                        # Stop packet collection and exit
                        break
                    # Otherwise, we need to wait for data
                    time.sleep(0)
                elif line.startswith('\t'):
                    # Normal packet data: buffer into current packet
                    packet_data += parse_line_to_byte_array(line)
                else:
                    # We hit the end of the packet and will start
                    # a new packet Only run if we had packet data
                    # buffered
                    if len(packet_data) > 0:
                        # Create and parse the packet and push it onto
                        # the return list, calling the callback function
                        # if one is set.
                        packet = pcap_packet.PCAPPacket(
                            packet_data, timestamp)
                        packet_queue.put(packet)
                        if callback is not None:
                            callback(packet,
                                     *(callback_args
                                       if callback_args is not None
                                       else []))
                        packet_data = []
                    # Start the new packet by reading the timestamp
                    timestamp = line.split(' ', 2)[0]
    finally:
        # Save the tcpdump output (if requested), and delete the
        # temporary file
        if save_dump_file is True:
            LinuxCLI().copy_file(
                tmp_dump_filename,
                save_dump_filename
                if save_dump_filename is not None
                else 'tcp.out.' + str(time.time()))
        LinuxCLI().rm(tmp_dump_filename)
        tcp_processes.terminate()

    # Normal-completion status; error paths above push their own status
    # entry before raising.
    status_queue.put({'success': '',
                      'returncode': tcp_piped_process.returncode,
                      'stdout': tcp_piped_process.stdout,
                      'stderr': tcp_piped_process.stderr})
    # FLAG STATE: ready[set], stop[set], finished[clear]
    tcp_finished.set()
    # FLAG STATE: ready[set], stop[set], finished[set]
    return packet_queue