def accept(self):
    try:
        if not is_cygwin:
            s, sockaddr = self.socket().accept()
        else:
            begin = time.time()
            s, sockaddr = self.socket().accept()
            end = time.time()
            if end - begin > 1:
                log.info(0, 'accept cost %f seconds', end - begin)
                exit(-1)
    except IOError as exc:
        err = exc.errno
        if err != errno.EAGAIN and err != errno.EINTR:
            log.error(exc, 'accept() fail')
        return None
    except Exception as exc:
        log.error(exc, 'accept() fail')
        return None

    c = Connection()
    c.socket(s)
    c.listening = self
    c.nonblocking()
    c.keepalive()
    c.addr = Addr(self.addr)
    c.addr.parse_sockaddr(sockaddr)
    log.debug(0, '*%d accept: %s', c.index, c.addr.text)
    self.handler(c)
    return self
def test_neuron():
    config.sigmoid_coef = 1
    connections = {
        0: Connection(0, 0, 2, 0.2, True),
        1: Connection(1, 1, 2, 0.3, True),
        2: Connection(2, 2, 3, 0.4, True),
        3: Connection(3, 2, 2, 0.5, True)
    }
    neurons = {
        0: Neuron(0, 0),
        1: Neuron(1, 0),
        2: Neuron(2, 0.2),
        3: Neuron(3, 0.3)
    }
    neurons[0].add_outgoing(2)
    neurons[1].add_outgoing(2)
    return_path_exists = utility.check_if_path_exists_by_neurons(
        connections[0].to_key, connections[0].from_key, neurons)
    neurons[2].add_incoming(connections[0], return_path_exists)
    return_path_exists = utility.check_if_path_exists_by_neurons(
        connections[1].to_key, connections[1].from_key, neurons)
    neurons[2].add_incoming(connections[1], return_path_exists)
    return_path_exists = utility.check_if_path_exists_by_neurons(
        connections[3].to_key, connections[3].from_key, neurons)
    neurons[2].add_incoming(connections[3], return_path_exists)
    neurons[2].add_outgoing(3)
    return_path_exists = utility.check_if_path_exists_by_neurons(
        connections[2].to_key, connections[2].from_key, neurons)
    neurons[3].add_incoming(connections[2], return_path_exists)
    for neuron in neurons.values():
        neuron.reset()
    neurons[0].set_value(1., neurons)
    neurons[1].set_value(1., neurons)
    res1 = neurons[3].value
    for neuron in neurons.values():
        neuron.reset()
    neurons[0].set_value(1., neurons)
    neurons[1].set_value(1., neurons)
    res2 = neurons[3].value
    print('Neuron test:', round(res1, 4) == 0.6381 and round(res2, 4) == 0.6445)
def new_node(self, generation_new_nodes, generation_new_connections):
    connections_values = list(self.connections.values())
    # choose a random connection and disable it
    connection = random.choice(connections_values)
    connection.enabled = False
    key_pair = (connection.from_key, connection.to_key)
    # if a node has already been added between these two nodes this generation,
    # reuse the existing innovation numbers and node key
    if key_pair in generation_new_nodes:
        new_node_key = generation_new_nodes[key_pair]
        innovation_number1 = generation_new_connections[(connection.from_key, new_node_key)]
        innovation_number2 = generation_new_connections[(new_node_key, connection.to_key)]
    # otherwise create new ones and remember the structural innovation
    else:
        new_node_key = config.next_node_key
        generation_new_nodes[key_pair] = new_node_key
        config.next_node_key += 1
        innovation_number1 = config.innovation_number
        generation_new_connections[(connection.from_key, new_node_key)] = innovation_number1
        config.innovation_number += 1
        innovation_number2 = config.innovation_number
        generation_new_connections[(new_node_key, connection.to_key)] = innovation_number2
        config.innovation_number += 1
    # create a new node with a random bias
    bias = random.gauss(config.bias_new_mu, config.bias_new_sigma)
    new_node = Node(new_node_key, bias)
    self.nodes[new_node_key] = new_node
    # create a new incoming connection and set its weight to 1.0
    new_connection1 = Connection(innovation_number1, connection.from_key, new_node_key, 1.0, True)
    self.connections[innovation_number1] = new_connection1
    # create a new outgoing connection and set its weight to the weight of the disabled connection
    new_connection2 = Connection(innovation_number2, new_node_key, connection.to_key, connection.weight, True)
    self.connections[innovation_number2] = new_connection2
def new_connection(self, generation_new_connections):
    node_keys = list(self.nodes.keys())
    num_nodes = len(node_keys)
    num_connections = len(self.connections.values())
    num_inputs = len(config.input_keys)
    num_outputs = len(config.output_keys)
    # exit if it is not possible to create any new connections
    if num_connections == utility.max_num_edges(num_nodes) - (
            utility.max_num_edges(num_inputs) + utility.max_num_edges(num_outputs)):
        return
    while True:
        # pick two random nodes
        node1_key = random.choice(node_keys[num_inputs + num_outputs:])
        node2_key = random.choice(node_keys)
        # try again if the chosen nodes are the same and self loops are disabled
        if config.disable_self_loops and node1_key == node2_key:
            continue
        # try again if there is already an existing connection between the chosen nodes
        existing_connections = [
            c for c in self.connections.values()
            if c.from_key == node1_key and c.to_key == node2_key
            or c.from_key == node2_key and c.to_key == node1_key
        ]
        if existing_connections:
            continue
        # switch node positions if adding this link would make the network recurrent
        if node2_key in config.input_keys or utility.check_if_path_exists_by_connections(
                node2_key, node1_key, self.connections) or (
                    node2_key, node1_key) in generation_new_connections:
            node1_key, node2_key = node2_key, node1_key
        # assign a new innovation number or reuse the existing one if this
        # structural innovation has already occurred
        key_pair = (node1_key, node2_key)
        if key_pair in generation_new_connections:
            innovation_number = generation_new_connections[key_pair]
        else:
            innovation_number = config.innovation_number
            generation_new_connections[key_pair] = innovation_number
            config.innovation_number += 1
        # create a new connection with a random weight
        new_connection = Connection(
            innovation_number, node1_key, node2_key,
            random.gauss(config.weight_new_mu, config.weight_new_sigma), True)
        self.connections[innovation_number] = new_connection
        return
class Ircbot(object):
    def __init__(self, bot_name):
        # Get the bot-specific settings
        settings_file = 'bots.%s' % bot_name
        try:
            module = __import__(settings_file)
        except ImportError:
            raise IrcbotException("Error importing settings for %s" % bot_name)
        self.settings = getattr(module, bot_name)

        # Set up connection
        self.connection = Connection(self.settings, self)

        # Set up plugins
        self.plugins = []
        if hasattr(self.settings, 'PLUGINS'):
            plugin_names = self.settings.PLUGINS
            for plugin_name in plugin_names:
                module = __import__('plugins.%s' % plugin_name)
                plugin = getattr(module, plugin_name)
                plugin_class = getattr(plugin, plugin_name)
                if not issubclass(plugin_class, PluginAbstract):
                    raise IrcbotException("Plugin '%s' doesn't extend core.PluginAbstract!" % plugin_name)
                self.plugins.append(plugin_class(self.connection, self.settings))

        # Find which plugin has the lowest ticker frequency
        ticker_frequencies = []
        for plugin in self.plugins:
            ticker_freq = plugin.get_ticker_freq()
            if ticker_freq is not None:
                ticker_frequencies.append(ticker_freq)
        if len(ticker_frequencies):
            self.connection.set_ticker_freq(min(ticker_frequencies))

    def init_callback(self):
        for plugin in self.plugins:
            plugin.init()

    def msg_received_callback(self, user, msg):
        for plugin in self.plugins:
            plugin.msg_received(user, msg)

    def ticker_callback(self):
        for plugin in self.plugins:
            plugin.ticker()

    def execute(self):
        try:
            self.connection.run()
        # Catch KeyboardInterrupts
        except KeyboardInterrupt as e:
            logger.exception("KeyboardInterrupt...")
            self.connection.quit("Someone killed me manually via commandline!")
        # Catchall to quit as gracefully as possible
        except Exception as e:
            logger.exception("Something killed execution!")
            self.connection.quit("Ruh roh, something went wrong!")
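# A minimal plugin sketch for context, assuming PluginAbstract exposes the hooks
# Ircbot calls above (init, msg_received, ticker, get_ticker_freq) and that
# plugins are constructed as plugin_class(connection, settings). The class name
# and import path below are illustrative assumptions, not part of the codebase.
from core import PluginAbstract  # assumed import path


class UptimePlugin(PluginAbstract):
    def __init__(self, connection, settings):
        self.connection = connection
        self.settings = settings
        self.ticks = 0

    def init(self):
        # called once via Ircbot.init_callback()
        self.ticks = 0

    def msg_received(self, user, msg):
        # called via Ircbot.msg_received_callback() for every incoming message
        pass

    def ticker(self):
        # called via Ircbot.ticker_callback() at the connection's ticker frequency
        self.ticks += 1

    def get_ticker_freq(self):
        # Ircbot uses the minimum over all plugins as the connection ticker frequency
        return 60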
def test_interface():
    config.input_keys = [0, 1]
    config.output_keys = [2, 3]
    connections = {
        0: Connection(0, 0, 4, 0.2, True),
        1: Connection(1, 1, 4, 0.3, True),
        2: Connection(2, 4, 2, 0.4, True),
        3: Connection(3, 4, 3, 0.5, True),
        4: Connection(4, 4, 4, 0.3, True)
    }
    nodes = {
        0: Node(0, 0),
        1: Node(1, 0),
        2: Node(2, 0),
        3: Node(3, 0),
        4: Node(4, 0)
    }
    network_visualizer = NetworkVisualizer()
    network_visualizer.update_node_positions(connections, nodes)
    network_visualizer.visualize_network(connections)
def configure_new(self):
    # create a node for every input and output defined by the problem
    for key in config.input_keys + config.output_keys:
        # pick a random bias value with gaussian distribution
        bias = random.gauss(config.bias_new_mu, config.bias_new_sigma)
        node = Node(key, bias)
        self.nodes[key] = node
    next_innovation_number = 0
    # fully connect inputs and outputs, i.e. create a connection between every input and output node
    for input_key in config.input_keys:
        for output_key in config.output_keys:
            # pick a random connection weight value with gaussian distribution
            new_connection = Connection(
                next_innovation_number, input_key, output_key,
                random.gauss(config.weight_new_mu, config.weight_new_sigma), True)
            self.connections[next_innovation_number] = new_connection
            next_innovation_number += 1
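# Usage sketch tying configure_new() to the new_node()/new_connection() mutations
# shown earlier. This is illustrative only: it assumes an Individual importable as
# below, that Individual({}, {}) starts from empty connection/node dicts, and that
# the config attributes set here are the ones those methods need (Python 3.7+ so
# dict insertion order separates input/output keys from hidden node keys).
import random
import config
from individual import Individual  # assumed module path

config.input_keys = [0, 1]
config.output_keys = [2]
config.bias_new_mu, config.bias_new_sigma = 0.0, 1.0
config.weight_new_mu, config.weight_new_sigma = 0.0, 1.0
config.disable_self_loops = True
config.next_node_key = len(config.input_keys) + len(config.output_keys)
# start counting innovations after the fully connected initial genome
config.innovation_number = len(config.input_keys) * len(config.output_keys)

random.seed(0)
individual = Individual({}, {})
individual.configure_new()  # connect every input to every output

# per-generation registries so identical structural mutations reuse innovations
generation_new_nodes = {}
generation_new_connections = {}
individual.new_node(generation_new_nodes, generation_new_connections)
individual.new_connection(generation_new_connections)
print(sorted(individual.nodes), sorted(individual.connections))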
class Enity(object):
    def __init__(self, c, addr, opts):
        self.conn = c
        self.key = opts.key
        self.secret = opts.secret
        self.addr_proxy = addr
        self.conn_proxy = Connection()
        c = self.conn_proxy
        if not c.connect_nonblocking(addr):
            self.close()
            return
        c.wev.handler = lambda: self.send_connect()
        add_conn(c, WRITE_EVENT)
        self.timer = Timer()
        self.timer.handler = lambda: self.on_timeout()
        add_timer(self.timer, EXPIRE_TIME)

    def on_timeout(self):
        c = self.conn_proxy
        log.warn(0, '*%d timeout', c.index)
        self.close()

    def close(self):
        if self.conn:
            self.conn.close()
            self.conn = None
        if self.conn_proxy:
            self.conn_proxy.close()
            self.conn_proxy = None
        if self.timer:
            del_timer(self.timer)
            self.timer = None

    def send_connect(self):
        c = self.conn_proxy
        log.debug(0, '*%d connect: %s', c.index, c.addr.text)
        timestamp = int(time.time())
        rand = '%x_%d' % (random.randint(0, 0xffffffff), get_sequence())
        sign = auth.get_sign(self.secret, [timestamp, rand])
        msg = Message(['connect req', self.key, timestamp, rand, sign])
        log.debug(0, '*%d send message: %s', c.index, msg)
        buff = msg.encode()
        if not buff:
            log.error(0, 'invalid message: %s', msg)
            return
        c.wev.buff = struct.pack('I', len(buff)) + buff
        c.wev.handler = lambda: self.send()
        self.send()

    def send(self):
        c = self.conn_proxy
        buff = c.wev.buff
        if len(buff) > 0:
            r, size = c.send(buff)
            if r != 0:
                if r == 1:
                    log.debug(0, '*%d closed', c.index)
                self.close()
                return
            c.wev.buff = buff[size:]
            buff = c.wev.buff
        if len(buff) > 0:
            c.wev.handler = lambda: self.send()
            del_conn(c, READ_EVENT)
            add_conn(c, WRITE_EVENT)
        else:
            c.rev.handler = lambda: self.read_connect()
            del_conn(c, WRITE_EVENT)
            add_conn(c, READ_EVENT)

    def read_bin(self):
        c = self.conn_proxy
        r, buff = c.recv(4096)
        if r != 0:
            if r == 1:
                log.debug(0, '*%d closed', self.conn.index)
            self.close()
            return ''
        c.rev.buff += buff
        buff = c.rev.buff
        size0 = struct.calcsize('I')
        if len(buff) < size0:
            return ''
        size1, = struct.unpack('I', buff[0:size0])
        if len(buff) < size1 + size0:
            return ''
        c.rev.buff = buff[size0 + size1:]
        return buff[size0:size0 + size1]

    def read_connect(self):
        buff = self.read_bin()
        if not buff:
            return
        msg = Message()
        r = msg.decode(buff)
        if r < 0:
            self.close()
            return
        if r != 0:
            return
        c = self.conn_proxy
        log.debug(0, '*%d read message: %s', c.index, msg)
        cmd = msg.get(0)
        if cmd != 'connect rsp':
            log.error(0, 'invalid command. msg:%s', msg)
            return
        err = msg.get(1)
        if err != 'ok':
            log.error(0, 'accept fail. msg:%s', msg)
            self.close()
            return
        Bridge(self.conn, self.conn_proxy)
        self.conn = None
        self.conn_proxy = None
        if self.timer:
            del_timer(self.timer)
            self.timer = None
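# The wire format used by send_connect()/read_bin() above is a length-prefixed
# frame: a native-order 4-byte unsigned length (struct 'I') followed by the
# encoded message. A standalone sketch of just that framing, with buffer
# handling only and no sockets; frame/unframe are illustrative names:
import struct

HEADER = struct.calcsize('I')


def frame(payload):
    # mirrors c.wev.buff = struct.pack('I', len(buff)) + buff
    return struct.pack('I', len(payload)) + payload


def unframe(stream):
    # mirrors read_bin(): returns (payload, rest), or (b'', stream) if incomplete
    if len(stream) < HEADER:
        return b'', stream
    size, = struct.unpack('I', stream[:HEADER])
    if len(stream) < HEADER + size:
        return b'', stream
    return stream[HEADER:HEADER + size], stream[HEADER + size:]


buf = frame(b'connect req') + frame(b'heartbeat req')
msg, buf = unframe(buf)      # b'connect req'
msg, buf = unframe(buf)      # b'heartbeat req'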
def run():
    connections1 = {
        0: Connection(0, 0, 6, 0.2, True),
        1: Connection(1, 0, 5, 0.3, True),
        2: Connection(2, 1, 5, 0.4, True),
        3: Connection(3, 1, 8, 0.5, True),
        4: Connection(4, 2, 7, 0.3, True),
        5: Connection(5, 2, 8, 0.7, True),
        6: Connection(6, 5, 6, 0.2, True),
        7: Connection(7, 5, 7, 0.1, True),
        8: Connection(8, 7, 6, 0.1, True),
        9: Connection(9, 6, 8, 0.3, True),
        10: Connection(10, 6, 3, 0.6, True),
        11: Connection(11, 6, 4, 0.7, True),
        12: Connection(12, 5, 3, 0.3, True),
        13: Connection(13, 7, 4, 0.7, True),
        14: Connection(14, 8, 3, 0.3, True),
        15: Connection(15, 8, 5, 0.8, False)
    }
    nodes1 = {
        0: Node(0, 0),
        1: Node(1, 0),
        2: Node(2, 0),
        3: Node(3, 0),
        4: Node(4, 0),
        5: Node(5, 0),
        6: Node(6, 0),
        7: Node(7, 0),
        8: Node(8, 0)
    }
    individual1 = Individual(connections1, nodes1)
    connections2 = {
        0: Connection(0, 0, 6, 0.9, True),
        1: Connection(1, 0, 5, 0.4, True),
        2: Connection(2, 1, 5, 0.7, True),
        3: Connection(3, 1, 8, 0.6, True),
        5: Connection(5, 2, 8, 0.4, True),
        6: Connection(6, 5, 6, 0.7, True),
        9: Connection(9, 6, 8, 0.1, True),
        10: Connection(10, 6, 3, 0.4, True),
        11: Connection(11, 6, 4, 0.1, True),
        12: Connection(12, 5, 3, 0.4, True),
        14: Connection(14, 8, 3, 0.5, True),
        15: Connection(15, 8, 5, 0.5, False),
        16: Connection(16, 1, 4, 0.5, True),
        17: Connection(17, 2, 4, 0.8, True)
    }
    nodes2 = {
        0: Node(0, 0),
        1: Node(1, 0),
        2: Node(2, 0),
        3: Node(3, 0),
        4: Node(4, 0),
        5: Node(5, 0),
        6: Node(6, 0),
        7: Node(7, 0),
        8: Node(8, 0)
    }
    individual2 = Individual(connections2, nodes2)
    test_sigmoid()
    test_check_if_path_exists_by_connections(connections1)
    test_check_if_path_exists_by_neurons(connections1, nodes1)
    test_distance(individual1, individual2)
    test_neuron()
    test_phenotype(connections1, nodes1)
    test_interface()
def connector(target, args, db, module, context, cmeserver):
    try:
        smb = SMBConnection(target, target, None, args.smb_port)

        # Get our IP from the socket
        local_ip = smb.getSMBServer().get_socket().getsockname()[0]

        # Get the remote ip address (in case the target is a hostname)
        remote_ip = smb.getRemoteHost()

        try:
            smb.login('', '')
        except SessionError as e:
            if "STATUS_ACCESS_DENIED" in e.message:
                pass

        domain = smb.getServerDomain()
        servername = smb.getServerName()
        serveros = smb.getServerOS()

        if not domain:
            domain = servername

        db.add_host(remote_ip, servername, domain, serveros)

        logger = CMEAdapter(getLogger('CME'), {
            'host': remote_ip,
            'port': args.smb_port,
            'hostname': u'{}'.format(servername)
        })

        logger.info(u"{} (name:{}) (domain:{})".format(serveros, servername, domain))

        try:
            '''
                DC's seem to want us to logoff first
                Windows workstations sometimes reset the connection,
                so we handle both cases here (go home Windows, you're drunk)
            '''
            smb.logoff()
        except NetBIOSError:
            pass
        except socket.error:
            pass

        if args.mssql:
            instances = None
            logger.extra['port'] = args.mssql_port

            ms_sql = tds.MSSQL(target, args.mssql_port, logger)
            ms_sql.connect()

            instances = ms_sql.getInstances(10)
            if len(instances) > 0:
                logger.info("Found {} MSSQL instance(s)".format(len(instances)))
                for i, instance in enumerate(instances):
                    logger.highlight("Instance {}".format(i))
                    for key in instance.keys():
                        logger.highlight(key + ":" + instance[key])

            try:
                ms_sql.disconnect()
            except:
                pass

        if args.username and (args.password or args.hash):
            conn = None

            if args.mssql and (instances is not None and len(instances) > 0):
                conn = tds.MSSQL(target, args.mssql_port, logger)
                conn.connect()
            elif not args.mssql:
                conn = SMBConnection(target, target, None, args.smb_port)

            if conn is None:
                return

            if args.domain:
                domain = args.domain

            connection = Connection(args, db, target, servername, domain, conn, logger, cmeserver)

            if (connection.password is not None or connection.hash is not None) and connection.username is not None:
                if module is not None:
                    module_logger = CMEAdapter(getLogger('CME'), {
                        'module': module.name.upper(),
                        'host': remote_ip,
                        'port': args.smb_port,
                        'hostname': servername
                    })
                    context = Context(db, module_logger, args)
                    context.localip = local_ip

                    cmeserver.server.context.localip = local_ip

                    if hasattr(module, 'on_login'):
                        module.on_login(context, connection)

                    if hasattr(module, 'on_admin_login') and connection.admin_privs:
                        module.on_admin_login(context, connection)
                else:
                    if connection.admin_privs and (args.pscommand or args.command):
                        get_output = True if args.no_output is False else False

                        if args.mssql:
                            args.exec_method = 'mssqlexec'

                        if args.command:
                            output = connection.execute(args.command, get_output=get_output, method=args.exec_method)

                        if args.pscommand:
                            output = connection.execute(create_ps_command(args.pscommand), get_output=get_output, method=args.exec_method)

                        logger.success('Executed command via {}'.format(args.exec_method))
                        buf = StringIO(output).readlines()
                        for line in buf:
                            logger.highlight(line.strip())

                    if args.mssql and args.mssql_query:
                        conn.sql_query(args.mssql_query)
                        query_output = conn.printRows()

                        logger.success('Executed MSSQL query')
                        buf = StringIO(query_output).readlines()
                        for line in buf:
                            logger.highlight(line.strip())

                    elif not args.mssql:
                        if connection.admin_privs and (args.sam or args.lsa or args.ntds):
                            secrets_dump = DumpSecrets(connection, logger)

                            if args.sam:
                                secrets_dump.SAM_dump()

                            if args.lsa:
                                secrets_dump.LSA_dump()

                            if args.ntds:
                                secrets_dump.NTDS_dump(args.ntds, args.ntds_pwdLastSet, args.ntds_history)

                        if connection.admin_privs and args.wdigest:
                            w_digest = WDIGEST(logger, connection.conn)

                            if args.wdigest == 'enable':
                                w_digest.enable()
                            elif args.wdigest == 'disable':
                                w_digest.disable()

                        if connection.admin_privs and args.uac:
                            UAC(connection.conn, logger).enum()

                        if args.spider:
                            spider = SMBSpider(logger, connection, args)
                            spider.spider(args.spider, args.depth)
                            spider.finish()

                        if args.enum_shares:
                            ShareEnum(connection.conn, logger).enum()

                        if args.enum_lusers or args.enum_disks or args.enum_sessions:
                            rpc_connection = RPCQUERY(connection, logger)

                            if args.enum_lusers:
                                rpc_connection.enum_lusers()

                            if args.enum_sessions:
                                rpc_connection.enum_sessions()

                            if args.enum_disks:
                                rpc_connection.enum_disks()

                        if args.pass_pol:
                            PassPolDump(logger, args.smb_port, connection).enum()

                        if args.enum_users:
                            SAMRDump(logger, args.smb_port, connection).enum()

                        if connection.admin_privs and args.wmi_query:
                            WMIQUERY(logger, connection, args.wmi_namespace).query(args.wmi_query)

                        if args.rid_brute:
                            LSALookupSid(logger, args.smb_port, connection, args.rid_brute).brute_force()

    except socket.error:
        return
def __init__(self):
    # super(self.__class__, self).__init__()
    Connection.__init__(self)
    self.rev.handler = self.wev.handler = get_accept_handler(
        weakref.proxy(self))
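# The handler above is bound through weakref.proxy so the listening Connection is
# not kept alive by its own accept handler (no reference cycle with the event
# machinery). A standalone illustration of that behaviour; names are illustrative,
# and immediate collection on del assumes CPython reference counting:
import weakref


class Listener(object):
    def hello(self):
        return 'hello'


obj = Listener()
handler = weakref.proxy(obj)   # does not hold a strong reference to obj
print(handler.hello())         # works while obj is alive

del obj
try:
    handler.hello()            # the proxy now raises ReferenceError
except ReferenceError:
    print('listener has been garbage collected')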
def connector(target, args, db, module, context, cmeserver):
    try:
        smb = SMBConnection(target, target, None, args.smb_port)

        # Get our IP from the socket
        local_ip = smb.getSMBServer().get_socket().getsockname()[0]

        # Get the remote ip address (in case the target is a hostname)
        remote_ip = smb.getRemoteHost()

        try:
            smb.login('', '')
        except SessionError as e:
            if "STATUS_ACCESS_DENIED" in e.message:
                pass

        domain = smb.getServerDomain()
        servername = smb.getServerName()
        serveros = smb.getServerOS()

        if not domain:
            domain = servername

        db.add_host(remote_ip, servername, domain, serveros)

        logger = CMEAdapter(getLogger('CME'), {
            'host': remote_ip,
            'port': args.smb_port,
            'hostname': u'{}'.format(servername)
        })

        logger.info(u"{} (name:{}) (domain:{})".format(
            serveros, servername.decode('utf-8'), domain.decode('utf-8')))

        try:
            '''
                DC's seem to want us to logoff first
                Windows workstations sometimes reset the connection,
                so we handle both cases here (go home Windows, you're drunk)
            '''
            smb.logoff()
        except NetBIOSError:
            pass
        except socket.error:
            pass

        if args.mssql:
            instances = None
            logger.extra['port'] = args.mssql_port

            ms_sql = tds.MSSQL(target, args.mssql_port, logger)
            ms_sql.connect()

            instances = ms_sql.getInstances(10)
            if len(instances) > 0:
                logger.info("Found {} MSSQL instance(s)".format(len(instances)))
                for i, instance in enumerate(instances):
                    logger.highlight("Instance {}".format(i))
                    for key in instance.keys():
                        logger.highlight(key + ":" + instance[key])

            try:
                ms_sql.disconnect()
            except:
                pass

        if args.username and (args.password or args.hash):
            conn = None

            if args.mssql and (instances is not None and len(instances) > 0):
                conn = tds.MSSQL(target, args.mssql_port, logger)
                conn.connect()
            elif not args.mssql:
                conn = SMBConnection(target, target, None, args.smb_port)

            if conn is None:
                return

            if args.domain:
                domain = args.domain

            connection = Connection(args, db, target, servername, domain, conn, logger, cmeserver)

            if (connection.password is not None or connection.hash is not None) and connection.username is not None:
                if module is not None:
                    module_logger = CMEAdapter(getLogger('CME'), {
                        'module': module.name.upper(),
                        'host': remote_ip,
                        'port': args.smb_port,
                        'hostname': servername
                    })
                    context = Context(db, module_logger, args)
                    context.localip = local_ip

                    if hasattr(module, 'on_request') or hasattr(module, 'has_response'):
                        cmeserver.server.context.localip = local_ip

                    if hasattr(module, 'on_login'):
                        module.on_login(context, connection)

                    if hasattr(module, 'on_admin_login') and connection.admin_privs:
                        module.on_admin_login(context, connection)
                else:
                    if connection.admin_privs and (args.pscommand or args.command):
                        get_output = True if args.no_output is False else False

                        if args.mssql:
                            args.exec_method = 'mssqlexec'

                        if args.command:
                            output = connection.execute(args.command, get_output=get_output, method=args.exec_method)

                        if args.pscommand:
                            output = connection.execute(create_ps_command(args.pscommand), get_output=get_output, method=args.exec_method)

                        logger.success('Executed command {}'.format(
                            'via {}'.format(args.exec_method) if args.exec_method else ''))
                        buf = StringIO(output).readlines()
                        for line in buf:
                            logger.highlight(line.strip())

                    if args.mssql and args.mssql_query:
                        conn.sql_query(args.mssql_query)
                        query_output = conn.printRows()

                        logger.success('Executed MSSQL query')
                        buf = StringIO(query_output).readlines()
                        for line in buf:
                            logger.highlight(line.strip())

                    elif not args.mssql:
                        if connection.admin_privs and (args.sam or args.lsa or args.ntds):
                            secrets_dump = DumpSecrets(connection, logger)

                            if args.sam:
                                secrets_dump.SAM_dump()

                            if args.lsa:
                                secrets_dump.LSA_dump()

                            if args.ntds:
                                secrets_dump.NTDS_dump(args.ntds, args.ntds_pwdLastSet, args.ntds_history)

                        if connection.admin_privs and args.wdigest:
                            w_digest = WDIGEST(logger, connection.conn)

                            if args.wdigest == 'enable':
                                w_digest.enable()
                            elif args.wdigest == 'disable':
                                w_digest.disable()

                        if connection.admin_privs and args.uac:
                            UAC(connection.conn, logger).enum()

                        if args.spider:
                            spider = SMBSpider(logger, connection, args)
                            spider.spider(args.spider, args.depth)
                            spider.finish()

                        if args.enum_shares:
                            ShareEnum(connection.conn, logger).enum()

                        if args.enum_lusers or args.enum_disks or args.enum_sessions:
                            rpc_connection = RPCQUERY(connection, logger)

                            if args.enum_lusers:
                                rpc_connection.enum_lusers()

                            if args.enum_sessions:
                                rpc_connection.enum_sessions()

                            if args.enum_disks:
                                rpc_connection.enum_disks()

                        if args.pass_pol:
                            PassPolDump(logger, args.smb_port, connection).enum()

                        if args.enum_users:
                            SAMRDump(logger, args.smb_port, connection).enum()

                        if connection.admin_privs and args.wmi_query:
                            WMIQUERY(logger, connection, args.wmi_namespace).query(args.wmi_query)

                        if args.rid_brute:
                            LSALookupSid(logger, args.smb_port, connection, args.rid_brute).brute_force()

    except socket.error:
        return
class Cross(object):
    def __init__(self, proxy, target, ckey):
        self.addr_proxy = proxy
        self.addr_target = target
        self.conn_proxy = None
        self.conn_target = None
        self.ready_proxy = False
        self.ready_target = False
        self.key = ckey

    def init(self):
        self.conn_proxy = Connection()
        self.conn_target = Connection()

        c = self.conn_proxy
        c.connect_nonblocking(self.addr_proxy)
        c.wev.handler = lambda: self.ready_connect_proxy()
        add_conn(c, WRITE_EVENT)

        c = self.conn_target
        c.connect_nonblocking(self.addr_target)
        c.wev.handler = lambda: self.ready_connect_target()
        add_conn(self.conn_target, WRITE_EVENT)

    def close(self):
        if self.conn_proxy:
            self.conn_proxy.close()
            self.conn_proxy = None
        if self.conn_target:
            self.conn_target.close()
            self.conn_target = None

    def ready_connect_proxy(self):
        c = self.conn_proxy
        log.debug(0, '*%d connect: %s', c.index, c.addr.text)
        msg = Message(['accept req', self.key])
        log.debug(0, '*%d send message: %s', c.index, msg)
        buff = msg.encode()
        if not buff:
            log.error(0, 'invalid message: %s', msg)
            return
        c.wev.buff = struct.pack('I', len(buff)) + buff
        c.wev.handler = lambda: self.send()
        self.send()

    def ready_connect_target(self):
        c = self.conn_target
        log.debug(0, '*%d connect: %s', c.index, c.addr.text)
        del_conn(c, WRITE_EVENT)
        self.ready_target = True
        self.check_ready()

    def send(self):
        c = self.conn_proxy
        buff = c.wev.buff
        if len(buff) > 0:
            r, size = c.send(buff)
            if r != 0:
                if r == 1:
                    log.debug(0, '*%d closed', c.index)
                self.close()
                return
            c.wev.buff = buff[size:]
            buff = c.wev.buff
        if len(buff) > 0:
            c.wev.handler = lambda: self.send()
            del_conn(c, READ_EVENT)
            add_conn(c, WRITE_EVENT)
        else:
            c.rev.handler = lambda: self.read()
            del_conn(c, WRITE_EVENT)
            add_conn(c, READ_EVENT)

    def read(self):
        buff = self.read_bin()
        if not buff:
            return
        msg = Message()
        r = msg.decode(buff)
        if r < 0:
            self.close()
            return
        if r != 0:
            return
        c = self.conn_proxy
        log.debug(0, '*%d read message. msg:%s', c.index, msg)
        cmd = msg.get(0)
        if cmd != 'accept rsp':
            log.error(0, 'invalid message. msg:%s', msg)
            self.close()
            return
        err = msg.get(1)
        if err != 'ok':
            log.error(0, 'accept fail. msg:%s', msg)
            self.close()
            return
        self.ready_proxy = True
        del_conn(c, WRITE_EVENT)
        self.check_ready()

    def read_bin(self):
        c = self.conn_proxy
        r, buff = c.recv(4096)
        if r != 0:
            if r == 1:
                log.debug(0, '*%d closed', c.index)
            self.close()
            return ''
        c.rev.buff += buff
        buff = c.rev.buff
        size0 = struct.calcsize('I')
        if len(buff) < size0:
            return ''
        size1, = struct.unpack('I', buff[0:size0])
        if len(buff) < size1 + size0:
            return ''
        c.rev.buff = buff[size0 + size1:]
        return buff[size0:size0 + size1]

    def check_ready(self):
        if self.ready_target:
            self.conn_target.wev.handler = None
            del_conn(self.conn_target, WRITE_EVENT)
        if self.ready_proxy:
            self.conn_proxy.wev.handler = None
            del_conn(self.conn_proxy, WRITE_EVENT)
        if self.ready_proxy and self.ready_target:
            Bridge(self.conn_proxy, self.conn_target)
            self.conn_proxy = None
            self.conn_target = None
class Enity(object):
    def __init__(self):
        self.DO_MAP = {
            'heartbeat rsp': Enity.do_heartbeat,
            'register rsp': Enity.do_register,
            'cross req': Enity.do_cross,
        }

    def set_opts(self, opts):
        proxy = Addr()
        target = Addr()
        if not proxy.parse(opts.proxy):
            log.error(0, 'invalid proxy address. %s', opts.proxy)
            return None
        if not target.parse(opts.target):
            log.error(0, 'invalid target address. %s', opts.target)
            return None
        if not opts.key:
            log.error(0, 'empty key.')
            return None
        if not opts.secret:
            log.error(0, 'empty secret.')
            return None
        self.conn = None
        self.addr_proxy = proxy
        self.addr_target = target
        self.key = opts.key
        self.secret = opts.secret
        self.addr_proxy.set_tcp()
        self.addr_target.set_tcp()
        self.stimer = None
        self.rtimer = None
        self.registered = False
        return self

    def is_valid(self):
        return self.addr_proxy and self.addr_target and self.key

    def clear(self):
        if self.conn:
            self.conn.close()
            self.conn = None
        if self.stimer:
            del_timer(self.stimer)
            self.stimer.prev = self.stimer.next = None
            self.stimer = None
        if self.rtimer:
            del_timer(self.rtimer)
            self.rtimer.prev = self.rtimer.next = None
            self.rtimer = None
        self.registered = False

    def close(self):
        self.init()

    def init(self):
        addr = self.addr_proxy
        while True:
            self.clear()
            self.conn = Connection()
            if self.conn.connect(addr):
                break
            time.sleep(3)
            # if not addr.next_sockaddr():
            #     sys.exit(-1)
        c = self.conn
        log.debug(0, '*%d connect: %s', c.index, c.addr.text)
        c.nonblocking()
        self.send_msg(['register req', self.key])
        self.stimer = Timer()
        self.stimer.handler = lambda: self.on_stimer()
        add_timer(self.stimer, HEARTBEAT_INTERVAL)
        self.rtimer = Timer()
        self.rtimer.handler = lambda: self.on_rtimer()
        add_timer(self.rtimer, HEARTBEAT_TIMEOUT)

    def on_stimer(self):
        if not self.registered:
            self.close()
            return
        add_timer(self.stimer, HEARTBEAT_INTERVAL)
        self.send_msg(['heartbeat req'])

    def on_rtimer(self):
        log.warn(0, 'connect has closed')
        self.close()

    def send(self):
        c = self.conn
        buff = c.wev.buff
        if len(buff) > 0:
            r, size = c.send(buff)
            if r != 0:
                if r == 1:
                    log.debug(0, '*%d closed', c.index)
                self.close()
                return
            c.wev.buff = buff[size:]
            buff = c.wev.buff
        if len(buff) > 0:
            c.wev.handler = lambda: self.send()
            del_conn(c, READ_EVENT)
            add_conn(c, WRITE_EVENT)
        else:
            c.rev.handler = lambda: self.read()
            del_conn(c, WRITE_EVENT)
            add_conn(c, READ_EVENT)

    def send_bin(self, buff):
        c = self.conn
        c.wev.buff = ''.join([c.wev.buff, struct.pack('I', len(buff)), buff])
        self.send()

    def send_msg(self, msg):
        c = self.conn
        if isinstance(msg, str) or isinstance(msg, list) or isinstance(msg, Message):
            msg = Message(msg)
        if msg.get(0) != 'heartbeat req':
            log.debug(0, '*%d send message: %s', c.index, msg)
        else:
            log.trace(0, '*%d send message: %s', c.index, msg)
        buff = msg.encode()
        if not buff:
            log.error(0, 'invalid message: %s', msg)
            return
        self.send_bin(buff)

    def read(self):
        buff = self.read_bin()
        if not buff:
            return
        msg = Message()
        r = msg.decode(buff)
        if r < 0:
            self.close()
            return
        if r != 0:
            return
        self.process(msg)

    def read_bin(self):
        c = self.conn
        r, buff = c.recv(4096)
        if r != 0:
            if r == 1:
                log.debug(0, '*%d closed', c.index)
            self.close()
            return ''
        c.rev.buff += buff
        buff = c.rev.buff
        size0 = struct.calcsize('I')
        if len(buff) < size0:
            return ''
        size1, = struct.unpack('I', buff[0:size0])
        if len(buff) < size1 + size0:
            return ''
        c.rev.buff = buff[size0 + size1:]
        return buff[size0:size0 + size1]

    def process(self, msg):
        c = self.conn
        cmd = msg.get(0)
        if cmd != 'heartbeat rsp':
            log.debug(0, '*%d read message: %s', c.index, msg)
        else:
            log.trace(0, '*%d read message: %s', c.index, msg)
        if cmd not in self.DO_MAP:
            log.error(0, 'invalid command. msg:%s', msg)
            return
        add_timer(self.stimer, HEARTBEAT_INTERVAL)
        add_timer(self.rtimer, HEARTBEAT_TIMEOUT)
        self.DO_MAP[cmd](self, msg)

    def do_heartbeat(self, msg):
        return

    def do_register(self, msg):
        err = msg.get(1)
        if err != 'ok':
            log.error(0, 'register fail. msg:%s', msg)
            self.send_msg(['register req', self.key])
            return
        self.registered = True
        log.debug(0, 'register succ. key:%s', self.key)

    def do_cross(self, msg):
        ckey = msg.get(1)
        if not ckey:
            log.error(0, 'invalid message. msg:%s', msg)
            self.send_msg(['cross rsp', 'error', 'empty connect key'])
            return
        now = int(time.time())
        timestamp = int(msg.get(2))
        if abs(now - timestamp) > TIMESTAMP_INTERVAL:
            log.error(0, 'invalid timestamp. msg:%s', msg)
            self.send_msg(['cross rsp', 'error', 'invalid timestamp', ckey])
            return
        rand = msg.get(3)
        if rand in RAND_CACHE:
            log.error(0, 'duplicate rand str. msg:%s', msg)
            self.send_msg(['cross rsp', 'error', 'invalid rand', ckey])
            return
        sign = auth.get_sign(self.secret, [timestamp, rand])
        if sign != msg.get(4):
            log.error(0, 'check auth fail. msg:%s, expected sign:%s', msg, sign)
            self.send_msg(['cross rsp', 'error', 'auth fail', ckey])
            return
        RAND_CACHE[rand] = 1
        timer = Timer()
        timer.handler = lambda: RAND_CACHE.pop(rand)
        add_timer(timer, TIMESTAMP_INTERVAL * 2)
        t = Cross(self.addr_proxy, self.addr_target, ckey)
        t.init()
        self.send_msg(['cross rsp', 'ok', ckey])
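# do_cross() above verifies auth.get_sign(self.secret, [timestamp, rand]) and
# adds a timestamp window plus RAND_CACHE for replay protection. The auth module
# is not shown here; the helper below is a hypothetical stand-in (HMAC-SHA256
# over the joined fields) to illustrate the idea; the project's real scheme may
# join, hash or encode the fields differently.
import hashlib
import hmac


def get_sign_sketch(secret, fields):
    # hypothetical stand-in for auth.get_sign(), not the project's implementation
    data = '|'.join(str(f) for f in fields)
    return hmac.new(secret.encode('utf-8'), data.encode('utf-8'),
                    hashlib.sha256).hexdigest()


# Both ends derive the same signature from the shared secret, so a
# 'cross req' / 'connect req' can be verified without transmitting the secret.
secret = 'shared-secret'
timestamp, rand = 1700000000, 'deadbeef_1'
assert get_sign_sketch(secret, [timestamp, rand]) == get_sign_sketch(secret, [timestamp, rand])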