def main():
    """Send one message to a remote peer and echo every reply line.

    Reads CLI options, opens either a UDP or TCP transport, transmits the
    message prefixed with ``b'msg:'``, then logs each decoded response.
    """
    options = _arg_parser_factory().parse_args()
    text: str = options.msg
    endpoint = (options.host, options.port)

    encoder = Base85Coder()
    console = ConsoleLogger()

    # Build the sender/reader pair for the selected protocol.
    if options.udp:
        conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sender = UDPSender(conn, endpoint, encoder)
        reader = UDPReader(conn, encoder)
    else:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect(endpoint)
        sender = TCPSender(conn, encoder)
        reader = TCPReader(conn, encoder)

    # An empty frame precedes the payload (mirrors the original handshake).
    sender.send(b'')
    sender.send(b'msg:' + text.encode())

    for reply in reader.read():
        console.log(reply.decode())
    reader.close()
class WooferBot(irc.IRCClient):
    """IRC bot that logs traffic and forwards channel messages to WooferHandler."""

    def __init__(self):
        self.logger = ConsoleLogger()
        self.nickname = config['nickname']
        self.password = config['password']
        self.lineRate = 0.67

    @staticmethod
    def _timestamp():
        # Human-readable local time, matching the original log format.
        return time.asctime(time.localtime(time.time()))

    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        self.logger.log("[connected at %s]" % self._timestamp())

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        self.logger.log("[disconnected at %s]" % self._timestamp())

    # callbacks for events

    def signedOn(self):
        """Called when bot has successfully signed on to server."""
        for chan in self.factory.channels:
            self.join(chan)

    def joined(self, channel):
        """This will get called when the bot joins the channel."""
        self.logger.log("[I have joined %s]" % channel)

    def privmsg(self, user, channel, msg):
        """This will get called when the bot receives a message.

        Simple commands can be handled here locally, but ones that require
        external lookups should be scheduled on the dispatcher.
        """
        user = user.split('!', 1)[0]
        self.logger.log("<%s> %s" % (user, msg))
        WooferHandler.handleMessage(self, user, channel.replace('#', ''), msg)

    def action(self, user, channel, msg):
        """This will get called when the bot sees someone do an action."""
        user = user.split('!', 1)[0]
        self.logger.log("* %s %s" % (user, msg))

    # irc callbacks

    def alterCollidedNick(self, nickname):
        """Generate an altered version of a nickname that caused a collision
        in an effort to create an unused related name for subsequent
        registration.
        """
        return self.nickname + '^'
def main():
    """Start the logging example.

    Sets up the console logger on a daemon thread and then finds a device to
    gather data from. If anything goes wrong the connection is re-established
    so the log continues. An address may be given on the command line; if
    none is given the program will scan for devices.
    """
    backend = LogBackend()

    # Console output runs on its own daemon thread so logging never blocks.
    console_log = ConsoleLogger(backend)
    Thread(target=console_log.spin, daemon=True).start()

    # Set up logging to the GMC.MAP service. Be sure to fill in the IDs!
    #gmc_log = GmcmapLogger(backend, "--ID--", "--ID--")
    #Thread(target=gmc_log.spin, daemon=True).start()

    # Set up logging to the Radmon service. Be sure to fill in the user/pass!
    #radmon_log = RadmonLogger(backend, "--USER--", "--PASSWORD--")
    #Thread(target=radmon_log.spin, daemon=True).start()

    # Keep attempting to reconnect if anything goes wrong.
    while True:
        address = find_any() if len(sys.argv) == 1 else sys.argv[1]
        spin(address, backend)
def __init__(self, verbosity=0, max_tasks=512):
    """Constructor.

    Args:
        verbosity: set output verbosity: 0 (default) is none, 3 is debug
        max_tasks: the maximum number of tasks asyncio will queue (default 512)
    """
    # Bookkeeping containers, filled in as lookups complete.
    self.tasks = []
    self.errors = []
    self.fqdn = []
    self.ignore_hosts = []
    # Use uvloop's event-loop implementation for all subsequently created loops.
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    self.loop = asyncio.get_event_loop()
    # rotate=True spreads queries across the configured nameservers.
    self.resolver = aiodns.DNSResolver(loop=self.loop, rotate=True)
    # Caps the number of concurrently in-flight lookups at max_tasks.
    self.sem = asyncio.BoundedSemaphore(max_tasks)
    self.max_tasks = max_tasks
    self.verbosity = verbosity
    self.logger = ConsoleLogger(verbosity)
def lambda_handler(event, context):
    """AWS Lambda entry point: advance the game until it is finished.

    Fix: the previous revision used ``while ... else``, whose ``else`` suite
    runs whenever the loop exits without ``break`` — so "Game already
    finished" was printed after *every* run, not only when the game was
    already over on entry. It also compared with ``is not True`` instead of
    ``not``.

    Args:
        event: Lambda event payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        int: 200 on completion.
    """
    dbPers = DBPersistence("config.yaml")
    twlogger = ConsoleLogger()  # TweetLogger("config.yaml")
    logic = Logic(dbPers, twlogger)

    if logic.IsGameFinished():
        # Nothing to do — the game ended before this invocation.
        print("Game already finished")
    else:
        # Step the game forward until it reports completion.
        while not logic.IsGameFinished():
            logic.Update()

    return 200
def main():
    """Upload a file or a directory tree to a remote TCP file server.

    Parses host/port/path from the CLI, lists the files to be sent, asks the
    user for confirmation, streams each file, then reports the server's
    per-session response.

    Fix: the not-found error message read "is not exists." — corrected to
    "does not exist."
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    args = _arg_parser_factory().parse_args()
    file_path = Path(args.file)
    host = args.host
    port = args.port
    sock.connect((host, port))
    logger: Logger = ConsoleLogger()
    coder = Base85Coder()
    sender = TCPSender(sock, coder)
    reader = TCPReader(sock, coder)

    if not file_path.exists():
        logger.error(f'{str(file_path)} does not exist.')
        exit(-1)

    if file_path.is_file():
        # Single file: send it relative to its parent directory.
        files = [file_path]
        base_path = file_path.parent
    else:
        # NOTE(review): '*.*' skips files without an extension — confirm intended.
        files = list(file_path.glob('./**/*.*'))
        base_path = file_path

    file_sender = FileSender(sender)

    # Show all the files before asking for confirmation.
    logger.log('The following files will be uploaded:')
    for file in files:
        logger.log(str(file))

    proceed = input('proceed?(y/N):').lower() == 'y'
    if not proceed:
        logger.info('Operation stopped by user.')
        exit(0)

    for file in files:
        file_sender.send_file(file, base_path)
    file_sender.close()

    # The server answers once per transfer session.
    for response in reader.read():
        if response == b'ok':
            logger.info('All file transmission has been done.')
        elif response.startswith(b'error'):
            logger.error(response.decode())
        else:
            logger.error('Unknown error occurred.')
    reader.close()
class Main:
    """TCP file-receiving server: accepts clients, one receiver thread each."""

    logger: Logger = ConsoleLogger()

    def main(self):
        """Parse arguments, bind the listening socket, and serve forever."""
        args = self._arg_parser_factory().parse_args()

        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # An empty host string binds all interfaces.
        listener.bind(('', args.port))
        listener.listen(args.backlog)

        self.logger.log(f'Server is on. port={args.port}')
        if args.debug:
            self.logger.debug('Debugging mode is on.')
        else:
            self.logger.disable(LogType.debug)

        destination = Path(args.save_to)
        while True:
            self.recv_client(listener, destination)

    @staticmethod
    def recv_client(sock: socket.socket, save: Path) -> threading.Thread:
        """Accept one client and start a thread that stores its upload."""
        client, address = sock.accept()
        Main.logger.info(f"connection established from {address}.")
        receiver = ThreadingFileReceiver(client, save / f'{time()}{address}', Main.logger)
        receiver.start()
        return receiver

    @staticmethod
    def _arg_parser_factory():
        """Build the CLI parser (port, backlog, save directory, debug flag)."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-p', '--port', type=int, default=8686)
        parser.add_argument('-b', '--backlog', type=int, default=10)
        parser.add_argument('-s', '--save_to', type=str, default='./download')
        parser.add_argument('-d', '--debug', default=False, action='store_true')
        return parser
from flask import Flask, jsonify, abort
from config_class import Config
from logger import FileLogger, ConsoleLogger
from collector import DbHandler

# Application-wide singletons, created once at import time.
config = Config()
# get db name from yaml file
db_name = config.db['db_name']
file_logger = FileLogger()
console_logger = ConsoleLogger()
# NOTE(review): index_list is an empty string, not a list — confirm DbHandler's contract.
db_handler = DbHandler(db_name=db_name, index_list='')

app = Flask(__name__)


# get_collections
@app.route('/api/indices', methods=['GET'])
def get_collections():
    # List every collection in the configured database.
    return jsonify({'collections': db_handler.get_db_collections()})


# get_by_collection_name
@app.route('/api/index/<index>', methods=['GET'])
def get_collection_data(index):
    # is_index_exist is defined elsewhere in this file; presumably it aborts
    # (via flask.abort) when the index is missing — TODO confirm.
    is_index_exist(index)
    return jsonify({'results': db_handler.get_collection_data(index)})


# get_last_entry_from_collection
# NOTE(review): the handler for this route continues beyond this chunk.
@app.route('/api/index/<index>/last', methods=['GET'])
# NOTE(review): this chunk is cut at both ends — the loop enclosing the
# `continue`, the except clause binding `e`, and the body of the trailing
# `finally:` are all outside this view, so the original nesting of the
# statements below cannot be fully reconstructed here.
try:
    # Probe this interpreter candidate; a non-zero exit status means it
    # cannot run us with the autodetection flag.
    subprocess.check_call(args + [INTERPRETER_AUTODETECTION_FLAG], stdout=stdout, stderr=STDOUT)
except CalledProcessError:
    continue
if INTERPRETER_AUTODETECTION_VERBOSE_FLAG in sys.argv or ALL_AUTODETECTION_VERSION_FLAG in sys.argv:
    print('Re-launching using command line: ' + ' '.join(args))
# Replace the current process image with the detected interpreter.
os.execv(args[0], args)
# NOTE(review): `e` is bound by an enclosing except clause outside this chunk.
print(
    'Error: One or more required python libraries have not been installed: '
    + str(e))
sys.exit(1)
if not autodetecting:
    # Normal run: mirror log output to both the console and a file.
    logger.register(ConsoleLogger())
    logger.register(FileLogger())
if EXECUTABLE_AUTODETECTION_VERSION_FLAG in sys.argv or ALL_AUTODETECTION_VERSION_FLAG in sys.argv:
    import detect
    detect.VERBOSE_DETECT = True
try:
    # Run the client; the backtrace flags control whether exceptions are
    # masked/handled or allowed to surface for debugging.
    PaladinLinuxClient().run(
        mask_exceptions=(not EXCEPTION_BACKTRACE_FLAG in sys.argv
                         and not ALL_EXCEPTIONS_BACKTRACE_FLAG in sys.argv),
        handle_errors=(not ALL_EXCEPTIONS_BACKTRACE_FLAG in sys.argv))
except KeyboardInterrupt as e:
    # Ctrl-C: print a newline and exit with a failure status.
    print('')
    sys.exit(1)
finally:
    # NOTE(review): the finally body continues beyond this chunk.
# Public API of this configuration module.
__all__ = [
    'config',
    'model_config',
    'machine',
    'MachineType'
]

from os.path import dirname, abspath, join, exists
from enum import Enum, auto
import socket

import yaml
from easydict import EasyDict as edict

from logger import ConsoleLogger
from utils.io import ensure_dir, ROOT_DIR

# Module-level logger tagged with this module's name.
_LOGGER = ConsoleLogger('Config')


class MachineType(Enum):
    """Machine type"""
    # Where the code runs: a local workstation or an AWS instance.
    LOCAL = auto()
    AWS = auto()


def load_config() -> dict:
    """Load configuration file

    Returns:
        dict: dictionary with project configuration information
    """
    # NOTE(review): the function body continues beyond this chunk.
class aioDNSBrute(object):
    """aiodnsbrute implements fast domain name brute forcing using Python's asyncio module."""

    def __init__(self, verbosity=0, max_tasks=512):
        """Constructor.

        Args:
            verbosity: set output verbosity: 0 (default) is none, 3 is debug
            max_tasks: the maximum number of tasks asyncio will queue (default 512)
        """
        # Bookkeeping containers, filled in as lookups complete.
        self.tasks = []
        self.errors = []
        self.fqdn = []
        self.ignore_hosts = []
        # Use uvloop's event-loop implementation for speed.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        self.loop = asyncio.get_event_loop()
        # rotate=True spreads queries across the configured nameservers.
        self.resolver = aiodns.DNSResolver(loop=self.loop, rotate=True)
        # Caps the number of concurrently in-flight lookups at max_tasks.
        self.sem = asyncio.BoundedSemaphore(max_tasks)
        self.max_tasks = max_tasks
        self.verbosity = verbosity
        self.logger = ConsoleLogger(verbosity)

    async def _dns_lookup(self, name):
        """Performs a DNS request using aiodns, self.lookup_type is set by the run function.

        A query for A record returns <ares_query_a_result> which does not return metadata
        about when a CNAME was resolved (just host and ttl attributes) however it should
        be faster. The <ares_host_result> returned by gethostbyname contains name, aliases,
        and addresses, if name is different in response we can surmise that the original
        domain was a CNAME entry.

        Args:
            name: the domain name to resolve

        Returns:
            object: <ares_query_a_result> if query, <ares_host_result> if gethostbyname
        """
        if self.lookup_type == "query":
            return await self.resolver.query(name, "A")
        elif self.lookup_type == "gethostbyname":
            return await self.resolver.gethostbyname(name, socket.AF_INET)

    def _dns_result_callback(self, name, future):
        """Handles the pycares object passed by the _dns_lookup function.

        We expect an errror to be present in the returned object because most lookups
        will be for names that don't exist. c-ares errors are passed through directly,
        error types can be identified in ares_strerror.c

        Args:
            name: original lookup name (because the query_result object doesn't contain it)
            future: the completed future (pycares dns result)
        """
        # Record processed we can now release the lock
        self.sem.release()
        # Handle known exceptions, barf on other ones
        if future.exception() is not None:
            try:
                err_number = future.exception().args[0]
                err_text = future.exception().args[1]
            except IndexError:
                # NOTE(review): if this branch runs, err_number is never bound and
                # the comparisons below raise NameError — consider an early return.
                self.logger.error(
                    f"Couldn't parse exception: {future.exception()}")
            # handle the DNS errors we expect to receive, show user unexpected errors
            if err_number == 4:
                # This is domain name not found, ignore it
                pass
            elif err_number == 12:
                # Timeout from DNS server
                # NOTE(review): logger.warn — presumably a custom logger; stdlib's
                # warn is a deprecated alias for warning.
                self.logger.warn(f"Timeout for {name}")
            elif err_number == 1:
                # Server answered with no data
                pass
            else:
                self.logger.error(
                    f"{name} generated an unexpected exception: {future.exception()}"
                )
            # for debugging/troubleshoooting keep a list of errors
            # self.errors.append({'hostname': name, 'error': err_text})
        # parse and output and store results.
        else:
            if self.lookup_type == "query":
                ips = [ip.host for ip in future.result()]
                cname = False
                row = f"{name:<30}\t{ips}"
            elif self.lookup_type == "gethostbyname":
                r = future.result()
                ips = [ip for ip in r.addresses]
                if name == r.name:
                    # Response name matches the query: no CNAME involved.
                    cname = False
                    n = f"""{name:<30}\t{f"{'':<35}" if self.verbosity >= 2 else ""}"""
                else:
                    # Differing name implies the original was a CNAME entry.
                    cname = True
                    # format the name based on verbosity - this is kluge
                    short_cname = f"{r.name[:28]}.." if len(
                        r.name) > 30 else r.name
                    n = f'{name}{"**" if self.verbosity <= 1 else ""}'
                    n = f'''{n:<30}\t{f"CNAME {short_cname:<30}" if self.verbosity >= 2 else ""}'''
                row = f"{n:<30}\t{ips}"
            # store the result, unless the answer matches the wildcard IP set
            if set(ips) != set(self.ignore_hosts):
                self.logger.success(row)
                dns_lookup_result = {"domain": name, "ip": ips}
                if self.lookup_type == "gethostbyname" and cname:
                    dns_lookup_result["cname"] = r.name
                    dns_lookup_result["aliases"] = r.aliases
                self.fqdn.append(dns_lookup_result)
            self.logger.debug(future.result())
        self.tasks.remove(future)
        if self.verbosity >= 1:
            self.pbar.update()

    async def _queue_lookups(self, wordlist, domain):
        """Takes a list of words and adds them to the async loop also passing
        the original lookup domain name; then attaches the processing callback
        to deal with the result.

        Args:
            wordlist: a list of names to perform lookups for
            domain: the base domain to perform brute force against
        """
        for word in wordlist:
            # Wait on the semaphore before adding more tasks
            await self.sem.acquire()
            host = f"{word.strip()}.{domain}"
            task = asyncio.ensure_future(self._dns_lookup(host))
            task.add_done_callback(
                functools.partial(self._dns_result_callback, host))
            self.tasks.append(task)
        # Wait for every queued lookup; exceptions are delivered to the
        # callback via the future rather than raised here.
        await asyncio.gather(*self.tasks, return_exceptions=True)

    def run(self, wordlist, domain, resolvers=None, wildcard=True, verify=True, query=True):
        """Sets up the bruteforce job, does domain verification, sets resolvers,
        checks for wildcard response to lookups, and sets the query type to be
        used. After all this, open the wordlist file and start the brute force
        - with ^C handling to cleanup nicely.

        Args:
            wordlist: a string containing a path to a filename to be used as a wordlist
            domain: the base domain name to be used for lookups
            resolvers: a list of DNS resolvers to be used (default None, uses system resolvers)
            wildcard: bool, do wildcard dns detection (default true)
            verify: bool, check if domain exists (default true)
            query: bool, use query to do lookups (default true), false means gethostbyname is used.

        Returns:
            dict containing result of lookups
        """
        self.logger.info(
            f"Brute forcing {domain} with a maximum of {self.max_tasks} concurrent tasks..."
        )
        if verify:
            # A quick blocking lookup via the system resolver before starting.
            self.logger.info(
                f"Using local resolver to verify {domain} exists.")
            try:
                socket.gethostbyname(domain)
            except socket.gaierror as err:
                self.logger.error(
                    f"Couldn't resolve {domain}, use the --no-verify switch to ignore this error."
                )
                raise SystemExit(
                    self.logger.error(f"Error from host lookup: {err}"))
        else:
            self.logger.warn("Skipping domain verification. YOLO!")
        if resolvers:
            self.resolver.nameservers = resolvers
            self.logger.info(
                f"Using recursive DNS with the following servers: {self.resolver.nameservers}"
            )
        if wildcard:
            # 63 chars is the max allowed segment length, there is practically no chance that it will be a legit record
            random_sld = (
                lambda: f'{"".join(random.choice(string.ascii_lowercase + string.digits) for i in range(63))}'
            )
            try:
                self.lookup_type = "query"
                wc_check = self.loop.run_until_complete(
                    self._dns_lookup(f"{random_sld()}.{domain}"))
            except aiodns.error.DNSError as err:
                # we expect that the record will not exist and error 4 will be thrown
                self.logger.info(
                    f"No wildcard response was detected for this domain.")
                wc_check = None
            finally:
                if wc_check is not None:
                    # Any answer for a random label means the zone is a
                    # wildcard; remember those IPs so they are skipped later.
                    self.ignore_hosts = [host.host for host in wc_check]
                    self.logger.warn(
                        f"Wildcard response detected, ignoring answers containing {self.ignore_hosts}"
                    )
        else:
            self.logger.warn("Wildcard detection is disabled")
        if query:
            self.logger.info(
                "Using pycares `query` function to perform lookups, CNAMEs cannot be identified"
            )
            self.lookup_type = "query"
        else:
            self.logger.info(
                "Using pycares `gethostbyname` function to perform lookups, CNAME data will be appended to results (** denotes CNAME, show actual name with -vv)"
            )
            self.lookup_type = "gethostbyname"
        with open(wordlist, encoding="utf-8", errors="ignore") as words:
            w = words.read().splitlines()
        self.logger.info(
            f"Wordlist loaded, proceeding with {len(w)} DNS requests")
        try:
            if self.verbosity >= 1:
                self.pbar = tqdm(total=len(w), unit="rec", maxinterval=0.1, mininterval=0)
            self.loop.run_until_complete(self._queue_lookups(w, domain))
        except KeyboardInterrupt:
            self.logger.warn("Caught keyboard interrupt, cleaning up...")
            # NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9;
            # asyncio.all_tasks(self.loop) is the modern equivalent — confirm
            # target interpreter version before changing.
            asyncio.gather(*asyncio.Task.all_tasks()).cancel()
            self.loop.stop()
        finally:
            # NOTE(review): closing the loop here means run() cannot be called
            # twice on the same instance.
            self.loop.close()
            if self.verbosity >= 1:
                self.pbar.close()
        self.logger.info(f"Completed, {len(self.fqdn)} subdomains found")
        return self.fqdn
def main():
    """Interactive chat-client REPL.

    Connects to the chat server, starts a background message receiver, then
    reads user input in a loop: lines starting with '/' are commands, anything
    else is sent as a chat message.

    Fixes over the previous revision:
      * an unknown command raised a bare ``Exception`` that the broad handler
        below caught and turned into ``exit()`` — a typo killed the whole
        client; it now just re-prompts;
      * ``sock.__getattribute__('_closed')`` replaced with the equivalent,
        idiomatic ``getattr`` call;
      * the command-name -> factory ``if/elif`` chain replaced by a dispatch
        dict.
    """
    args = _arg_parser_factory().parse_args()
    host = args.host
    port = args.port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    logger = ConsoleLogger()
    coder = Base85Coder()
    reader = TCPReader(sock, coder)
    sender = TCPSender(sock, coder)
    logger.info('Connected to chat server...')

    file = show_messages()
    msg_receiver = ThreadingMessageReceiver(file, reader)
    msg_receiver.start()

    # Slash-command name -> factory that builds the command object.
    command_factories = {
        'login': _login,
        'register': _register,
        'logout': _logout,
        'create': _create,
        'join': _join,
        'leave': _leave,
        'close': _close,
    }

    closed = False
    while not closed and not getattr(sock, '_closed', False):
        try:
            data = input('>> ')
            if data.startswith('/'):
                data = data[1:]
                factory = command_factories.get(data)
                if factory is None:
                    logger.error('unknown command.')
                    continue  # re-prompt instead of terminating the client
                command = factory()
                msg_receiver.command(data)
            else:
                command = _message(data)
                msg_receiver.message(data)
                msg_receiver.command('message')
            invoker = CommandInvoker()
            invoker.store_command(command)
            request = invoker.serialize().encode()
            sender.send(request)
            if isinstance(command, Close):
                sock.close()
        except Exception as e:
            # Any other failure (EOF, broken pipe, ...) ends the session.
            if str(e) != '':
                print(str(e))
            exit()
def __init__(self):
    """Initialise the bot from the module-level ``config`` mapping."""
    # Console-backed logger used for all bot output.
    self.logger = ConsoleLogger()
    # IRC credentials come from the loaded configuration.
    self.nickname = config['nickname']
    self.password = config['password']
    # NOTE(review): presumably Twisted IRCClient's lineRate — the minimum
    # delay in seconds between lines sent to the server; confirm against
    # the base class in use.
    self.lineRate = 0.67