def progresscontext(*args):
    """Write the given message fragments to stderr, yield once, and end the
    stderr line when the caller finishes.

    Generator shaped for use with ``contextlib.contextmanager`` — presumably
    decorated at the definition site outside this chunk; TODO confirm.
    """
    for fragment in args:
        stderr.write(fragment)
    stderr.flush()
    try:
        yield
    finally:
        # Always terminate the progress line, even if the body raised.
        print(file=stderr)
def handle(self, *args, **options):
    """Print every installed model's name and row count.

    Writes "<name>\t<count>" to stdout and an "error:"-prefixed copy to
    stderr for each content type.  Content types whose model is stale or
    whose table is broken are skipped.
    """
    for ct in ContentType.objects.all():
        try:
            m = ct.model_class()  # may be None for stale content types
            message = (m.__name__, m._default_manager.count())
            stdout.write("%s\t%d\n" % message)
            # NOTE(review): every row is mirrored to stderr with an "error:"
            # prefix unconditionally — confirm this is intended.
            stderr.write("error:%s\t%d\n" % message)
        except Exception:
            # BUG FIX: was a bare `except: pass`, which also swallowed
            # SystemExit/KeyboardInterrupt.  Broken models are still skipped.
            continue
def handle(self, *args, **options):
    """Import healing / closed-status rows from the CSV file named in args[0].

    ``options['encoding']`` must name a valid codec; columns are separated
    by ';'.  Python 2 code — prints are written in the form valid in both
    Python 2 and 3.
    """
    if len(args) == 0:
        stderr.write("No files\n")
        return
    print(options)
    encoding = options['encoding']
    codecs.lookup(encoding)  # fail fast on an unknown encoding name
    filename = args[0]
    print("Importing healings from " + filename)
    # BUG FIX: the file is now closed even when import_mu_data raises, and
    # the redundant first csv.reader (created and immediately replaced by a
    # second identical one) is gone.
    with open(filename, "rb") as csvfile:
        print("Importing closed statuses...")
        reader = csv.reader(csvfile, delimiter=';')
        self.import_mu_data(reader, encoding)
def parse_actions(actions, **kwargs):
    """Dispatch 'name:arg' action strings to their handlers.

    'review' actions are collected and executed last, with nowait set for
    all but the final one.  Unknown action names are reported to stderr
    and skipped.
    """
    stderr.write("kwargs keys: {}\n".format(kwargs.keys()))
    to_review = []
    action_callers = {
        'review': lambda x, **k: to_review.append(x),
        'suggest': on_suggest,
        'test': on_test,
    }
    verbose = maybe(kwargs, 'verbose', False)
    if verbose:
        stderr.writelines("action -- {}\n".format(action) for action in actions)
    # Each action string is "name:argument"; assumes exactly one ':' — TODO confirm.
    for (action, args) in imap(lambda x: x.split(':'), actions):
        try:
            action_callers[action](args.strip(), **kwargs)
        except KeyError:
            stderr.write("Action '{}' is not implemented\n".format(action))
    stderr.write("Reviewing: {}\n".format(to_review))
    # BUG FIX: the original indexed to_review[-1] unconditionally and raised
    # IndexError whenever no 'review' actions were supplied.
    if to_review:
        kwargs['nowait'] = True
        for review in to_review[:-1]:
            on_review(review, **kwargs)
        kwargs['nowait'] = False
        on_review(to_review[-1], **kwargs)
def linuxMemoryinfo(self, data, ipadd):
    """Build memory-table ('memoryinfo') rows from collected agent data.

    data maps an opaque key to {'hostid', 'system': {'mem': {...}}, 'time'};
    each row is (hostid, <mem fields per self.memoryInfokeys[1:-1]>, time).
    Returns the list of row tuples (BUG FIX: the original built the rows
    and silently discarded them).  Also modernized from the Python-2-only
    `except (...), error` syntax.
    """
    # First and last entries of memoryInfokeys are assumed to be the
    # hostid/time columns — TODO confirm against the schema.
    keys = self.memoryInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            mem = info['system']['mem']
            row = [info['hostid']]
            row.extend(mem[k] for k in keys)
            row.append(info['time'])
            values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux mem info data error:%s\n" % (ipadd, error))
    return values
def linuxHostinfo(self, data, ipadd):
    """Build the host-table ('hostinfo') row from collected agent data.

    Only the first entry of `data` is used (the original loop broke after
    one iteration).  Each row is (hostid, osVersion, osName, kernel,
    ipadd, time).  Returns the list of row tuples (BUG FIX: the original
    built the rows and silently discarded them; the unused hostInfokeys
    local is removed).  Modernized from Python-2-only except syntax.
    """
    values = []
    try:
        for info in data.values():
            values.append((info['hostid'], info['osVersion'], info['osName'],
                           info['kernel'], info['ipadd'], info['time']))
            break  # intentionally process only the first entry
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux host info data error:%s\n" % (ipadd, error))
    return values
def linuxProcessinfo(self, data, ipadd):
    """Build process-table ('processinfo') rows from collected agent data.

    Each row is (hostid, <fields per self.processInfokeys[1:-1]> taken from
    info['system']['process'], time).  Returns the list of row tuples
    (BUG FIX: the original built the rows and silently discarded them).
    Modernized from Python-2-only except syntax.
    """
    keys = self.processInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            proc = info['system']['process']
            row = [info['hostid']]
            row.extend(proc[k] for k in keys)
            row.append(info['time'])
            values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux process info data error:%s\n" % (ipadd, error))
    return values
def show_data(data):
    """Print each unique Analytics ID in `data` with its related domains.

    IDs are normalized to their first two '-'-separated components and
    de-duplicated in first-seen order; domains come from get_domains().
    Status messages go to stderr, results to stdout.
    """
    seen = []  # preserves first-seen order while avoiding duplicates
    if not data:
        stderr.writelines("[-] Analytics ID not found...\n")
        return
    print("")
    for u in data:
        analytics_id = "-".join(u.split('-')[0:2])
        if analytics_id in seen:
            continue
        seen.append(analytics_id)
        print(f">> {analytics_id}")
        # BUG FIX: get_domains() was called twice per ID (once for the
        # truthiness check and again in the loop); reuse the first result.
        domains = get_domains(analytics_id)
        if domains:
            for domain in domains:
                print(f"|__ {domain}")
        else:
            print("|__ NOT FOUND")
        print("")
    stderr.writelines("\n[+] Done! \n")
def linuxCpuinfo(self, data, ipadd):
    """Build CPU-table ('cpuinfo') rows from collected agent data.

    Each row is (hostid, <fields per self.cpuInfokeys[1:-1]> taken from
    info['system']['cpu'], time).  Returns the list of row tuples
    (BUG FIX: the original built the rows and silently discarded them).
    Modernized from Python-2-only except syntax.
    """
    keys = self.cpuInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            cpu = info['system']['cpu']
            row = [info['hostid']]
            row.extend(cpu[k] for k in keys)
            row.append(info['time'])
            values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux cpu info data error:%s\n" % (ipadd, error))
    return values
def banner():
    # Print the project's ASCII-art banner and credits to stderr.
    # NOTE: writelines() on a single string writes it character by
    # character; the net output is identical to one write() call.
    # NOTE(review): the banner's internal line breaks appear to have been
    # lost when this file was flattened — confirm against the upstream.
    stderr.writelines(""" ██╗ ██╗ █████╗ ██╗██████╗ ██║ ██║██╔══██╗ ██║██╔══██╗ ██║ ██║███████║█████╗██║██║ ██║ ██║ ██║██╔══██║╚════╝██║██║ ██║ ╚██████╔╝██║ ██║ ██║██████╔╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝╚═════╝ ██████╗ ██████╗ ███╗ ███╗ █████╗ ██╗███╗ ██╗███████╗ ██╔══██╗██╔═══██╗████╗ ████║██╔══██╗██║████╗ ██║██╔════╝ ██║ ██║██║ ██║██╔████╔██║███████║██║██╔██╗ ██║███████╗ ██║ ██║██║ ██║██║╚██╔╝██║██╔══██║██║██║╚██╗██║╚════██║ ██████╔╝╚██████╔╝██║ ╚═╝ ██║██║ ██║██║██║ ╚████║███████║ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚══════╝ > Get related domains / subdomains by looking at Google Analytics IDs > Python version > By @JosueEncinar """)
def linuxLoadinfo(self, data, ipadd):
    """Build load-table ('loadsinfo') rows from collected agent data.

    Each row is (hostid, <fields per self.loadInfokeys[1:-1]> taken from
    info['system']['uptime'], time).  Returns the list of row tuples
    (BUG FIX: the original built the rows and silently discarded them).
    Modernized from Python-2-only except syntax.
    """
    keys = self.loadInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            load = info['system']['uptime']
            row = [info['hostid']]
            row.extend(load[k] for k in keys)
            row.append(info['time'])
            values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux load info data error:%s\n" % (ipadd, error))
    return values
def linuxHardwareinfo(self, data, ipadd):
    """Build hardware-table ('hardwareinfo') rows from collected agent data.

    Hosts that reported no hardware block (None) are skipped.  Each field
    value is JSON-serialized with dumps() since hardware entries are
    nested structures.  Returns the list of row tuples (BUG FIX: the
    original built the rows and silently discarded them).  Modernized from
    Python-2-only except syntax.
    """
    keys = self.hardwareInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            hardware = info['hardware']
            if hardware is None:
                continue  # host did not report hardware data
            row = [info['hostid']]
            row.extend(dumps(hardware[k]) for k in keys)
            row.append(info['time'])
            values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux hardware info data error:%s\n" % (ipadd, error))
    return values
def linuxIoinfo(self, data, ipadd):
    """Build IO-table ('ioinfo') rows from collected agent data.

    One row per block device in info['system']['io']:
    (hostid, <fields per self.ioInfokeys[1:-1]>, time).  Returns the list
    of row tuples (BUG FIX: the original built the rows and silently
    discarded them).  Modernized from Python-2-only except syntax.
    """
    keys = self.ioInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            for devinfo in info['system']['io'].values():
                row = [info['hostid']]
                row.extend(devinfo[k] for k in keys)
                row.append(info['time'])
                values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux io info data error:%s\n" % (ipadd, error))
    return values
def linuxDiskinfo(self, data, ipadd):
    """Build disk-table ('diskinfo') rows from collected agent data.

    One row per partition in info['system']['partition']:
    (hostid, <fields per self.diskInfokeys[1:-1]>, time).  Returns the
    list of row tuples (BUG FIX: the original built the rows and silently
    discarded them).  Modernized from Python-2-only except syntax.
    """
    keys = self.diskInfokeys[1:-1]
    values = []
    try:
        for info in data.values():
            for devinfo in info['system']['partition'].values():
                row = [info['hostid']]
                row.extend(devinfo[k] for k in keys)
                row.append(info['time'])
                values.append(tuple(row))
    except (IndexError, KeyError, TypeError) as error:
        stderr.writelines("Ip:%s,linux disk info data error:%s\n" % (ipadd, error))
    return values
def open_directory(self, path, base):
    """Log that directory *path* is being modified, then continue editing
    with this same editor object."""
    if not self.rev.quiet:
        stderr.write("\n M " + path + "/")
        stderr.flush()
    return self
def commit(self, rev, date, author, *, init_export, base_rev, base_path, gitrev, path, prefix):
    """Translate SVN revision *rev* from the dump stream into one git
    fast-import commit.

    Returns the new fast-import mark, or None when the revision produced
    no edits (commit skipped).  self._header/self._content buffer one
    read-ahead dump record between calls.
    """
    self.log(":")
    edits = list()      # fast-import "M <mode> <blob> <path>" lines for this commit
    mergeinfo = dict()  # svn:mergeinfo gathered for this revision
    # Emit deletions queued for this revision (actions D/R), skipping
    # anything matched by the ignore list.
    for (file, (action, _, _)) in self.paths.items():
        if not file.startswith(prefix) or action not in "DR":
            continue
        file = file[len(prefix):]
        for p in self.ignore:
            if file == p or file.startswith((p + "/").lstrip("/")):
                break
        else:
            # NOTE(review): `dir` is not defined in this chunk — presumably a
            # directory editor bound elsewhere in the file; confirm.
            dir.delete_entry(file)
    # Scan forward through the dump to the Revision-number record for `rev`.
    r = None
    while True:
        if self._header is None:
            [header, self._content] = read_record(self.dump)
        else:
            header = self._header
            self._header = None
        # Tolerate concatenated dumps
        if header.items() == [("SVN-fs-dump-format-version", "3")]:
            [header, content] = read_record(self.dump)
            assert header.items() == [("UUID", self.uuid)]
            [header, self._content] = read_record(self.dump)
        if "Node-path" in header:
            continue
        r = int(header["Revision-number"])
        if r >= rev:
            break
    if r != rev:
        raise LookupError(f"Revision {rev} not found in dump file")
    [revprops, text] = parse_content(header, self._content)
    assert revprops.keys() >= {b"svn:date", b"svn:log"}
    assert revprops.keys() <= {b"svn:author", b"svn:date", b"svn:log"}
    log = revprops[b"svn:log"]
    # Exclude ignored paths from the report.
    # NOTE(review): `reporter`, INVALID_REVNUM and subvertpy are not visible
    # in this chunk — confirm they are module-level names.
    for p in self.ignore:
        reporter.set_path(p, INVALID_REVNUM, True, None, subvertpy.ra.DEPTH_EXCLUDE)
    # Consume this revision's node records, adding/modifying files and dirs.
    while True:
        [self._header, self._content] = read_record(self.dump)
        p = self._header.get_all("Node-path")
        if not p:
            break  # no Node-path: next revision record reached
        [p] = p
        p = "/" + p
        [action, from_path, from_rev] = self.paths.pop(p)
        if not p.startswith(prefix) and p != path:
            continue
        assert frozenset(self._header.keys()) < {
            "Node-path", "Node-kind", "Node-action",
            "Node-copyfrom-path", "Node-copyfrom-rev",
            "Prop-delta", "Text-delta", "Text-delta-base-md5",
            "Text-content-md5", "Prop-content-length",
            "Text-content-length", "Content-length",
        }
        assert action == {"add": "A", "change": "M"}[self._header.get("Node-action")]
        assert from_path is from_rev is None
        [kind] = self._header.get_all("Node-kind")
        if kind == "dir":
            assert action == "A"
            if not self.quiet:
                stderr.writelines(("\n A ", p, "/"))
        else:
            assert kind == "file"
            if not self.quiet:
                stderr.write(f"\n {action} {p}")
            p = p[len(prefix):]
            [props, target] = parse_content(self._header, self._content)
            if action == "M":
                # Modified file: current blob and mode come from the output map.
                [source, mode] = self.output[p]
            else:
                # New file: mode derives from svn:executable.
                assert props.items() >= {(b"svn:eol-style", "native"), (b"svn:keywords", "Author Date Id Revision")}
                assert props.items() <= {
                    (b"svn:eol-style", "native"),
                    (b"svn:executable", "*"),
                    (b"svn:keywords", "Author Date Id Revision")
                }
                mode = props.get(b"svn:executable")
                mode = {None: "644", "*": "755"}[mode]
            if self._header.get("Text-delta") == "true":
                # Decode the binary delta into the full target text.
                if action == "M":
                    source = self.output.cat_blob(source)
                    [hash] = self._header.get_all("Text-delta-base-md5")
                    assert md5(source).hexdigest() == hash
                else:
                    source = None
                delta = BytesIO(target)
                header = delta.read(4)
                assert header == b"SVN\x00"  # svndiff version-0 magic
                source_offset = read_int(delta)
                assert source_offset == 0
                source_length = read_int(delta)
                target = read_int(delta)       # target length (reassigned to a buffer below)
                instr_length = read_int(delta)
                data = read_int(delta)         # length of the new-data section
                instr_data = delta.read(instr_length)
                assert len(instr_data) == instr_length
                instr_data = BytesIO(instr_data)
                target = bytearray()
                while True:
                    instr = instr_data.read(1)
                    if not instr:
                        break
                    [instr] = instr
                    copy = instr & 0x3F  # low 6 bits: copy length (0 => encoded separately)
                    instr >>= 6          # high 2 bits: opcode
                    if not copy:
                        copy = read_int(instr_data)
                    SOURCE = 0
                    TARGET = 1
                    NEW = 2
                    if instr == SOURCE:
                        offset = read_int(instr_data)
                        data = source[offset:offset + copy]
                    elif instr == TARGET:
                        offset = read_int(instr_data)
                        data = target[offset:offset + copy]
                        # Repeat if length greater than existing target size
                        data *= -(-copy // len(data))
                        data = data[:copy]
                    else:
                        assert instr == NEW
                        data = delta.read(copy)
                        assert len(data) == copy
                    target.extend(data)
                assert not delta.read(1)  # delta stream fully consumed
            [hash] = self._header.get_all("Text-content-md5")
            assert md5(target).hexdigest() == hash
            blob = self.output.blob(p, target)
            self.output[p] = (blob, mode)
            edits.append(f"M {mode} {blob} {p}")
    stderr.flush()
    if not edits:
        self.log("\n => commit skipped")
        return None
    assert not self.paths
    merges = list()
    if mergeinfo:
        self.log("\n")
        # Decide whether the recorded mergeinfo represents a real merge:
        # every merged revision must be accounted for by the listed branch
        # heads beyond the natural base history.
        basehist = Ancestors(self)
        if base_rev:
            basehist.add_natural(base_path, base_rev)
        merged = RevisionSet()
        ancestors = Ancestors(self)
        merged.update(basehist)
        mergeinfo = mergeinfo.items()
        for (branch, ranges) in mergeinfo:
            for (start, end, _) in ranges:
                merged.add_segment(branch, start, end)
                ancestors.add_natural(branch, end)
        if merged != basehist and ancestors == merged:
            # TODO: minimise so that only independent branch heads are listed
            # i.e. do not explicitly merge C if also merging A and B, and C is an ancestor of both A and B
            for (branch, ranges) in mergeinfo:
                branch = branch.lstrip("/")
                for (_, end, _) in ranges:
                    ancestor = self.export(self.git_ref, branch, end)
                    if ancestor is not None:
                        merges.append(ancestor)
    # Emit the fast-import commit command.
    self.output.printf("commit {}", self.git_ref)
    mark = self.output.newmark()
    self.output.printf("mark {}", mark)
    date = datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
    date = int(date.replace(tzinfo=timezone.utc).timestamp())
    if self.author_map is None:
        author = "{author} <{author}@{uuid}>".format(
            author=author, uuid=self.uuid)
    else:
        author = self.author_map[author]
    self.output.printf("committer {} {} +0000", author, date)
    if self.git_svn:
        log = "{}\n\ngit-svn-id: {}{}@{} {}\n".format(
            log, self.root, path.rstrip("/"), rev, self.uuid)
    # NOTE(review): `log` comes from revprops as bytes; encode() here implies
    # it was decoded somewhere not visible in this chunk — confirm.
    log = log.encode("utf-8")
    self.output.printf("data {}", len(log))
    self.output.file.write(log)
    self.output.printf("")
    if (init_export or merges) and gitrev is not None:
        self.output.printf("from {}", gitrev)
    for ancestor in merges:
        self.output.printf("merge {}", ancestor)
    for line in edits:
        self.output.printf("{}", line)
    self.output.printf("")
    return mark
# --- tail of an enclosing function (its `def` line is outside this chunk) ---
    # Choose the numeric base from mutually exclusive CLI flags; decimal default.
    base = 10
    if args.bin:
        base = 2
    elif args.oct:
        base = 8
    elif args.hex:
        base = 16
    return base


def show_help():
    # Delegate to the Rpn calculator's built-in help output.
    Rpn().help()


def main():
    # Install the Ctrl-C handler, parse CLI args, show help or run.
    signal(SIGINT, sigint_handler)
    args = get_args()
    if args.command_help:
        show_help()
        return
    result = run(args).result
    print(result, file=stdout)


if __name__ == '__main__':
    try:
        main()
    except BaseException as error:
        # NOTE(review): BaseException also catches SystemExit and
        # KeyboardInterrupt — confirm that is intended here.
        stderr.writelines([str(error)])
        exit(1)
# -*- coding: utf-8 -*-
"""WSGI entry point for the eduID SCIM API; warns when debug mode is on."""
import os
from sys import stderr

from eduid_scimapi.app import init_api

__author__ = 'lundberg'

# BUG FIX: os.environ.get() returns the raw string, so EDUID_APP_DEBUG=0 or
# =false still counted as enabled.  Parse it into a real boolean instead.
DEBUG = os.environ.get('EDUID_APP_DEBUG', 'false').strip().lower() in ('1', 'true', 'yes', 'on')

if DEBUG:
    stderr.writelines('----- WARNING! EDUID_APP_DEBUG is enabled -----\n')

api = init_api()
from xml.dom import minidom
from sys import stderr, exit, argv
from time import ctime

# Print help if not specified a file
if len(argv) == 1 or '-h' in argv or '--help' in argv:
    print __doc__          # Python 2 print statement
    exit(0)

# First positional argument is the XML configuration file.
file = argv[1]

# Try to open the file
try:
    input = minidom.parse(file)
except:
    # NOTE(review): bare except hides the real parse error type.
    stderr.writelines("Couldn't parse " + file + "\n")
    exit(1)

# Get the info we need
sender = str(input.getElementsByTagName('sender')[0].attributes['email'].value)
smtp_host = str(input.getElementsByTagName('smtp')[0].attributes['host'].value)
chores = input.getElementsByTagName('chore')
people = input.getElementsByTagName('person')


def advanceChore(choreNumber):
    # Next chore index, wrapping round-robin over the chore list.
    return (choreNumber + 1) % len(chores)


def choreByNumber(choresDomList, n):
    # Incomplete in this chunk: the body continues past the end of the fragment.
    for c in choresDomList:
# --- body of an enclosing input loop (the `while` header is outside this chunk) ---
entry = input("Enter a number(To quit simply type quit or exit): ")
try:
    f = bool  # NOTE(review): initialized to the bool *type* (truthy), not a bool value
    num = int(entry)
    if num == 1 or num == 2:
        f = False
    # NOTE(review): BUG — `num + 1 // 2` is `num + (1 // 2)` == num, not
    # (num + 1) // 2, so this scans range(2, num).
    for i in range(2, num + 1 // 2):
        if num % i == 0:
            f = True
            break
        else:
            # NOTE(review): BUG — breaking here stops after testing i == 2
            # only, so odd composites (e.g. 9) are reported as prime.
            f = False
            break
    if f:
        print("{0} is not prime".format(num))
    else:
        print("{0} is prime".format(num))
except ValueError as e:
    # Non-numeric input: either a quit command or a bad value.
    if entry.lower() == "exit" or entry.lower() == 'quit':
        print("\n***\tExiting\t***")
        sleep(2)
        break
    else:
        stderr.writelines(
            "Your value threw the following error: {0}.\nPlease enter a correct value!"
            .format(e))
def add_directory(self, path):
    """Log that directory *path* is being added, then continue editing
    with this same editor object."""
    if not self.rev.quiet:
        stderr.write("\n A " + path + "/")
        stderr.flush()
    return self
"""Read product IDs from stdin and print "<id>\t<product_name>" for each,
looked up in ProductDB.Products_Core.  Progress count goes to stderr."""
import sys
from sys import stdin
from sys import stdout
from sys import stderr
import time

import MySQLdb
from MySQLdb import cursors

# NOTE(review): credentials are hard-coded here — move them to config/env.
db = MySQLdb.connect(host='172.16.130.56', user='******', passwd='password',
                     db='ProductDB', cursorclass=cursors.SSCursor)
cur = db.cursor()


def get_result(cur):
    # Debug helper: print the first column of every row (unused below).
    for row in cur:
        print(row[0])


n = 0
for line in stdin:
    pid = line.rstrip()
    # SECURITY FIX: the query was built by concatenating stdin input into the
    # SQL string (SQL injection).  Use a parameterized query instead.
    cur.execute(
        'select product_name, brand from Products_Core where product_id = %s',
        (pid,))
    for row in cur:
        print(pid + '\t' + row[0])
    if n % 100 == 0:
        stderr.write(str(n) + '\n')  # progress marker every 100 inputs
    n += 1
    time.sleep(0.02)  # throttle to be gentle on the DB server
"""Compare the output of a program to a reference program: locate each
test's file under a base directory with find(1) and print the hits."""
import argparse
import os
import subprocess
# BUG FIX: stderr/stdout were used below but never imported.
from sys import stderr, stdout

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Compare the output of a program to a reference program')
    parser.add_argument("-v", "--verbose", action='store_true', help="Be verbose")
    parser.add_argument("-p", "--pretend", action='store_true', help="Run tests but don't write any data")
    parser.add_argument("--ref", type=argparse.FileType('r'), help="Path to reference file")
    parser.add_argument("basedir", nargs=1, help="Base path to search")
    args = parser.parse_args()
    # NOTE(review): `tl` is not imported in this chunk — presumably imported
    # elsewhere in the original file; confirm.
    problem_set = tl.load_set(args.ref, verbose=args.verbose, basedir=args.basedir)
    tests = problem_set['problems']
    basedir = args.basedir[0].rstrip("/") + "/"
    if not os.path.isdir(basedir):
        # BUG FIX: stderr was *called* as a function here.
        stderr.write("{}: not a valid directory\n".format(basedir))
        exit(1)
    if args.verbose:
        stderr.writelines("{}: {}\n".format(name, test['path'])
                          for (name, test) in tests.items())
    # BUG FIX: iterating the dict directly unpacked its keys; use .items().
    for (name, test) in tests.items():
        find_args = ['find', basedir, '-name', test['path']]
        if args.verbose:
            stderr.write("{}\n".format(find_args))
        # BUG FIX: check_output returns bytes by default; request text so
        # .split("\n") works.
        found = subprocess.check_output(find_args, universal_newlines=True).split("\n")
        stdout.writelines(item + "\n" for item in found if len(item) > 1)
def delete_entry(self, path, rev=None):
    """Log the deletion of *path* and record the corresponding
    git-fast-import delete edit on the current revision."""
    if not self.rev.quiet:
        stderr.write("\n D " + path)
        stderr.flush()
    self.rev.edits.append("D {}".format(path))
# --- body of an enclosing input loop (the `while` header is outside this chunk);
# near-duplicate of the same fragment earlier in this file ---
entry = input("Enter a number(To quit simply type quit or exit): ")
try:
    f = bool  # NOTE(review): initialized to the bool *type* (truthy), not a bool value
    num = int(entry)
    if num == 1 or num == 2:
        f = False
    # NOTE(review): BUG — `num+1 // 2` is `num + (1 // 2)` == num, not
    # (num + 1) // 2, so this scans range(2, num).
    for i in range(2, num+1 // 2):
        if num % i == 0:
            f = True
            break
        else:
            # NOTE(review): BUG — breaking here stops after testing i == 2
            # only, so odd composites (e.g. 9) are reported as prime.
            f = False
            break
    if f:
        print("{0} is not prime".format(num))
    else:
        print("{0} is prime".format(num))
except ValueError as e:
    # Non-numeric input: either a quit command or a bad value.
    if entry.lower() == "exit" or entry.lower() == 'quit':
        print("\n***\tExiting\t***")
        sleep(2)
        break
    else:
        stderr.writelines("Your value threw the following error: {0}.\nPlease enter a correct value!".format(e))
import fcntl, os
from sys import stdout, stderr


def to_blocking_mode(channel):
    """Clear O_NONBLOCK on *channel* if it is set.

    Returns True when the flag was cleared, False when the channel was
    already in blocking mode.
    """
    current = fcntl.fcntl(channel, fcntl.F_GETFL)
    if not (current & os.O_NONBLOCK):
        return False
    fcntl.fcntl(channel, fcntl.F_SETFL, current & ~os.O_NONBLOCK)
    return True


# At import time, normalize both standard streams back to blocking mode,
# reporting any reset on stderr.
if to_blocking_mode(stderr):
    stderr.writelines(["Reset STDERR to blocking"])
if to_blocking_mode(stdout):
    stderr.writelines(["Reset STDOUT to blocking"])
# --- tail of show_data() (its `def` and the matching `if` for the dangling
# `else` below are outside this chunk) ---
            print("|__ NOT FOUND")
        print("")
        stderr.writelines("\n[+] Done! \n")
    else:
        stderr.writelines("[-] Analytics ID not found...\n")


if __name__ == "__main__":
    # CLI entry point: print the banner, then analyze the given URL for
    # Google Analytics IDs and related domains.
    banner()
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', help="URL to extract Google Analytics ID", required=True)
    args = parser.parse_args()
    url = args.url
    # Default to https:// when no scheme was supplied.
    if not url.startswith("http"):
        url = "https://" + url
    stderr.writelines(f"[+] Analyzing url: {url}\n")
    tagmanager, data = get_googletagmanager(url)
    if tagmanager and data:
        stderr.writelines(f"[+] URL with UA: {data}\n")
        stderr.writelines(
            "[+] Obtaining information from builtwith and hackertarget\n")
        uas = get_UA(data)
        show_data(uas)
    elif data:
        show_data(data)
    else:
        # NOTE(review): "fount" looks like a typo for "found" in this message.
        stderr.writelines("[-] Tagmanager URL not fount\n")
def open_file(self, path, base):
    """Log that file *path* is being modified and hand back a FileEditor
    seeded with the file's current blob from the output map."""
    if not self.rev.quiet:
        stderr.write("\n M " + path)
        stderr.flush()
    return FileEditor(path, self.rev, original=self.rev.output[path])
from xml.dom import minidom
from sys import stderr, exit, argv
from time import ctime

# Print help if not specified a file
if len(argv) == 1 or '-h' in argv or '--help' in argv:
    print __doc__          # Python 2 print statement
    exit(0)

# First positional argument is the XML configuration file.
file = argv[1]

# Try to open the file
try:
    input = minidom.parse(file)
except:
    # NOTE(review): bare except hides the real parse error type.
    stderr.writelines("Couldn't parse " + file + "\n")
    exit(1)

# Get the info we need
sender = str(input.getElementsByTagName('sender')[0].attributes['email'].value)
smtp_host = str(input.getElementsByTagName('smtp')[0].attributes['host'].value)
chores = input.getElementsByTagName('chore')
people = input.getElementsByTagName('person')


def advanceChore(choreNumber):
    # Next chore index, wrapping round-robin over the chore list.
    return (choreNumber + 1) % len(chores)


def choreByNumber(choresDomList, n):
    # Find the <chore> element whose number attribute equals n.
    # Incomplete in this chunk: the body continues past the end of the fragment.
    for c in choresDomList:
        if int(c.attributes['number'].value) == n:
def add_file(self, path):
    """Log that file *path* is being added and hand back a fresh
    FileEditor for its content."""
    if not self.rev.quiet:
        stderr.write("\n A " + path)
        stderr.flush()
    return FileEditor(path, self.rev)
# SPDX-License-Identifier: BSD-2-Clause
"""\
DOCumentation injecTOR tool. Run with Python 3 as follows. - python3 -m doctor -o -i path/to/a/directory -i path/to/another/directory Overwrites all .h files anywhere under the specified directories. - python3 -m doctor -h Prints usage. """
# This file makes Doctor a runnable module, see:
# https://docs.python.org/3/using/cmdline.html#cmdoption-m

from sys import stderr, exit

# Local module with command line switches and such.
try:
    from doctor import main
except ImportError:
    # BUG FIX: was a bare `except:`, which also masked SyntaxError inside the
    # package and KeyboardInterrupt.  A genuine import failure means we were
    # not launched as `python -m doctor`.
    stderr.writelines(
        (__doc__, "\nCan only be run as a module, with the Python -m"
         " command line switch.\n\n"))
    exit(1)

exit(main.default_main())
import fcntl
from os import O_NONBLOCK as NONBLOCK
from sys import stdout, stderr


def get_flags(channel):
    """Return the file-status flags of *channel* (fcntl F_GETFL)."""
    return fcntl.fcntl(channel, fcntl.F_GETFL)


def set_flags(channel, flags):
    """Install *flags* as the file-status flags of *channel* (F_SETFL)."""
    fcntl.fcntl(channel, fcntl.F_SETFL, flags)


# At import time, force stdout and then stderr back to blocking mode,
# reporting each reset on stderr (same order and messages as before).
for _stream, _label in ((stdout, "STDOUT"), (stderr, "STDERR")):
    _state = get_flags(_stream)
    if _state & NONBLOCK:
        set_flags(_stream, _state & ~NONBLOCK)
        stderr.writelines(["Reset " + _label + " to blocking"])