def rest(args, method, url, bytes):
    """Parse CLI request options (proxy/cookie/params/header/auth) for a REST call.

    NOTE(review): the visible code only prepares the option values; the
    actual request appears to be issued outside this chunk — confirm.
    """
    # Each option arrives as an optional dict-literal string; parse_dict()
    # returns None (after reporting) when the literal is malformed.
    proxy = parse_dict(args['--proxy']) if args['--proxy'] is not None else None
    cookies = parse_dict(args['--cookie']) if args['--cookie'] is not None else None
    params = parse_dict(args['--params']) if args['--params'] is not None else None
    headers = parse_dict(args['--HEADER']) if args['--HEADER'] is not None else None
    # Default auth to None for consistency with the options above (the
    # original left it undefined when --auth was absent).
    auth = None
    if args['--auth'] is not None:
        auth = parse_dict(args['--auth'])
        try:
            # dict.has_key() was removed in Python 3; use membership tests.
            if 'user' in auth and 'password' in auth:
                auth = HTTPBasicAuth(auth['user'], auth['password'])
            else:
                colors.print_warning("res warning: Authorization keys invalid (correct format=> \"{'user':'******', 'password':'******'}\")")
                auth = None
        except (AttributeError, TypeError) as e:
            # TypeError covers `'user' in None` the way AttributeError
            # covered None.has_key() in the Python 2 original.
            colors.print_error('res error: ' + str(e))
def main() -> None:
    """Entry point: type-check typeshed for every version/platform combination."""
    args = parser.parse_args(namespace=CommandLineArgs())
    versions = args.python_version or SUPPORTED_VERSIONS
    platforms = args.platform or [sys.platform]
    tested_directories = frozenset(args.dir) if args.dir else TYPESHED_DIRECTORIES
    exit_status = 0
    checked_total = 0
    for (major, minor), platform in product(versions, platforms):
        # One immutable configuration per (version, platform) combination.
        run_config = TestConfig(
            verbose=args.verbose,
            dry_run=args.dry_run,
            exclude=args.exclude,
            major=major,
            minor=minor,
            directories=tested_directories,
            platform=platform,
            filter=args.filter,
        )
        exit_status, checked_now = test_typeshed(exit_status, args=run_config)
        checked_total += checked_now
    if exit_status:
        print_error(f"--- exit status {exit_status}, {checked_total} files checked ---")
        sys.exit(exit_status)
    if not checked_total:
        print_error("--- nothing to do; exit 1 ---")
        sys.exit(1)
    print(colored(f"--- success, {checked_total} files checked ---", "green"))
def fetch_all(self):
    """Import every Source_*.py module in the sources directory and fetch
    each source in a bounded pool of processes.

    Returns 1 on success, 0 when a source module fails to import.
    """
    srcs = list()
    # Absolute path of the sources directory.
    path = os.path.join(runPath, srcs_dir)
    for f in os.listdir(path):
        # BUGFIX: the original checked os.path.isfile(srcs_dir + f), which is
        # resolved against the current working directory — every file was
        # silently skipped unless the process was started from runPath.
        if os.path.isfile(os.path.join(path, f)):
            # Keep only files named Source_<name>.py (was two regex findall
            # calls anchored with ^Source_ and \.py$ — same selection).
            if f.startswith("Source_") and f.endswith(".py"):
                colors.print_warn("[-] Importing source %s" % f[7:-3])
                module = __import__(f[:-3])
                if not module:
                    colors.print_error("[!] Couldn't load %s" % f)
                    return 0
                srcs.append(
                    SourceModule(f[7:-3], runPath + "/" + srcs_dir + f, module))
    # One CProcess per source; each runs self.fetch_source(module, name).
    processes_list = [
        processes.CProcess(src.name, self.fetch_source, src.module, src.name)
        for src in srcs
    ]
    # Cap concurrent fetches to the configured update limit.
    processes_running_limit = config.current_config.process_limit_update
    processes.handle_processes(processes_list, processes_running_limit, 0.01)
    return 1
def http(args, method, url, bytes, raw, json, encoding):
    """Parse CLI request options (proxy/cookie/params/header/auth) for an HTTP call.

    NOTE(review): the visible code only prepares the option values; the
    actual request appears to be issued outside this chunk — confirm.
    """
    # Each option arrives as an optional dict-literal string; parse_dict()
    # returns None (after reporting) when the literal is malformed.
    proxy = parse_dict(args['--proxy']) if args['--proxy'] is not None else None
    cookies = parse_dict(args['--cookie']) if args['--cookie'] is not None else None
    params = parse_dict(args['--params']) if args['--params'] is not None else None
    headers = parse_dict(args['--HEADER']) if args['--HEADER'] is not None else None
    # Default auth to None for consistency with the options above (the
    # original left it undefined when --auth was absent).
    auth = None
    if args['--auth'] is not None:
        auth = parse_dict(args['--auth'])
        try:
            # dict.has_key() was removed in Python 3; use membership tests.
            if 'user' in auth and 'password' in auth:
                auth = HTTPBasicAuth(auth['user'], auth['password'])
            else:
                colors.print_warning(
                    "res warning: Authorization keys invalid (correct format=> \"{'user':'******', 'password':'******'}\")"
                )
                auth = None
        except (AttributeError, TypeError) as e:
            # TypeError covers `'user' in None` the way AttributeError
            # covered None.has_key() in the Python 2 original.
            colors.print_error('res error: ' + str(e))
def run_mypy_as_subprocess(directory: StrPath, flags: Iterable[str]) -> ReturnCode:
    """Invoke mypy in a subprocess on *directory*, echoing its output via print_error."""
    proc = subprocess.run(
        [sys.executable, "-m", "mypy", directory, *flags],
        capture_output=True,
    )
    # Report stderr before stdout, exactly as the subprocess produced them.
    for stream in (proc.stderr, proc.stdout):
        if stream:
            print_error(stream.decode())
    return proc.returncode
def run(args, allowed_methods, cmd=None):
    """Dispatch an HTTP request when args['<method>'] is one of *allowed_methods*.

    Prints an error for an invalid non-None method; does nothing when no
    method was given.
    """
    method = args['<method>']
    # `method in allowed_methods` replaces the original
    # any(method == val for val in allowed_methods) — same semantics.
    if method in allowed_methods:
        url = args['<url>']
        headers(args, url)
        # Renamed from `bytes`/`json` pattern locally to avoid shadowing builtins.
        raw, as_bytes, as_json, encoding = parse_options(args)
        http(args, method, url, as_bytes, raw, as_json, encoding)
    elif method is not None:
        # `is not None` replaces the non-idiomatic `not ... == None`.
        colors.print_error('res error: <method> not valid')
def insert_vulnerability(self, vulnerability, collection_name):
    """Insert *vulnerability* into *collection_name* and record the insert in local stats."""
    target = self.get_collection(collection_name)
    try:
        target.insert_one(vulnerability)
    except pymongo.errors.PyMongoError as err:
        colors.print_error(err)
    # TODO: See what kind of fields have been updated
    # NOTE: stats are appended even when the insert failed (original behavior).
    local_stats = self.find_local_stats(collection_name)
    local_stats.vulnerability_inserts.append(vulnerability["_id"])
def main():
    """Fetch every vulnerability source and report the generated signatures."""
    colors.print_info("[-] ProHacktive fetching running...")
    manager = SourcesManager()
    if not manager.fetch_all():
        # Guard clause instead of the original if/else.
        colors.print_error("[!] ProHacktive fetching failed")
        return
    colors.print_info("[-] Sources generated signatures:")
    for src in manager.read_srcs_sigs():
        colors.print_info(" Source Name: %s -> %s" % (src["_id"], src["sig"]))
    colors.print_success("[x] ProHacktive fetching done!")
def fetch_source(self, module, module_name):
    """Run *module*'s fetch entry point (attribute named by self.str_fetch_func).

    Logs a warning before and a success message after the fetch; reports an
    error when the module has no fetch function.
    """
    # BUGFIX: getattr without a default raises AttributeError when the
    # function is missing, making the else branch unreachable; default to
    # None so the missing-function path actually reports the error.
    fetch_func = getattr(module, self.str_fetch_func, None)
    if fetch_func:
        # Fetch data from the source
        colors.print_warn("[-] Loading source %s" % module_name)
        fetch_func()
        colors.print_success("[x] Loaded source %s" % module_name)
    else:
        # Typo fix: "avaiable" -> "available".
        colors.print_error(
            "[!] fetch_handler function isn't available on module %s" %
            module_name)
def update_src_dat(self, src_name, dat):
    """Upsert the `dat` payload for source *src_name*."""
    collection = self.get_collection(self.get_srcs_dat_collection_name())
    # Try to update first
    try:
        result = collection.update_one({"_id": src_name},
                                       {"$set": {
                                           "dat": dat
                                       }})
    except pymongo.errors.PyMongoError as e:
        # BUGFIX: the original fell through after the exception and crashed
        # with NameError on `result`; report the error and bail out instead.
        colors.print_error(e)
        return
    # If it doesn't exist, insert it
    if result.matched_count == 0:
        self.insert_src_dat(src_name, dat)
def update_vulnerability(self, vulnerability, collection_name):
    """Upsert *vulnerability* (keyed by _id) and record the update in local stats."""
    collection = self.get_collection(collection_name)
    # Try to update first
    try:
        result = collection.update_one({"_id": vulnerability["_id"]},
                                       {"$set": vulnerability})
    except pymongo.errors.PyMongoError as e:
        # BUGFIX: the original fell through after the exception and crashed
        # with NameError on `result`; report the error and bail out instead.
        colors.print_error(e)
        return
    # If it doesn't exist, insert it
    if result.matched_count == 0:
        self.insert_vulnerability(vulnerability, collection_name)
    else:
        stats = self.find_local_stats(collection_name)
        stats.vulnerability_updates.append(vulnerability["_id"])
def main():
    """CLI entry point for `res`: parse docopt args and dispatch the REST call."""
    allowed_methods = ['POST', 'GET', 'DELETE', 'PUT',
                       'post', 'get', 'delete', 'put']
    args = docopt(__doc__, argv=None, help=True,
                  version='res v0.1.0', options_first=False)
    #####For all methods#####
    method = args['<method>']
    if method in allowed_methods:
        url = args['<url>']
        headers(args, url)
        body_bytes = get_bytes(args)
        rest(args, method, url, body_bytes)
    else:
        colors.print_error('res error: <method> not valid')
    #########################
    # NOTE: exits with status 1 unconditionally, even after a successful
    # request (original behavior, preserved).
    sys.exit(1)
def fetch(self) -> bytes:
    """Download self.url and return its (fully decompressed) body.

    Returns False on a network error — the annotation says bytes, but
    callers presumably truthiness-test the result (TODO confirm).
    """
    try:
        response = requests.get(self.url)
    except requests.RequestException as e:
        # BUGFIX: "[!]" + e concatenated str + exception -> TypeError;
        # convert the exception to str first.
        colors.print_error("[!]" + str(e))
        return False
    # Get response
    data = response.content
    # Check if data is compressed
    if is_compressed(data):
        colors.print_info("[-] Decompressing %s" % self.url)
        # Write to temporary file the response
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)
        temp_filename = temp_dir + "tempfile"
        # Sadly we need to write it to a file because pyunpack can't yet
        # decompress from binary data directly from memory.
        # `with` replaces the original open/write/close (no leak on error).
        with open(temp_filename, "wb") as temp_file:
            temp_file.write(data)
        # Decompress
        filename = temp_filename
        archive_dir = temp_dir + "archive/"
        if not os.path.exists(archive_dir):
            os.mkdir(archive_dir)
        # Sometimes it's compressed multiple times
        while True:
            arch = pyunpack.Archive(filename)
            arch.extractall(archive_dir)
            os.remove(filename)
            filename = archive_dir + os.listdir(archive_dir)[0]
            if not is_file_compressed(filename):
                break
        with open(filename, "rb") as temp_file:
            data = bytes(temp_file.read())
        os.remove(filename)
    return data
def test_third_party_distribution(distribution: str, major: int, minor: int, args: argparse.Namespace) -> tuple[int, int]:
    """Test the stubs of a third-party distribution.

    Return a tuple of (mypy's return code, number of checked files).
    """
    files: list[str] = []
    configurations: list[MypyDistConf] = []
    seen_dists: set[str] = set()
    add_third_party_files(distribution, major, files, args, configurations, seen_dists)

    print(f"testing {distribution} ({len(files)} files)... ", end="")

    # An empty file list means the distribution metadata is broken — abort.
    if not files:
        print_error("no files found")
        sys.exit(1)

    return run_mypy(args, configurations, major, minor, files), len(files)
def main():
    """CLI entry point for `res`: parse docopt args and dispatch the REST call."""
    allowed_methods = ['POST', 'GET', 'DELETE', 'PUT',
                       'post', 'get', 'delete', 'put']
    args = docopt(__doc__, argv=None, help=True,
                  version='res v0.1.0', options_first=False)
    #####For all methods#####
    method = args['<method>']
    if method in allowed_methods:
        url = args['<url>']
        headers(args, url)
        body_bytes = get_bytes(args)
        rest(args, method, url, body_bytes)
    else:
        colors.print_error('res error: <method> not valid')
    #########################
    # NOTE: exits with status 1 unconditionally, even after a successful
    # request (original behavior, preserved).
    sys.exit(1)
def __init__(self,
             host=config.current_config.db_host,
             port=config.current_config.db_port,
             db_name=config.current_config.db_name,
             user=config.current_config.db_user,
             password=config.current_config.db_password):
    """Open a MongoDB client and select the configured database.

    BUGFIX: the original defaulted `password` to db_user (copy-paste), so
    the username was sent as the password whenever the default was used.
    (Config exposes the password as `db_password`; see the config module.)
    """
    self.port = port
    self.host = host
    self.db_name = db_name
    self.user = user
    self.password = password
    # Per-collection statistics accumulated by the update/insert helpers.
    self.stats = list()
    try:
        self.db = pymongo.MongoClient(host,
                                      port,
                                      username=user,
                                      password=password,
                                      maxPoolSize=None)
    except pymongo.errors.PyMongoError as e:
        colors.print_error(e)
    # NOTE(review): if MongoClient raised above, self.db is unset and the
    # next line raises — confirm whether a hard failure is intended here.
    self.collections = self.db[self.db_name]
def main() -> None:
    """Run test_typeshed for every selected Python version."""
    args = parser.parse_args()
    all_versions = [(3, 11), (3, 10), (3, 9), (3, 8), (3, 7), (3, 6), (2, 7)]
    # --python-version values act as prefixes, e.g. "3" selects every 3.x.
    if args.python_version:
        selected = [
            v for v in all_versions
            if any(("%d.%d" % v).startswith(prefix) for prefix in args.python_version)
        ]
    else:
        selected = all_versions
    if not selected:
        print_error("--- no versions selected ---")
        sys.exit(1)
    exit_status = 0
    checked_total = 0
    for major, minor in selected:
        exit_status, checked_now = test_typeshed(exit_status, major, minor, args)
        checked_total += checked_now
    if exit_status:
        print_error(f"--- exit status {exit_status}, {checked_total} files checked ---")
        sys.exit(exit_status)
    if not checked_total:
        print_error("--- nothing to do; exit 1 ---")
        sys.exit(1)
    print(colored(f"--- success, {checked_total} files checked ---", "green"))
systems = enum('LINUX', 'WINDOWS', 'DARWIN') osname = platform.system() system = None if osname == 'Windows' or osname.startswith('CYGWIN_NT'): system = systems.WINDOWS osname = 'mingw32' elif osname == 'Linux': system = systems.LINUX osname = 'linux' elif osname == 'Darwin': system = systems.DARWIN osname = 'darwin' else: print_error("Unsupported system '%s'" % osname) sys.exit(1) def platformFix(sboxPath): if system == systems.WINDOWS: # Otherwise Haskell Platform binaries (with cabal) will overwrite the cabal we install cabalBinariesPath = os.path.join(os.environ["APPDATA"], "cabal", "bin") #print_info("Prepending %s to PATH in order to overwrite old Haskell Platform binaries" % cabalBinariesPath) os.environ["PATH"] = cabalBinariesPath + ';' + os.environ["PATH"] os.environ["HOME"] = os.path.expanduser("~") descriptorUrl = "http://protobuf.googlecode.com/svn/trunk/src/google/protobuf/descriptor.proto" descriptorDstPath = os.path.join(sboxPath, "proto", "google", "protobuf", "descriptor.proto") if not os.path.exists(descriptorDstPath):
def parse_dict(dictionary):
    """Safely evaluate a Python dict/list-literal string.

    Returns the parsed object, or None (after printing an error) when the
    string is not a valid literal.
    """
    try:
        return ast.literal_eval(dictionary)
    except (SyntaxError, ValueError) as e:
        # Fixes: Python 2 `except SyntaxError, e` is a syntax error in
        # Python 3, and literal_eval raises ValueError for well-formed but
        # non-literal input, which the original did not catch.
        colors.print_error("res error in ***" + dictionary + "***: " + str(e))
        return None
data=data, headers=headers, auth=auth, cookies=cookies, proxies=proxy, stream=raw) if bytes == True: print r.content elif raw == True: print r.raw elif json == True: try: print r.json() except ValueError, e: colors.print_error('res error: ' + str(e)) elif encoding == True: print r.encoding else: print r.text def run(args, allowed_methods, cmd=None): if any(args['<method>'] == val for val in allowed_methods): url = args['<url>'] headers(args, url) raw, bytes, json, encoding = parse_options(args) http(args, args['<method>'], url, bytes, raw, json, encoding) else: if not args['<method>'] == None: colors.print_error('res error: <method> not valid')
def print_command_failure(message: str, e: subprocess.CalledProcessError) -> None:
    """Report a failed subprocess invocation to stderr."""
    print_error("fail")
    # Blank line + message in a single call (same bytes as the original
    # two prints: "\n" then message + "\n").
    print(f"\n{message}", file=sys.stderr)
    print_command_output(e)
def insert_src_sig(self, src_name, sig):
    """Store signature *sig* for source *src_name* in the signatures collection."""
    sigs = self.get_collection(self.get_srcs_sigs_collection_name())
    try:
        sigs.insert_one({"_id": src_name, "sig": sig})
    except pymongo.errors.PyMongoError as err:
        colors.print_error(err)
def parse_dict(dictionary):
    """Safely evaluate a Python dict/list-literal string.

    Returns the parsed object, or None (after printing an error) when the
    string is not a valid literal.
    """
    try:
        return ast.literal_eval(dictionary)
    except (SyntaxError, ValueError) as e:
        # Fixes: Python 2 `except SyntaxError, e` is a syntax error in
        # Python 3, and literal_eval raises ValueError for well-formed but
        # non-literal input, which the original did not catch.
        colors.print_error("res error in ***"+dictionary+"***: " + str(e))
        return None
"db_user": self.db_user, "db_pass": self.db_password} config_gen["VULNERS"] = {"vulners_api_key": self.vulners_api_key} config_gen["processes"] = { "process_limit_update": int(self.process_limit_update), "process_limit_fetch": int(self.process_limit_fetch)} return config_gen default_config = Config() current_config = default_config colors.print_info("[-] Parsing %s" % config_filename) config = configparser.ConfigParser() dataset = config.read(config_filename) if len(dataset) == 0: colors.print_error("[!] Couldn't read config file at %s" % config_filename) colors.print_warn("[!] Using default configuration") else: db = config["DATABASE"] vulners_api_key = config["VULNERS"]["vulners_api_key"] processes = config["PROCESSES"] current_config = Config( db["db_host"], db["db_port"], db["db_name"], db["db_user"], db["db_pass"], vulners_api_key, processes["process_limit_update"], processes["process_limit_fetch"])
def main() -> None:
    """Run test_typeshed for every selected Python version."""
    args = parser.parse_args()
    all_versions = [(3, 11), (3, 10), (3, 9), (3, 8), (3, 7), (3, 6), (2, 7)]
    # --python-version values act as prefixes, e.g. "3" selects every 3.x.
    if args.python_version:
        selected = [
            v for v in all_versions
            if any(("%d.%d" % v).startswith(prefix) for prefix in args.python_version)
        ]
    else:
        selected = all_versions
    if not selected:
        print_error("--- no versions selected ---")
        sys.exit(1)
    exit_status = 0
    checked_total = 0
    for major, minor in selected:
        exit_status, checked_now = test_typeshed(exit_status, major, minor, args)
        checked_total += checked_now
    if exit_status:
        print_error(f"--- exit status {exit_status}, {checked_total} files checked ---")
        sys.exit(exit_status)
    if not checked_total:
        print_error("--- nothing to do; exit 1 ---")
        sys.exit(1)
    print(colored(f"--- success, {checked_total} files checked ---", "green"))


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print_error("\n\n!!!\nTest aborted due to KeyboardInterrupt\n!!!")
        sys.exit(1)
def run_stubtest(dist: Path, *, verbose: bool = False) -> bool:
    # Run mypy.stubtest on one third-party stub distribution inside a fresh
    # venv.  Returns True when stubtest passes (or the dist is marked skip),
    # False on any install or stubtest failure.
    with open(dist / "METADATA.toml") as f:
        metadata = dict(tomli.loads(f.read()))
    print(f"{dist.name}... ", end="")
    # Distributions can opt out of stubtest via [tool.stubtest] skip = true.
    stubtest_meta = metadata.get("tool", {}).get("stubtest", {})
    if stubtest_meta.get("skip", False):
        print(colored("skipping", "yellow"))
        return True
    with tempfile.TemporaryDirectory() as tmp:
        venv_dir = Path(tmp)
        venv.create(venv_dir, with_pip=True, clear=True)
        # NOTE(review): "bin" paths assume a POSIX venv layout (Windows uses
        # "Scripts") — confirm this script only runs on POSIX CI.
        pip_exe = str(venv_dir / "bin" / "pip")
        python_exe = str(venv_dir / "bin" / "python")
        dist_version = metadata["version"]
        assert isinstance(dist_version, str)
        # Pin the runtime package to the exact version the stubs target.
        dist_req = f"{dist.name}=={dist_version}"
        # If @tests/requirements-stubtest.txt exists, run "pip install" on it.
        req_path = dist / "@tests" / "requirements-stubtest.txt"
        if req_path.exists():
            try:
                pip_cmd = [pip_exe, "install", "-r", str(req_path)]
                subprocess.run(pip_cmd, check=True, capture_output=True)
            except subprocess.CalledProcessError as e:
                print_command_failure("Failed to install requirements", e)
                return False
        # We need stubtest to be able to import the package, so install mypy into the venv
        # Hopefully mypy continues to not need too many dependencies
        # TODO: Maybe find a way to cache these in CI
        dists_to_install = [dist_req, get_mypy_req()]
        dists_to_install.extend(metadata.get("requires", []))
        pip_cmd = [pip_exe, "install"] + dists_to_install
        try:
            subprocess.run(pip_cmd, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            print_command_failure("Failed to install", e)
            return False
        # ignore_missing_stub defaults to True unless the metadata disables it.
        ignore_missing_stub = ["--ignore-missing-stub"] if stubtest_meta.get("ignore_missing_stub", True) else []
        # Top-level directories are packages; loose .pyi files are modules.
        packages_to_check = [d.name for d in dist.iterdir() if d.is_dir() and d.name.isidentifier()]
        modules_to_check = [d.stem for d in dist.iterdir() if d.is_file() and d.suffix == ".pyi"]
        stubtest_cmd = [
            python_exe,
            "-m",
            "mypy.stubtest",
            # Use --custom-typeshed-dir in case we make linked changes to stdlib or _typeshed
            "--custom-typeshed-dir",
            str(dist.parent.parent),
            *ignore_missing_stub,
            *packages_to_check,
            *modules_to_check,
        ]
        allowlist_path = dist / "@tests/stubtest_allowlist.txt"
        if allowlist_path.exists():
            stubtest_cmd.extend(["--allowlist", str(allowlist_path)])
        try:
            subprocess.run(stubtest_cmd, env={"MYPYPATH": str(dist), "MYPY_FORCE_COLOR": "1"}, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            # On failure: show the exact commands, stubtest's output, the
            # venv contents, and either allowlist-cleanup advice or a freshly
            # generated allowlist to paste in.
            print_error("fail")
            print_commands(dist, pip_cmd, stubtest_cmd)
            print_command_output(e)
            print("Ran with the following environment:", file=sys.stderr)
            ret = subprocess.run([pip_exe, "freeze", "--all"], capture_output=True)
            print_command_output(ret)
            if allowlist_path.exists():
                print(
                    f'To fix "unused allowlist" errors, remove the corresponding entries from {allowlist_path}',
                    file=sys.stderr
                )
                print(file=sys.stderr)
            else:
                print(f"Re-running stubtest with --generate-allowlist.\nAdd the following to {allowlist_path}:", file=sys.stderr)
                ret = subprocess.run(stubtest_cmd + ["--generate-allowlist"], env={"MYPYPATH": str(dist)}, capture_output=True)
                print_command_output(ret)
            return False
        else:
            print_success_msg()
        if verbose:
            print_commands(dist, pip_cmd, stubtest_cmd)
        return True
def run_mypy(
    args: argparse.Namespace,
    configurations: list[MypyDistConf],
    major: int,
    minor: int,
    files: list[str],
    *,
    custom_typeshed: bool = False,
) -> int:
    """Run mypy in-process over *files*, using a throwaway config file built
    from the per-distribution *configurations*.  Returns mypy's exit code."""
    try:
        from mypy.api import run as mypy_run
    except ImportError:
        print_error("Cannot import mypy. Did you install it?")
        sys.exit(1)
    # Write one [mypy-<module>] section per distribution to a temp ini file.
    with tempfile.NamedTemporaryFile("w+") as temp:
        temp.write("[mypy]\n")
        for dist_conf in configurations:
            temp.write("[mypy-%s]\n" % dist_conf.module_name)
            for k, v in dist_conf.values.items():
                temp.write(f"{k} = {v}\n")
        temp.flush()
        flags = get_mypy_flags(args, major, minor, temp.name, custom_typeshed=custom_typeshed)
        mypy_args = [*flags, *files]
        if args.verbose:
            print("running mypy", " ".join(mypy_args))
        if args.dry_run:
            exit_code = 0
        else:
            # mypy.api.run returns its output, but anything printed directly
            # to the real streams is captured via redirection as well.
            stdout_redirect, stderr_redirect = StringIO(), StringIO()
            with redirect_stdout(stdout_redirect), redirect_stderr(stderr_redirect):
                returned_stdout, returned_stderr, exit_code = mypy_run(mypy_args)
        if exit_code:
            print_error("failure\n")
            captured_stdout = stdout_redirect.getvalue()
            captured_stderr = stderr_redirect.getvalue()
            # Report stderr before stdout, returned before captured.
            if returned_stderr:
                print_error(returned_stderr)
            if captured_stderr:
                print_error(captured_stderr)
            if returned_stdout:
                print_error(returned_stdout)
            if captured_stdout:
                print_error(captured_stdout, end="")
        else:
            print_success_msg()
    return exit_code
headers=headers, auth=auth, cookies=cookies, proxies=proxy, stream=raw ) if bytes == True: print r.content elif raw == True: print r.raw elif json == True: try: print r.json() except ValueError, e: colors.print_error('res error: ' + str(e)) elif encoding == True: print r.encoding else: print r.text def run(args, allowed_methods, cmd=None): if any(args['<method>'] == val for val in allowed_methods): url = args['<url>'] headers(args, url) raw, bytes, json, encoding = parse_options(args) http(args, args['<method>'], url, bytes, raw, json, encoding) else: if not args['<method>'] == None: colors.print_error('res error: <method> not valid')
def insert_src_dat(self, src_name, dat):
    """Store the `dat` payload for source *src_name* in the dat collection."""
    dats = self.get_collection(self.get_srcs_dat_collection_name())
    try:
        dats.insert_one({"_id": src_name, "dat": dat})
    except pymongo.errors.PyMongoError as err:
        colors.print_error(err)