def convert_nb(nbpath, output_folder):
    from subprocess import check_call as sh

    # Currently using abspath as workaround for this bug:
    # https://github.com/jupyter/nbconvert/issues/293
    basename = os.path.basename(nbpath)
    nb_abspath = os.path.abspath(nbpath)
    basename_stem = basename.rsplit('.', 1)[0]
    html_out_path = os.path.join(output_folder, basename_stem + ".html")
    html_out_path = os.path.abspath(html_out_path)

    # Convert to .html for Sphinx to pull in
    sh(["jupyter", "nbconvert",
        "--to", "html",
        "--execute",
        # '--template', 'basic',
        '--template', 'custom_nbconvert_template',
        nb_abspath,
        "--output", html_out_path,
        ])

    # Clear notebook output in case this is being run locally by a dev,
    # preserves clean diffs.
    sh(["jupyter-nbconvert",
        "--to", "notebook",
        # "--inplace",
        "--ClearOutputPreprocessor.enabled=True",
        nb_abspath,
        "--output", nb_abspath,
        ])
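# A minimal usage sketch (assumes `import os` at module top and a
# `custom_nbconvert_template` on nbconvert's template search path):
#
#     convert_nb("notebooks/intro.ipynb", "docs/_build/html")
#
# If shelling out is undesirable, nbconvert also has an in-process API;
# a sketch (not what this module uses):
#
#     from nbconvert import HTMLExporter
#     body, resources = HTMLExporter().from_filename("notebooks/intro.ipynb")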
def __verify(u):
    """ Verifies a given file with GPG. """
    f = u.split('/')[-1]
    devnull = open(os.devnull, 'w')
    results = 0
    try:
        # gpg exits nonzero on a bad signature, so failure is reported only
        # when both the .asc and the .sig checks fail.
        if (sh(['gpg', '--verify', f + ".asc"], stdout=devnull, stderr=devnull) and
                sh(['gpg', '--verify', f + ".sig"], stdout=devnull, stderr=devnull)):
            print(clr.WARNING + "Verification Failed!" + clr.ENDC)
            results = 1
        else:
            print(clr.OKGREEN + "Verified" + clr.ENDC)
    except OSError:
        print(clr.WARNING + "GPG not found on system." + clr.ENDC)
        results = 1
    finally:
        # Remove any verification files
        if os.path.isfile(f + ".sig"):
            os.remove(f + ".sig")
        if os.path.isfile(f + ".asc"):
            os.remove(f + ".asc")
    return results
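# A library-based sketch of the same check, assuming the third-party
# `python-gnupg` package is available (not what this module uses):
#
#     import gnupg
#     gpg = gnupg.GPG()
#     with open(f + ".asc", "rb") as sig:
#         verified = gpg.verify_file(sig, f)  # truthy when the signature is valid
#     print("Verified" if verified else "Verification Failed!")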
def __init__(self, results, results_number=10):
    color = Color()
    for i, f in enumerate(results[0:results_number]):
        g = f.split("/")
        print(color.GREEN + color.BOLD + str(i) + ". " + color.BLUE + g[-1] + color.END)
        print(color.GRAY + f + color.END + "\n")
    if results != []:
        choice = int(input("Select search result (by number): "))
        sh(['xdg-open', results[choice]])
def test_main(self):
    cmd = "echo 'Lucy,Artist,16' | python xcut -f 1,3 -od '\t' -t index "
    out = sh(cmd)
    assert "1 3" in out
    assert "Lucy 16" in out

    cmd = "echo 'name=Lucy||job=Artist||age=16' | python xcut -f age -d '||' -od ',' -t kv"
    out = sh(cmd)
    assert "16" in out

    cmd = '''echo 'Lucy,"98,99",23' | python xcut -f scores,name --titles name,scores,age --from-csv --to-csv'''
    assert '"98,99",Lucy' in sh(cmd)
def convert_nb(nbname, execute=False):
    """Optionally execute the notebook, convert it to .rst with tag
    filtering, then clear its output."""
    if execute:
        # Execute the notebook
        sh([
            "jupyter", "nbconvert", "--to", "notebook",
            "--execute", "--inplace", nbname
        ])

    # Convert to .rst for Sphinx
    sh([
        "jupyter", "nbconvert", "--to", "rst", nbname,
        "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
        "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
        "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}"
    ])

    if execute:
        # Clear notebook output
        sh([
            "jupyter", "nbconvert", "--to", "notebook", "--inplace",
            "--ClearOutputPreprocessor.enabled=True", nbname
        ])

    # Touch the .rst file so it has a later modify time than the source
    sh(["touch", nbname.replace('.ipynb', '') + ".rst"])
def convert_nb(nbname):
    # Execute the notebook
    exec_cmdline = [
        "jupyter", "nbconvert",
        "--to", "notebook",
        "--execute",
        "--inplace",
        nbname,
    ]

    kernel = os.environ.get("NB_KERNEL", "")
    if kernel:
        exec_cmdline.append(f"--ExecutePreprocessor.kernel_name={kernel}")

    sh(exec_cmdline)

    # Convert to .rst for Sphinx
    sh([
        "jupyter", "nbconvert", "--to", "rst", nbname,
        "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
        "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
        "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}"
    ])

    # Clear notebook output
    sh([
        "jupyter", "nbconvert", "--to", "notebook", "--inplace",
        "--ClearOutputPreprocessor.enabled=True", nbname
    ])

    # Touch the .rst file so it has a later modify time than the source
    sh(["touch", nbname + ".rst"])
def encrypt(self, f, passphrase, output):
    """GPG encrypt file at path f with a passphrase

    Args:
        f (str): path of the file to encrypt
        passphrase (str): secret key with which to encrypt the file
        output (str): path of the encrypted output file

    Returns:
        the return value of the gpg invocation
    """
    return sh(['gpg', '--output', output,
               '--symmetric',
               '--batch', '--yes',
               '--passphrase', passphrase,
               '--cipher-algo', 'AES256',
               '--s2k-mode', '3',
               '--s2k-count', '65011712',
               '--s2k-digest-algo', 'SHA512',
               '--s2k-cipher-algo', 'AES256',
               f])
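# The matching symmetric decryption, as a sketch; the gpg flags mirror the
# call above, and `encrypted` / `plain` are hypothetical paths:
#
#     sh(['gpg', '--output', plain,
#         '--decrypt',
#         '--batch', '--yes',
#         '--passphrase', passphrase,
#         encrypted])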
def run_exploit():
    print("Running Example")
    ret = sh(['/home/a.out'])
    print("Return Code: %d" % (ret, ))
    if ret == 0:
        return 2
    else:
        return 0
def create_repo(self, output=None, verb=None, quiet=None):
    ''' Creates metadata of rpm repository '''
    opts = ''
    if verb:
        opts += ' --verbose'
    elif quiet:
        opts += ' --quiet'
    return sh('/bin/createrepo_c {} -d -x *.src.rpm {}'.format(opts, output))
def merge(files, dst, timestart=None, timeend=None):
    input = []
    findhash = None
    outpoint = 0
    Frame = namedtuple('Frame', 'stream, dts, pts, duration, size, hash')
    _files = []
    for file in sorted(files, key=tstamp):
        begin = datetime.utcfromtimestamp(tstamp(file) + 3 * 3600)  # MSK
        if timestart and begin < timestart:
            continue
        if timeend and begin > timeend:
            continue
        _files.append(file)

    ##with progress(, label='Merging') as bar:
    echo('Merging .. %s - %s' % (timestart.strftime('%H:%M'),
                                 timeend.strftime('%H:%M')), end='')

    # TODO: do not reverse, calculate inpoint instead of outpoint.
    for file in reversed(_files):
        cmd = 'ffmpeg -i %s -an -f framemd5 -c copy -' % file
        data = sh(cmd, stderr=DEVNULL, shell=True).split(b'\n')
        timebase = [x for x in data if x.startswith(b'#tb')][0]
        tb_num, tb_den = list(map(int, timebase.split()[-1].split(b'/')))
        frames = [
            x.replace(b',', b'').split() for x in data
            if x and not x.startswith(b'#')
        ]
        for line in reversed(frames[1:]):
            frame = Frame(*line)
            if frame.hash == findhash:
                outpoint = float(frame.pts) * tb_num / tb_den
                break
        if outpoint:
            input.append('file %s\noutpoint %s' % (file, outpoint))
        else:
            input.append('file %s' % file)
        #print(int(outpoint), end=';', flush=True)
        echo('.', end='')
        findhash = Frame(*frames[0]).hash

    cmd = 'ffmpeg -nostats -hide_banner -avoid_negative_ts make_zero -fflags +genpts -f concat -safe 0' \
          ' -protocol_whitelist file,pipe -i - -c copy -flags +global_header -movflags +faststart -y '
    #print(cmd + dst)
    pipe = Popen(cmd + dst, stdin=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = pipe.communicate('\n'.join(reversed(input)).encode('utf8'))
    if pipe.returncode != 0:
        raise Exception(stderr.decode('utf8'))
    print()
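# For reference, the concat-demuxer list fed to ffmpeg's stdin above looks
# like this (file names and timestamps illustrative); `outpoint` trims each
# segment where the next recording's first frame hash was matched:
#
#     file rec-0003.mp4
#     outpoint 59.96
#     file rec-0002.mp4
#     outpoint 60.04
#     file rec-0001.mp4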
def run_exploit():
    try:
        cflag = os.environ['cflag']
    except KeyError:
        cflag = ''
    # Only append cflag when set; passing an empty-string argument makes
    # clang fail with "no such file or directory: ''".
    cmd = ['clang', 'example.c', '-ljpeg']
    if cflag:
        cmd.append(cflag)
    if sh(cmd):
        return 1
    ret = sh(['./a.out'])
    if ret == 0:  # Not Vulnerable
        return 0
    elif ret == 77:  # MemSan
        return 0
    elif ret == 2:  # Vulnerable
        return 2
    else:  # Other Error
        return 1
def compress(self, f, output=None):
    """Compress file with GNU tar

    Args:
        f (str): path of the file
        output (str): name of the resulting file (optional)
    """
    if output is None:
        output = f
    return sh(['nocache', 'nice', '-n', '20',
               'tar', '-cf', output + '.tar', f])
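# The inverse operation, as a sketch (plain tar, without the nocache/nice
# wrappers used above):
#
#     sh(['tar', '-xf', output + '.tar'])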
def build_srpm(self, specFile, output='build'):
    ''' Build source rpm '''
    command = '/bin/rpmbuild ' \
              '-D "_topdir ." ' \
              '-D "_builddir {out}" ' \
              '-D "_buildrootdir {out}" ' \
              '-D "_rpmdir {out}" ' \
              '-D "_sourcedir {out}" ' \
              '-D "_specdir {out}" ' \
              '-D "_srcrpmdir {out}" ' \
              '-bs {}'.format(specFile, out=output)
    # rpmbuild -bs prints "Wrote: /path/to/pkg.src.rpm"; grab the path.
    return re.search('/.*', sh(command)).group()
def hash(self, f):
    """Evaluate sha256sum for a file

    Args:
        f (str): path of the file to hash

    Returns:
        (str) sha256sum of the file
    """
    # `sha256sum` prints "<digest>  <filename>"; keep only the digest.
    out = sh(['sha256sum', f])
    out = str(out)
    out = out.split(' ')
    return out[0]
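# An in-process alternative, assuming it is acceptable to avoid the shell
# entirely (a sketch, not what this class uses):
#
#     import hashlib
#
#     def sha256(path, chunk=1 << 20):
#         h = hashlib.sha256()
#         with open(path, 'rb') as fh:
#             for block in iter(lambda: fh.read(chunk), b''):
#                 h.update(block)
#         return h.hexdigest()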
def __init__(self, command, window_title_pattern=[], theme="Adwaita",
             variant="dark", refresh_time=100, verbose=False):
    executable_path = dirname(abspath(__file__))
    xdotool_command = ['xdotool', 'search', '--name'] + window_title_pattern
    xprop_command = [
        'xprop', '-f', '_GTK_THEME_VARIANT', '8u',
        '-set', '_GTK_THEME_VARIANT', variant, "-id"
    ]
    gtk_dark_environ = environ.copy()
    gtk_dark_environ["GTK_THEME"] = theme + ":" + variant
    output = Popen(command, encoding="UTF-8", stdout=PIPE, env=gtk_dark_environ)
    del gtk_dark_environ["GTK_THEME"]
    if window_title_pattern is not None:
        while True:
            windows = []
            try:
                windows = sh(xdotool_command, encoding="UTF-8").split("\n")
            except Exception:
                pass
            windows = [w for w in windows if w != ""]
            for w in windows:
                try:
                    sh(xprop_command + [w], encoding="UTF-8")
                except Exception:
                    pass
            sleep(refresh_time)
def convert_nb(nbname):
    # Execute the notebook
    sh(["jupyter", "nbconvert", "--to", "notebook",
        "--execute", "--inplace", nbname])

    # Convert to .rst for Sphinx
    sh(["jupyter", "nbconvert", "--to", "rst", nbname,
        "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
        "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
        "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}"])

    # Clear notebook output
    sh(["jupyter", "nbconvert", "--to", "notebook", "--inplace",
        "--ClearOutputPreprocessor.enabled=True", nbname])

    # Touch the .rst file so it has a later modify time than the source
    sh(["touch", nbname + ".rst"])
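# The TagRemovePreprocessor options above act on cell tags stored in the
# notebook JSON; a cell hidden by this pipeline would carry metadata like:
#
#     {
#       "cell_type": "code",
#       "metadata": {"tags": ["hide-input"]},
#       "source": ["..."]
#     }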
def convert_nb(nbname):
    # Execute the notebook
    sh(["jupyter", "nbconvert", "--to", "notebook",
        "--execute", "--inplace", nbname + ".ipynb"])

    # Convert to .rst for Sphinx
    sh(["jupyter", "nbconvert", "--to", "rst", nbname + ".ipynb"])

    # Clear notebook output
    sh(["jupyter", "nbconvert", "--to", "notebook", "--inplace",
        "--ClearOutputPreprocessor.enabled=True", nbname + ".ipynb"])
def convert_nb(nbname):
    # Execute the notebook
    sh(["ipython", "nbconvert", "--to", "notebook",
        "--execute", "--inplace", nbname + ".ipynb"])

    # Convert to .rst for Sphinx
    sh(["ipython", "nbconvert", "--to", "rst", nbname + ".ipynb"])

    # Clear notebook output
    sh(["ipython", "nbconvert", "--to", "notebook", "--inplace",
        "--ClearOutputPreprocessor.enabled=True", nbname + ".ipynb"])
def ___check_md5(u):
    """ Checks a given file with MD5 """
    try:
        f = u.split('/')[-1] + ".md5"
        results = 0
        if sh(['md5sum', '-c', f]):
            print(clr.WARNING + "MD5 Verification Failed!" + clr.ENDC)
            results = 1
        else:
            print(clr.OKGREEN + "MD5 Verified" + clr.ENDC)
    except OSError:
        print(clr.WARNING + "md5sum not found on system." + clr.ENDC)
        results = 1
    finally:
        if os.path.isfile(f):
            os.remove(f)
    return results
def split(self, f, output, size='100', digits=6):
    """split file using GNU split

    Args:
        f (str): path of the file
        output (str): name of the output files (they will be outputXX with XX numbers)
        size (float): size of the split chunks in MB (optional)
    """
    out = None  # initialized so the except branch cannot raise NameError
    try:
        out = sh([
            'split', '--bytes', '{}MB'.format(size),
            '--suffix-length', '{}'.format(digits),
            '-d', f, output
        ])
    except Exception as e:
        print(e)
        print(out)
    return len([
        piece for piece in ls(self.db.cache_path)
        if output.split("/")[-1] in piece
    ])
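# Reassembly is a byte-order concatenation of the numbered pieces; a sketch,
# where `pieces` are the files produced above, sorted by numeric suffix:
#
#     with open(f, 'wb') as whole:
#         for piece in sorted(pieces):
#             with open(piece, 'rb') as part:
#                 whole.write(part.read())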
def __check_sha1(u):
    """ Checks a file with SHA-1 """
    try:
        f = u.split('/')[-1] + ".sha1"
        results = 0
        if sh(['sha1sum', '-c', f]):
            print(clr.WARNING + "SHA1 Verification Failed!" + clr.ENDC)
            results = 1
        else:
            print(clr.OKGREEN + "SHA1 Verified" + clr.ENDC)
    except OSError:
        print(clr.WARNING + "sha1sum not found on system." + clr.ENDC)
        results = 1
    finally:
        if os.path.isfile(f):
            os.remove(f)
    return results
def __init__(self, file_path, mode='r', depth=3, pipe_buffer_size=10 ** 8,
             fps=None, height=480, width=640, codec='h264', extra_args=[]):
    self.file_path = file_path
    self.pipe_buffer_size = pipe_buffer_size
    self.depth = depth
    self.height = height
    self.width = width
    self.codec = codec
    self.num_frames = 0
    self.extra_args = extra_args
    self.pipe = None
    self.fps = fps
    if mode == 'r':
        cmd = 'ffprobe -v quiet -print_format json -show_streams ' + self.file_path
        params = json.loads(sh(cmd, shell=True).decode('utf-8'))['streams'][0]
        self.height, self.width = params['height'], params['width']
        self.fps_, self.num_frames = params['avg_frame_rate'], int(params['nb_frames'])
        if fps is None:
            self.fps = float(self.fps_.split('/')[0]) / float(self.fps_.split('/')[1])
    else:
        # write mode: the frame rate must be supplied by the caller
        self.fps_ = fps
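# Usage sketch, assuming ffprobe is on PATH, `sh` is subprocess.check_output,
# and the enclosing class is a video reader (called VideoReader here purely
# for illustration; `clip.mp4` is a hypothetical file):
#
#     reader = VideoReader('clip.mp4', mode='r')
#     print(reader.width, reader.height, reader.fps, reader.num_frames)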
def convert_nb(nbname):
    executable = replace_to_execute(nbname)
    # Execute the notebook
    sh([
        "jupyter", "nbconvert", "--to", "notebook",
        "--execute", "--inplace", executable
    ])
    replace_back_in_notebook(executable)

    # Convert to .html
    output = nbname.replace('.ipynb', '')
    sh([
        "jupyter", "nbconvert", "--to", "html_ch", executable,
        "--output", output
    ])
    sh(["rm", executable])
                     path.split('/')[1], 'daty', *path.split('/')[2:])
            if files != []:
                for f in files:
                    build.append((dirname, [join(path, f)]))
        except IndexError:
            pass
    return build

# GResources
try:
    sh(['daty/resources/compile-resources.sh'])
    print("Gresources compiled")
except Exception:
    print("WARNING: to compile gresource be sure to have "
          "\"glib-compile-resources\" in your $PATH")

# Variables
theme_dir = "daty/resources/icons/hicolor"
hicolor_path = "share/icons/hicolor"

# Auxiliary functions for paths
in_hicolor_src = lambda x: join(theme_dir, x)
in_hicolor = lambda x: join(hicolor_path, x)
import sys
from subprocess import getoutput as sh
import shlex
import re

ip = ''
res = sh('VBoxManage showvminfo %s' % shlex.quote(sys.argv[1]))
m = re.search(r'MAC: (\w+)', res)
if m:
    mac = m.group(1)
    l = [
        mac[i:i + 2].lower().lstrip('0') or '0'
        for i in range(0, len(mac), 2)
    ]
    mac = ':'.join(l)
    print(mac)
    myip = sh("ifconfig |grep -oP 'inet (\d+\.){3}\d+ .* broadcast' ")
    myip = re.search(r'[\d\.]+', myip).group(0)
    if '10.28.' in myip:
        print('myip %s' % myip)
        sh('for i in {142..145};do echo $i;ping -t1 -c1 10.28.209.$i ; done')
    res = sh('arp -a | grep %s' % mac)
    print(res)
    m = re.search(r'([\d\.]+)', res)
    if m:
        print(m.group(1))
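# Note on the octet formatting above: VBoxManage reports the MAC as twelve
# hex digits ("080027AB0102"); each octet is lowercased and stripped of its
# leading zero so it matches `arp -a` output ("8:0:27:ab:1:2"). An
# equivalent sketch using integer formatting (`raw` is the unformatted MAC):
#
#     mac = ':'.join('%x' % int(raw[i:i + 2], 16) for i in range(0, 12, 2))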
def exploit_prompt(self, cls):
    exploit_args_dict = {arg: None for arg in cls.args.keys()}
    exploit_menu_dict = {
        "help": None,
        "options": None,
        "info": None,
        "set": exploit_args_dict,
        "unset": exploit_args_dict,
        "sh": None,
        "check": None,
        "run": None,
        "depends": None,
        "get": None,
        "cls": None,
        "back": None,
        "exit": None
    }
    exploits_help_menu = {
        "help": "show this help message",
        "options": "show exploit options",
        "info": "show general exploit information",
        "set": "set value",
        "unset": "unset value",
        "sh": "run shell command on local system",
        "check": "check if target is vulnerable",
        "run": "run exploit",
        "depends": "shows the dependencies needed for this exploit",
        "get": "install exploit dependencies",
        "cls": "clear screen",
        "back": "go back to previous menu",
        "exit": "exit program"
    }
    exploit_menu = NestedCompleter.from_nested_dict(exploit_menu_dict)
    message = [("class:prompt", cls.prompt)]
    while True:
        try:
            selected = self.session.prompt(
                message,
                style=self.style,
                completer=exploit_menu,
                complete_while_typing=False
            ).strip()
        except KeyboardInterrupt:
            print("[!!]::press (CTRL+D) to exit")
            continue
        except EOFError:
            print("❌ Goodbye ❌")
            exit(1)
        if selected == "":
            continue
        elif selected == "help":
            table = tabulate(
                [[k, v] for k, v in exploits_help_menu.items()],
                headers=["Command", "Description"],
                tablefmt="fancy_grid")
            print(table)
        elif selected == "options":
            table = tabulate(
                [[arg, cls.__dict__[arg]] for arg in cls.args.keys()],
                headers=["Argument", "Value"],
                tablefmt="fancy_grid")
            print(table)
        elif selected == "info":
            table = tabulate([[k, v] for k, v in cls.info.items()],
                             headers=["Key", "Value"],
                             tablefmt="fancy_grid")
            print(table)
        elif selected.startswith("unset"):
            cls.__dict__[selected.split()[-1].strip()] = "N/a"
        elif selected.startswith("set"):
            selected = selected.split()
            if len(selected) < 2:
                print("[!!]::the `set` command must be followed by an argument and value")
                print("\nexample:\n\tset url http://localhost:8080")
                continue
            arg, val = selected[1].strip(), " ".join(selected[2:])
            cls.__dict__[arg] = val
        elif selected == "check":
            try:
                cls.check()
            except NotImplementedError:
                print(f"[!!]::```{cls.__class__.__name__}``` exploit has no check function")
        elif selected == "run":
            all_set = True
            for arg, values in cls.args.items():
                if values["required"] and (
                        not cls.__dict__[arg]
                        or cls.__dict__[arg].lower() == "n/a"):
                    print("!" * 70)
                    print(f"``{arg}`` must be set before running this exploit")
                    all_set = False
            if all_set:
                try:
                    cls.run()
                except NotImplementedError:
                    print("[XX]::This exploit's `run` function has not been implemented")
                except ImportError:
                    print("[!!]::an import error occurred.\n"
                          "Run command, `depends`, to get script dependencies")
                    print("[**]::then run the command, `get`, to install the required dependencies")
                except Exception as e:
                    print(f"[ERROR]::{e}")
        elif selected == "depends":
            if cls.pip_dependencies:
                print("[************ Dependencies *************]")
                for dependency in cls.pip_dependencies:
                    print(f"\t\u2022 {dependency}")
            else:
                print("[!!]::exploit has no dependencies")
        elif selected == "get":
            if cls.pip_dependencies:
                print("[**]::fetching pip dependencies")
                from pip._internal.cli.main import main as pip_main
                [pip_main(["install", pkg]) for pkg in cls.pip_dependencies]
                print("\n[**]::pip dependencies successfully installed")
                print("[!!]::try re-running the exploit!!!")
            else:
                print("[NOTE]::this module does not require any pip dependencies")
        elif selected.startswith("sh"):
            cmd = selected.split()[1:]
            if cmd:
                try:
                    out = sh(cmd, capture_output=True).stdout
                except Exception:
                    cmd = ' '.join(cmd)
                    print(f"[XX]::unable to run command: `{cmd}`")
                    continue
                try:
                    print(out.decode("utf8"))
                except UnicodeDecodeError:
                    print("[XX]::unable to decode command output!")
                    continue
            else:
                print("[!!]::`sh` command used but no shell command was specified")
        elif selected == "cls":
            print("\n" * 75)
        elif selected == "back":
            break
        elif selected == "exit":
            print("❌ Goodbye ❌")
            exit(0)
        else:
            print(f"[XX]::`{selected}` is not a valid command! Type `help` for help menu")
should be tab-delimited and should exclude the ".txt" extension.

Example creation of one such text file, run from ../texts/raw:
ls *.txt | awk -F '.txt' '{count++; print count "\t" $1;}' > ../textids/cat0001.txt

The easiest form of parallelization at the moment is just to shift around the
number of files in each location at /texts/textfiles, and possibly to create
several different locations.
"""

# There are a whole bunch of directories that it wants to be there:
for directory in ['texts', 'logs', 'texts/cleaned', 'logs', 'logs/clean',
                  'texts/unigrams', 'logs/unigrams', 'logs/bigrams',
                  'texts/bigrams', 'texts/encoded', 'texts/encoded/unigrams',
                  'texts/encoded/bigrams', 'logs/encode2', 'logs/encode1',
                  'texts/wordlist']:
    if not os.path.exists("../" + directory):
        sh(['mkdir', '../' + directory])

"""Use the cleaning program to make texts that are set for tokenizing, and
with sentences at linebreaks."""
print("Cleaning the texts")
sh(['python', 'master.py', 'clean'])

print("Creating 1 gram counts")
sh(['python', 'master.py', 'unigrams'])

print("Creating 2gram counts")
sh(['python', 'master.py', 'bigrams'])
# We could add 3grams, and so forth, here.

print("Creating a master wordlist")
sh(['python', 'WordsTableCreate.py'])
# We could add an option to this to specify the size of the dictionary used.
# Currently hardcoded at 3,000,000 words. On very large dictionaries, this may
# crash for lack of memory; the script is an obvious candidate for map-reduce.

print("Creating 1grams encodings")
def scan(self, codeURL, handler, runtime):
    codepath = handler.split(".")[0]
    zippath = self.downloads.joinpath('lambda.zip')
    zippath.write_bytes(requests.get(codeURL).content)
    if not is_zipfile(zippath):
        return  # invalid zip
    zf = ZipFile(zippath)

    # Unzip Lambda source code
    for member in zf.namelist():
        if member.startswith(codepath):
            zf.extractall(self.downloads, members=[member])

    # Configure sonar-project.properties
    if runtime.startswith('python'):
        language = 'py'
    elif runtime.startswith('node'):
        language = 'js'
    elif runtime.startswith('java'):
        language = 'java'
    else:
        return  # unsupported language
    Path(self.downloads, 'sonar-project.properties').write_text(
        SONAR_PROJECT_PROPERTIES.format(
            self.config['url'],
            self.config['login'],
            self.config['password'],
            language
        )
    )

    # Run sonar-scanner
    cwd = Path('.').resolve()
    cd(self.downloads)
    sh(shsplit(self.config['command']), stdout=DEVNULL, stderr=DEVNULL)
    cd(cwd)

    # Get results
    curl = requests.Session()
    curl.auth = (self.config['login'], self.config['password'])
    while True:
        sleep(3)
        task = json.loads(
            curl.get(f'{self.config["url"]}/api/ce/activity').text
        )['tasks'][0]
        if task['status'] in ['SUCCESS', 'FAIL']:
            break
    issues = json.loads(
        curl.get(f'{self.config["url"]}/api/issues/search?project=lambdaguard').text
    )['issues']
    curl.post(f'{self.config["url"]}/api/projects/delete', data={
        'project': 'lambdaguard'
    })
    for issue in issues:
        if issue['status'] != 'OPEN':
            continue
        where = issue['component'].split(':', 1)[1]
        yield {
            'level': 'high',
            'text': f'{issue["message"]}\n{where} on line {issue["textRange"]["startLine"]}.'
        }
def callback(self, widget, data=None):
    #~ print "shell %s" % data
    sh(data, shell=True)
def parse_spec(self, specFile, cacheFile='.repocache.json'):
    ''' Parse the Spec file contents '''
    if self.pkgdict:
        return specFile, self.pkgdict[specFile]

    items = lambda t, c: re.findall('%s:\s+(.*)' % t, c)
    split_str = lambda l: [re.split('[\s,=|>=|<=]+', i) for i in l]
    flat = lambda L: sum(map(flat, L), []) if isinstance(L, list) else [L]
    remove_ver = lambda l: [i for i in l if not re.match('^[0-9]', i)]
    decode = lambda v: v.decode() if v else v

    if os.path.exists(specFile) and specFile.endswith('.spec'):
        rpm_info = {}
        subpkgs, reqpkgs = [], []
        spec = rpm.spec(specFile)
        hdr = spec.sourceHeader
        reqlist = [decode(i) for i in hdr[rpm.RPMTAG_REQUIRES]]
        content = sh('/bin/rpmspec -P {}'.format(specFile))
        content = content[:content.index('%changelog')]

        # subpackages
        name = decode(hdr[rpm.RPMTAG_NAME])
        subpkgs.append(name)
        if re.search('%package', content):
            for i in re.findall('%package\s*(.+)', content):
                if i.startswith('-n'):
                    subpkgs.append(re.match('-n\s*(.*)', i).group(1))
                else:
                    subpkgs.append('{}-{}'.format(name, i))
        provpkgs = remove_ver(flat(split_str(items('Provides', content)))) + subpkgs

        # parse buildrequires
        for i in reqlist:
            if re.match('.*\((.*)\)', i):
                reqpkgs.append(self._query_package(i)[0].name)
            else:
                reqpkgs.append(i)

        rpm_info = {
            "name": decode(hdr[rpm.RPMTAG_NAME]),
            "epoch": hdr[rpm.RPMTAG_EPOCHNUM],
            "version": decode(hdr[rpm.RPMTAG_VERSION]),
            "release": decode(hdr[rpm.RPMTAG_RELEASE]),
            "vendor": decode(hdr[rpm.RPMTAG_VENDOR]),
            "summary": decode(hdr[rpm.RPMTAG_SUMMARY]),
            "packager": decode(hdr[rpm.RPMTAG_PACKAGER]),
            "group": decode(hdr[rpm.RPMTAG_GROUP]),
            "license": decode(hdr[rpm.RPMTAG_LICENSE]),
            "url": decode(hdr[rpm.RPMTAG_URL]),
            "description": decode(hdr[rpm.RPMTAG_DESCRIPTION]),
            "sources": spec.sources,
            "patchs": [decode(i) for i in hdr[rpm.RPMTAG_PATCH]],
            "build_archs": [decode(i) for i in hdr[rpm.RPMTAG_BUILDARCHS]],
            "exclusive_archs": [decode(i) for i in hdr[rpm.RPMTAG_EXCLUSIVEARCH]],
            #"build_requires": [i.DNEVR()[2:] for i in rpm.ds(hdr, 'requires')],
            "build_requires": sorted(list(set(reqpkgs))),
            "requires": remove_ver(flat(split_str(items('\nRequires', content)))),
            "recommends": remove_ver(flat(split_str(items('Recommends', content)))),
            "supplements": [decode(i) for i in hdr[rpm.RPMTAG_SUPPLEMENTS]],
            "suggests": [decode(i) for i in hdr[rpm.RPMTAG_SUGGESTS]],
            "enhances": [decode(i) for i in hdr[rpm.RPMTAG_ENHANCES]],
            "provides": sorted(list(set(provpkgs))),
            "obsoletes": remove_ver(flat(split_str(items('Obsoletes', content)))),
            "conflicts": remove_ver(flat(split_str(items('Conflicts', content))))
        }
        return specFile, rpm_info
    return False
def build_pkg(self):
    ''' Package build process '''
    pkgs, deps = [], {}
    resultList = []
    archs = self.opts.arch if self.opts.arch else [os.uname()[-1]]
    releases = self.opts.release if self.opts.release else [
        sh('rpm -E %fedora')
    ]
    rootdir = self.outdir

    # build srpm
    for spec in self.parse_dep(self._get_spec('install')):
        if spec in self.results:
            if self.opts.verbose_cmd:
                echo('36', 'verb:', ' skip %s file.' % (spec))
            continue
        specFile, specDict = self.parse_spec(spec, self.gitdir)
        if self.opts.verbose_cmd:
            echo('36', 'verb:', ' parser {} file.'.format(specFile))
        self.get_sources(specDict['sources'], self.srcdir)
        srpmFile = self.build_srpm(specFile, self.srcdir)
        echo('32', 'info:', ' Build SRPM -', srpmFile)
        if re.match('.*\.net', srpmFile):
            key = specDict['name'] + '.net'
        else:
            key = specDict['name']
        # queue
        pkgs.append(key)
        deps.update({
            key: [
                specDict['build_requires'], specDict['provides'],
                srpmFile, specFile
            ],
        })
    tasks, specs = self.resolve_depends(pkgs, deps)

    # build rpm
    for task in tasks:
        for rel in releases:
            for arch in archs:
                outDir = os.path.join(rootdir, rel, arch)
                echo('32', 'info:',
                     ' Build RPM {} for fc{} - {}:'.format(task, rel, arch))
                value, log = self.build_rpm(task,
                                            release=rel,
                                            arch=arch,
                                            output=outDir,
                                            opts=self.opts.mock_opts,
                                            verb=self.opts.verbose_cmd,
                                            quiet=self.opts.quiet_cmd)
                if self.opts.verbose_cmd:
                    echo(log)
                echo('32', 'info:',
                     ' Create metadata for fc{} - {}:\n'.format(rel, arch))
                self.create_repo(outDir,
                                 verb=self.opts.verbose_cmd,
                                 quiet=self.opts.quiet_cmd)
                self.result(self.resultfile,
                            [value, specs[tasks.index(task)], rel, arch])
                resultList.append(self.result('-', [value, task, rel, arch]))
    echo('36', '\n** Build result **')
    for i in resultList:
        echo(''.join(i))

    # install rpm
    for spec in specs:
        for pkg in self.pkgdict[spec]['provides']:
            echo('32', 'info:', ' Installing %s package' % pkg)
            self.operate_pkg('install', pkg)
def init(self):
    menu = NestedCompleter.from_nested_dict({
        "help": None,
        "list": None,
        "search": None,
        "select": {exploit: None for exploit in self.exploits},
        "sh": None,
        "cls": None,
        "back": None,
        "exit": None
    })
    self.session = PromptSession(
        complete_while_typing=False,
        auto_suggest=AutoSuggestFromHistory()
    )
    message = [("class:prompt", "〔Exploits〕❯ ")]
    while True:
        if not self.exploit:
            try:
                selected = self.session.prompt(
                    message,
                    style=self.style,
                    completer=menu,
                ).strip()
            except KeyboardInterrupt:
                print("[!!]::press (CTRL+D) to exit")
                continue
            except EOFError:
                print("❌ Goodbye ❌")
                exit(1)
            if selected == "list":
                self.list()
            elif selected.startswith("search"):
                s = ' '.join(selected.split()[1:])
                matched_exploits = []
                for exploit in self.exploits:
                    if s in exploit:
                        matched_exploits.append(exploit)
                if matched_exploits:
                    print("Matched Exploits:")
                    for exploit in matched_exploits:
                        print(f"\t* {exploit}")
                    print()
                else:
                    print(f"[!!]::no exploit names matching: `{s}`")
            elif selected.startswith("select"):
                selected = selected.split()
                if len(selected) == 1 or selected[-1] == "":
                    print("[!!]::must provide an exploit by name to use, try `list` command")
                    continue
                elif len(selected) > 2:
                    print("[!!]::only one exploit can be selected at a time")
                    continue
                exploit = selected[-1].strip()
                if exploit not in self.exploits:
                    print(f"[XX]::{exploit} is not a valid exploit, try `list` command")
                    continue
                self.select(exploit)
            elif selected == "help":
                table = tabulate([[k, v] for k, v in self._help_.items()],
                                 headers=["Command", "Description"],
                                 tablefmt="fancy_grid")
                print(table)
            elif selected.startswith("sh"):
                cmd = selected.split()[1:]
                if cmd:
                    try:
                        out = sh(cmd, capture_output=True).stdout
                    except Exception:
                        cmd = ' '.join(cmd)
                        print(f"[XX]::unable to run command: `{cmd}`")
                        continue
                    try:
                        print(out.decode("utf8"))
                    except UnicodeDecodeError:
                        print("[XX]::unable to decode command output!")
                        continue
                else:
                    print("[!!]::`sh` command used but no shell command was specified")
            elif selected == "cls":
                print("\n" * 75)
            elif selected == "back":
                break
            elif selected == "exit":
                print("❌ Goodbye ❌")
                exit(0)
            else:
                print(f"[XX]::`{selected}` is not a valid command! Type `help` for help menu")
        else:
            self.select(self.exploit)
def start_server():
    sh(['proftpd'])
    sh(['rsyslogd'])
    sh(['ps', 'aux'])
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Intersphinx ----------------------------------------------------------

intersphinx_mapping = {'https://docs.python.org/3/': None}


# -- Read The Docs --------------------------------------------------------

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:
    html_theme = 'alabaster'
else:
    # readthedocs.org uses their theme by default, so no need to specify it
    # build rest-api, since RTD doesn't run make
    from subprocess import check_call as sh
    sh(['make', 'rest-api'], cwd=docs)


# -- Spell checking -------------------------------------------------------

try:
    import sphinxcontrib.spelling
except ImportError:
    pass
else:
    extensions.append("sphinxcontrib.spelling")
    spelling_word_list_filename = 'spelling_wordlist.txt'
raise Exception("Expecting as many modes as limits") if output_folder.strip() == "": raise Exception("Output folder cannot be empty") sum = 0 for limit in limits: sum += int(limit) if sum > 100: raise Exception("Sum of all limits should be <= 100") if output_folder == "/": raise Exception("Can't use '/' as output folder, we'll 'rm -rf /*' !!!") # delete previous results sh("sudo rm -fr {}/*".format(output_folder), shell=True) sh("mkdir -p " + output_folder, shell=True) sh(['docker', 'pull', 'ivotron/fio']) # FIO stuff # # It's OK to run multiple fio processes separately since we're # only interested in measuring the performance of read/write # requests and not checking their integrity. # see: http://www.spinics.net/lists/fio/msg03295.html flags = "--entrypoint=genfio-test" img = "ivotron/fio" args = "-s -d {} -r {} -b {}k".format(device, runtime, bs) # create containers first, to minimize startup costs
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Intersphinx ----------------------------------------------------------

intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'tornado': ('https://www.tornadoweb.org/en/stable/', None),
}


# -- Read The Docs --------------------------------------------------------

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if on_rtd:
    # readthedocs.org uses their theme by default, so no need to specify it
    # build both metrics and scopes, since RTD doesn't run make
    from subprocess import check_call as sh
    sh(['make', 'metrics', 'scopes'], cwd=docs)


# -- Spell checking -------------------------------------------------------

try:
    import sphinxcontrib.spelling
except ImportError:
    pass
else:
    extensions.append("sphinxcontrib.spelling")
    spelling_word_list_filename = 'spelling_wordlist.txt'
epub_publisher = author
epub_copyright = copyright

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Intersphinx ----------------------------------------------------------

intersphinx_mapping = {'https://docs.python.org/3/': None}


# -- Read The Docs --------------------------------------------------------

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if on_rtd:
    # readthedocs.org uses their theme by default, so no need to specify it
    # build both metrics and rest-api, since RTD doesn't run make
    from subprocess import check_call as sh
    sh(['make', 'metrics', 'rest-api'], cwd=docs)


# -- Spell checking -------------------------------------------------------

try:
    import sphinxcontrib.spelling
except ImportError:
    pass
else:
    extensions.append("sphinxcontrib.spelling")
    spelling_word_list_filename = 'spelling_wordlist.txt'