def __init__(self, binary):
    """Initialize the analysis pipeline around a target executable.

    :param binary: path to the target binary; normalized to an absolute
        path so every stage below refers to the same location.
    """
    self.binary = os.path.abspath(binary)
    # Pipeline stages, each bound to the same target binary.
    self.bug_find = BugFind(self.binary)
    self.dba = DBA(self.binary)
    self.exploit = Exploit(self.binary)
    self.verify = Verify(self.binary)
def run_flow(profiles, conv, test_id, conf, profile):
    """Run one test flow: execute its operation sequence, then its checks.

    Fix: the original ended the try block with ``except Exception as err:
    raise`` — a no-op re-raise with an unused binding — which is removed.

    :param profiles: module/object providing PROFILEMAP for operation setup
    :param conv: conversation object carrying the flow definition and state
    :param test_id: identifier of the test being run (also printed as a banner)
    :param conf: configuration handed to each operation
    :param profile: profile handed to each operation
    :return: None
    """
    print(20 * "=" + test_id + 20 * "=")
    conv.test_id = test_id
    conv.conf = conf
    for item in conv.flow["sequence"]:
        # Sequence entries are either a class or a (class, funcs) tuple.
        if isinstance(item, tuple):
            cls, funcs = item
        else:
            cls = item
            funcs = {}
        _oper = cls(conv, profile, test_id, conf, funcs)
        _oper.setup(profiles.PROFILEMAP)
        _oper()
    try:
        if conv.flow["tests"]:
            _ver = Verify(check_factory, message_factory, conv)
            _ver.test_sequence(conv.flow["tests"])
    except KeyError:
        # A flow without a "tests" entry is legal: nothing to verify.
        pass
    return None
def checkAllSyntax(self, triples):
    """Check the RDF syntax of every file in *triples*.

    Test method for syntax — should be deleted later on.

    Fix: a space was missing after 'Checking', so output ran the label
    into the filename (cf. verifyAllLinks, which prints 'Verifying ').

    :param triples: iterable of file paths to syntax-check
    """
    for path in triples:
        print('Checking ' + path)
        verify = Verify(path)
        verify.checkRDFsyntax()
def verifyAllLinks(self, triples):
    """Run link verification over each triples file.

    :param triples: A list of all absolute paths for .nt files
    :return: Errors
    """
    for nt_path in triples:
        print('Verifying ' + nt_path)
        Verify(nt_path).verifyLinks()
def lambda_handler(event, context):
    """AWS Lambda entry point: verify a file and return its messages.

    Fix: the manual append loop was a re-implementation of ``list()``.

    :param event: Lambda event dict; must contain a 'file' key
    :param context: Lambda context (unused)
    :return: list of messages produced by Verify(...).process()
    """
    file_name = event['file']
    messages = Verify(file_name).process()
    return list(messages)
def manage_http_verify(self, part, interval=60):
    """Run the HTTP proxy verification job once every *interval* seconds.

    Never returns: loops forever, sleeping between rounds.

    :param part: partition of proxies handed to the verifier
    :param interval: seconds to sleep between verification rounds
    """
    checker = Verify()
    while True:
        checker.run_verify_http(part)
        time.sleep(interval)
def _verify(self):
    """Kick off the post-erase verification pass in a worker and wire its
    signals to the progress UI.

    Fix: the overwrite patterns were written as ``b'\0x00'`` / ``b'\0xff'``,
    which are 4-byte literals (NUL, 'x', '0'/'f', '0'/'f') — not the intended
    single overwrite byte. Corrected to proper ``\\x`` escapes.
    """
    if self.method == 'Zero Overwrite':
        pattern = b'\x00'
    elif self.method == 'One Overwrite':
        pattern = b'\xff'
    else:
        # NOTE(review): 'random' is a str sentinel while the branches above
        # produce bytes; Verify presumably special-cases it — confirm.
        pattern = 'random'
    self.ui.erase_progress.setValue(100)
    self.verify = Verify(self.device, pattern, self.ui.verify_percent.value())
    self.verify.finished.connect(self._finished)
    self.verify.start()
    self.verify.CURRENT_DATA.connect(self.ui.verify_progress.setValue)
    self.verify.CURRENT_TIME.connect(self.ui.verify_duration_label.setText)
def __init__(self, caller, calling, log_level=3):
    """Set up the SIP test harness: verify prerequisites, start the pjsip
    library, and register the caller and calling accounts.

    :param caller: account spec for the calling side (registered as default)
    :param calling: account spec for the called side
    :param log_level: NOTE(review): currently unused — the pjsip log level
        below is hard-coded to 7; confirm whether it should be wired through.
    """
    self.verify = Verify()
    # Abort immediately if environment verification fails.
    if not self.verify.setup():
        sys.exit(1)
    self.lib = pj.Lib()
    # lib.init must precede lib.start; ua_cfg/media_cfg are attributes
    # presumably defined elsewhere on this class — confirm.
    self.lib.init(
        ua_cfg=self.ua_cfg,
        log_cfg=pj.LogConfig(
            level=7,
            # Route pjsip's log lines into Python logging.
            callback=lambda level, str, len: logging.debug(str.strip())),
        media_cfg=self.media_cfg)
    self.lib.start(with_thread=True)
    # register() returns (ddi, account, callback, config) for each side.
    self.caller_ddi, self.caller_account, self.caller_cb, self.caller_cfg = self.register(
        caller, default=True)
    self.calling_ddi, self.calling_account, self.calling_cb, self.calling_cfg = self.register(
        calling)
def quotas_usage(request, project_id, resource):
    """Return a project's resource usage over a time window as JSON.

    Validates the caller's keystone token first; on failure, returns the
    verifier's raw response instead of usage data.

    :param request: HTTP request carrying the X-Auth-Token header and
        start_time / end_time GET parameters (epoch seconds)
    :param project_id: tenant whose usage is queried
    :param resource: name of the resource to load from MongoDB
    """
    token = request.META.get('HTTP_X_AUTH_TOKEN')
    info = setting.mongodb_info
    store = MongoDB(info['user'], info['password'], info['host'],
                    info['port'], info['database'])
    checker = Verify()
    checker.set_request(KEY_STONE_HOST['host'], KEY_STONE_HOST['port'])
    checker.set_tenantname(project_id)
    # Guard clause: bail out early on an invalid token.
    if not checker.is_token_available(token):
        return HttpResponse(checker.get_request_data())
    start_time = int(request.GET.get('start_time'))
    end_time = int(request.GET.get('end_time'))
    payload = store.load(resource, project_id, start_time, end_time)
    return HttpResponse(json.dumps(payload), content_type="application/json")
def create_flow(self, process_id):
    """Build and configure a Flow instance for *process_id* from the config.

    Bug fix: per-rule output directories were collected with
    ``output_dirs = {rule: output_dir}`` inside the loop, which rebuilt the
    dict on every iteration so only the LAST rule's destdir reached the
    path check. Now every rule's destdir is accumulated and verified.

    :param process_id: identifier used to derive the per-process input dir
    :return: a fully configured Flow
    """
    flow = Flow()  # fresh flow instance
    config = self.get_config()

    def _require(key):
        # Fetch a mandatory [common] option; abort with the original
        # error message when it is missing or empty.
        value = config.get("common")[key]
        if not value:
            logging.error("ERROR>>no %s in %s<< " % (key, self.config_file))
            sys.exit()
        return value

    self.input_dir = _require("inputdir")
    self.match_expr = _require("input_rule_exp")
    redo_path = _require("redopath")
    fieldlen = _require("fieldlen")
    line_limit = _require("line_limit")
    # NOTE(review): dead code carried over from the original — an empty
    # line_limit already triggered sys.exit() above, so this default can
    # never apply. Confirm whether a missing value should default instead.
    if line_limit == "":
        line_limit = 20000
    if not config.get("common")["rules"]:
        logging.error('ERROR>>no rules in config<<')
        sys.exit()
    rule_list = config.get("common")["rules"].split(",")
    self.batch_size = config.get("common")["batchsize"]
    if not config.get("common")["bakpath"]:
        logging.error('ERROR>>no bakpath in config<<')
        sys.exit()
    bak_path = config.get("common")["bakpath"]

    flow.set_fieldlen(fieldlen)
    flow.set_line_limit(int(line_limit))
    flow.set_process_id(process_id)
    flow.set_redo_path(redo_path)
    flow.set_bak(bak_path)

    # Every rule must declare an output directory; accumulate them all
    # (fixed: the dict is no longer rebuilt on each iteration).
    output_dirs = {}
    for rule in rule_list:
        output_dir = config.get(rule)["destdir"]
        if output_dir == "":
            logging.error("rule:%s no destdir" % rule)
            sys.exit()
        output_dirs[rule] = output_dir

    # Verify that every configured path exists before running.
    all_path = {
        'inputdir': self.input_dir,
        'redopath': redo_path,
        'bakpath': bak_path
    }
    all_path.update(output_dirs)
    self.output_dirs = all_path
    verify = Verify(all_path)
    if not verify.check_path():
        sys.exit()

    self.process_input_dir = self.input_dir + "/" + process_id
    flow.set_dir(self.process_input_dir)
    # Attach each rule's own config section to the flow.
    for rule_name in rule_list:
        _config = {'rulename': rule_name}
        rule_items = config.get(rule_name)
        _config.update(rule_items)
        flow.add_rule(Rule(_config))
    flow.config = config
    return flow
# ------------------------------------------------------- """ Instead of using a session directly to store state between user requests and responses from the CLI, we're going to simplify the process since this is not theh final API construction. Instead of a database, we're going to use a simple global dict to create key-value pairs for users that log in with their JWT. Within these key-value pairs will be more dicts, storing information about a current request for a CWL run. Once that CWL run executes or fails, we delete that key from the dict; this will help manage memory. Note that every time the app is restarted this pseudo-database is blown away, which is entirely inefficient for any type of production environment. """ gstor = {} verifier = Verify() # token verifier # Method to verify user def verify(token): """Verify a token and return claims :param str token: JSON Web Token string""" client_id = conf['client_id'] keys_url = conf['keys_url'] return verifier.verify_token(token, keys_url, client_id) #------- # ROUTES # ------
'''
# NOTE(review): the triple-quote above presumably terminates a module
# docstring that begins before this chunk — confirm against the full file.
import json, sys
from verify import Verify

# Resolve the run directory: optional first CLI argument, default ../bin.
if len(sys.argv) > 1:
    RUNPATH = sys.argv[1]
else:
    RUNPATH = '../bin'
LOGPATH = RUNPATH + '/logs'
CONFIGFILE = RUNPATH + '/DAM.config'

# Load the JSON config; json.dumps + strip('"') round-trips each scalar
# back to a plain string.
config_data = open(CONFIGFILE).read()
data = json.loads(config_data)
USERLIST = [json.dumps(u).strip('"') for u in data["userlist"]]
RULELIST = [json.dumps(r).strip('"') for r in data["rulelist"]]
SERVER = json.dumps(data["server"]).strip('"')

# Python 2 print statements — this script targets Python 2.
print 'Runpath: ', RUNPATH
print 'Logpath: ', LOGPATH
print 'Server from config file: ', SERVER
print 'Userlist from config file: ', USERLIST
print 'Rulelist from config file: ', RULELIST

# Run verification against the configured server for all users and rules.
v = Verify(LOGPATH, SERVER)
v.run(USERLIST, RULELIST)
def manage_https_verify(self, interval=60):
    """Re-run the HTTPS proxy verification every *interval* seconds.

    Never returns: loops forever, sleeping between rounds.

    :param interval: seconds to sleep between verification rounds
    """
    checker = Verify()
    while True:
        checker.run_verify_https()
        time.sleep(interval)
from verify import Verify

# Hard-coded sample document to check.
fileName = '/media/Tcc_Tatiane.docx'
messages = Verify(fileName).process()
# Python 2 print statements: dump each reported issue's content and detail.
for value in messages:
    print value.content
    print value.detail
class Main:
    # NOTE(review): the entry-point logic lives directly in the class body,
    # so it executes at class-definition (import) time; Main is never
    # instantiated here.
    if __name__ == '__main__':
        # Run the settings script
        settings = tools.settings.Settings()
        # Run the outputSaver script
        output_saver = tools.outputSaver.OutputSaver()

        # Command-line interface definition.
        parser = argparse.ArgumentParser(
            description=
            'MAD downloads anime from CrunchyRoll, WCOStream and other websites.'
        )
        parser.add_argument('--version',
                            action='store_true',
                            help='Shows version and exits.')
        required_args = parser.add_argument_group('Required Arguments :')
        required_args.add_argument('-i',
                                   '--input',
                                   nargs=1,
                                   help='Inputs the URL to anime.')
        parser.add_argument('-p',
                            '--password',
                            nargs=1,
                            help='Indicates password for a website.')
        parser.add_argument('-u',
                            '--username',
                            nargs=1,
                            help='Indicates username for a website.')
        parser.add_argument('-r',
                            '--resolution',
                            nargs=1,
                            help='Inputs the resolution to look for.',
                            default='720')
        parser.add_argument('-l',
                            '--language',
                            nargs=1,
                            help='Selects the language for the show.',
                            default='Japanese')
        parser.add_argument('-se',
                            '--season',
                            nargs=1,
                            help='Specifies what season to download.',
                            default='All')
        parser.add_argument(
            '--skip',
            action='store_true',
            help='skips the video download and downloads only subs.')
        parser.add_argument('-nl',
                            '--nologin',
                            action='store_true',
                            help='Skips login for websites.')
        parser.add_argument(
            '-o',
            '--output',
            nargs=1,
            help='Specifies the directory of which to save the files.')
        parser.add_argument('-n',
                            '--newest',
                            help='Get the newest episode in the series.',
                            action='store_true')
        parser.add_argument(
            '-rn',
            '--range',
            nargs=1,
            help='Specifies the range of episodes to download.',
            default='All')
        parser.add_argument(
            "-v",
            "--verbose",
            help="Prints important debugging messages on screen.",
            action="store_true")
        parser.add_argument(
            '-x',
            '--exclude',
            nargs=1,
            help='Specifies the episodes to not download (ie ova).',
            default=None)
        parser.add_argument('--search',
                            action='store_true',
                            help='Search for a show.')
        parser.add_argument('--gui', action='store_true', help='Start the GUI')
        args = parser.parse_args()

        # Shared state passed to downstream site handlers via args.
        args.logger = False
        args.skipper = False
        args.settings = settings
        args.outputsaver = output_saver

        if args.search:
            run_search = tools.search.Search()
            array = run_search.start()
            for item in array:
                print(item)
            # NOTE(review): exits with status 1 even on success — confirm.
            exit(1)
        if args.gui:
            run_gui = tools.gui.Gui()
            exit(1)
        if args.verbose:
            # Verbose mode logs to a file, not to the console.
            logging.basicConfig(format='%(levelname)s: %(message)s',
                                filename="Error Log.log",
                                level=logging.DEBUG)
            logging.debug('You have successfully set the Debugging On.')
            logging.debug("Arguments Provided : {0}".format(args))
            logging.debug("Operating System : {0} - {1} - {2}".format(
                platform.system(), platform.release(), platform.version()))
            logging.debug("Python Version : {0} ({1})".format(
                platform.python_version(), platform.architecture()[0]))
            args.logger = True
        if args.version:
            print("Current Version: {0}".format(__version__))
            exit()
        if args.skip:
            print("Will be skipping video downloads")
            args.skipper = True
        if args.nologin:
            # Placeholder credentials when login is skipped.
            args.username = ['username']
            args.password = ['password']
        if args.input is None:
            # NOTE(review): with args.input None, args.input[0] below always
            # raises TypeError, which is caught to print the "required
            # argument" message — the saved-URL lookup can never succeed in
            # this branch; the condition looks inverted. Confirm intent.
            try:
                if args.outputsaver.get_show_url(args.input[0]) is not None:
                    args.input[0] = args.outputsaver.get_show_url(
                        args.input[0])
            except TypeError as e:
                print(
                    "Please enter the required argument (Input -i). Run __main__.py --help"
                )
                exit(1)
        else:
            # Flatten single-element nargs=1 lists into plain values;
            # username/password fall back to False when not provided.
            if type(args.username) == list:
                args.username = args.username[0]
            else:
                args.username = False
            if type(args.password) == list:
                args.password = args.password[0]
            else:
                args.password = False
            if type(args.resolution) == list:
                # A comma-separated resolution becomes a list of options.
                if "," in args.resolution[0]:
                    args.resolution = args.resolution[0].split(',')
                else:
                    args.resolution = args.resolution[0]
            if type(args.language) == list:
                args.language = args.language[0]
            if type(args.range) == list:
                args.range = args.range[0]
            if type(args.season) == list:
                args.season = args.season[0]
            if type(args.output) == list:
                args.output = args.output[0]

        # Lets check if the url is a website we support and if it requires a
        # username and password
        verify = Verify(args.__dict__)
        if verify.isVerified():
            # It is a website we support. Lets use it
            if verify.getWebsite() == 'WCO':
                sites.wcostream.WCOStream(args.__dict__)
            if verify.getWebsite() == 'Crunchyroll':
                sites.crunchyroll.Crunchyroll(args.__dict__)
def getLoginVCode(self):
    """Show the login captcha dialog and return the selected points.

    :return: the dialog's points when the image was shown, else ''.
    """
    dialog = Verify()
    if not dialog.showImage("login", "sjrand"):
        return ""
    return dialog.getPoints()