def post(self):
    """Subscribe a relay to notifications.

    POST arguments:
        fingerprint: relay fingerprint to subscribe.
        frequency:   notification interval (positive integer).
        hmac:        HMAC authenticating the fingerprint.

    Responds 403 on a bad HMAC or an invalid frequency; otherwise upserts
    the (fingerprint, frequency) row into the `subscribe` table.
    """
    fingerprint = self.get_argument('fingerprint')
    frequency = self.get_argument('frequency')
    hmac = self.get_argument('hmac')
    if not verify(hmac, fingerprint):
        self.set_status(403)
        self.write('hmac invalid :(')
        return
    try:
        parsedfreq = int(frequency)
        if parsedfreq < 1:
            # Funnel non-positive values into the same error path as
            # unparseable ones (original duplicated the 403 handling).
            raise ValueError(frequency)
    except ValueError:
        self.set_status(403)
        self.write('frequency invalid :(')
        return
    # NOTE: the original pre-set parsedfreq to a 2-day default, but that
    # value was unreachable (every failure path returns above) — removed.
    conn = sqlite3.connect('TorWeather.db')
    with conn:
        conn.execute(
            "INSERT OR REPLACE INTO subscribe (fingerprint, frequency)"
            " VALUES (:fingerprint, :frequency);",
            {'fingerprint': fingerprint, 'frequency': parsedfreq})
    # BUG FIX: this is the subscribe endpoint; the original message said
    # "Successfully unsubscribed." (copy-paste from the unsubscribe handler).
    self.write(wrap_render("Successfully subscribed."))
def checkquery(name, testplatform, trustedplatform=SqliteRunner("testqueries"), tmppath="tmp"): # noqa """ @param name: name of query @param tmppath: existing directory for temporary files """ osutils.mkdir_p(tmppath) abstmppath = os.path.abspath(tmppath) testoutfn = testplatform.run(name, abstmppath) expectedfn = trustedplatform.run(name, abstmppath) print "test: %s" % (name) verify(testoutfn, expectedfn, False)
def checkquery(name, tmppath="tmp", querypath="testqueries"): """ @param name: name of query @param tmppath: existing directory for temporary files """ osutils.mkdir_p(tmppath) envir = os.environ.copy() # cpp -> exe exe_name = './%s.exe' % (name) subprocess.check_call(['make', exe_name], env=envir) # run cpp testoutfn = '%s/%s.out' % (tmppath, name) with open(testoutfn, 'w') as outs: try: subprocess.check_call([exe_name], stdout=outs, env=envir) except subprocess.CalledProcessError as e1: # try again, this time collecting all output to print it try: subprocess.check_call([exe_name], stderr=subprocess.STDOUT, env=envir) raise e1 # just in case this doesn't fail again except subprocess.CalledProcessError as e2: print "see executable %s" % (os.path.abspath(exe_name)) print subprocess.check_output(['ls', '-l', exe_name], env=envir) print subprocess.check_output(['cat', '%s.cpp' % (name)], env=envir) raise Exception('(Process output below)\n'+e2.output+'\n(end process output)') querycode = readquery("%s/%s.sql" % (querypath,name)) querystr = make_query(name, querycode) # run sql conn = sqlite3.connect(testdbname()) c = conn.cursor() expectedfn = '%s/%s.sqlite.csv' % (tmppath, name) with open(expectedfn, 'w') as csvfile: wr = csv.writer(csvfile, delimiter=' ') for row in c.execute(querystr): wr.writerow(list(row)) print "test: %s" % (name) verify(testoutfn, expectedfn, False)
def get(self):
    """Render the subscription form for an HMAC-authenticated fingerprint."""
    fp = self.get_argument('fingerprint')
    mac = self.get_argument('hmac')
    if verify(mac, fp):
        self.write(wrap_render(SUB_GET_TEMPLATE, fingerprint=fp, hmac=mac))
    else:
        self.set_status(403)
        self.write('hmac invalid :(')
def main(): (nodes,edges) = load('input.csv') solver = getSolver(nodes, edges) while True: solution = getSolution(solver) (success, output) = verify(solution,nodes,edges) if success: print "Found schedule: " + str(output) return output else: addConstraint(solver, output)
def post(self):
    """Record an unsubscribe request for an HMAC-authenticated fingerprint."""
    fp = self.get_argument('fingerprint')
    mac = self.get_argument('hmac')
    if not verify(mac, fp):
        self.set_status(403)
        self.write('hmac invalid :(')
        return
    db = sqlite3.connect('TorWeather.db')
    with db:
        db.execute(
            "INSERT OR REPLACE INTO unsubscribe (fingerprint) VALUES (:fingerprint);",
            {'fingerprint': fp})
    self.write(wrap_render("Successfully unsubscribed."))
with open('/usr/testguest/' + case + '.json', 'r') as f: print >> sys.stderr, "JSON: " + f.read() # Read inputs if os.path.isfile(output_file): with open(output_file, 'r') as f: r = json.loads(f.read()) else: print("FAILED: Test \"" + case + ".out\" not produced by test program.") continue with open(golden_file, 'r') as f: g = json.loads(f.read()) with open(input_file, 'r') as f: d = json.loads(f.read()) print "Description: %s" % d["test"] # Verify test output ok, message = verifier.verify(r, d, g) wins += (1 if ok else 0) print (("OK" if ok else "FAILED") + ": Test \"" + case + ".in\"" + (message if message else ("\nyields: " + str(result) + "\nexpecting: " + str(g)))) # Produce grades print("Grade: " + str(wins) + "/" + str(len(cases))) score = (wins * 1.0) / len(cases) grades = { 'Auto-grader': score } with open('output', 'w') as scores: scores.write(json.dumps(grades))
def mailVerify(mail):
    """Check `mail` against the stored hash and print the verdict."""
    expected_hash = '17d753c1aaa19cba5323599ff6d603eb8c6502e1677a6cee869d6a659a1b11f3'
    verdict = "Right" if verifier.verify(mail, expected_hash) else "Wrong"
    print(verdict)
from state import State
from verifier import verify

# Initial machine memory and the byte sequence we want a program to output.
mem = [x.encode('ascii') for x in 'UJXGBE']
initial_state = State(mem, [])
desired_output = [x.encode('ascii') for x in 'BUG']


def criteria(end_state: State):
    """Accept an end state iff it produced exactly the desired output."""
    # IDIOM FIX: collapsed `if cond: return True else: return False`
    # into a direct boolean expression.
    return end_state.output == desired_output


# todo: fill in — enumerate candidate instruction sequences here
candidates = []

# get all possible instructions
for instrs in candidates:
    if verify(instrs, initial_state, criteria):
        pass  # passed
import os
import constant
import prep
import verifier
import emails

if __name__ == '__main__':
    # For each raw file: crawl candidate e-mail addresses for every person,
    # then hand the annotated records to the verifier.
    for fname in os.listdir(constant.RAW_DIR):
        people = prep.rearrange_info(fname)
        for person in people.values():
            print(person)
            crawled = emails.get_emails(person)
            people[person['id']]['email_list_crawl'] = crawled
        print('start verify')
        verifier.verify(fname, people, False)
        print('end verify')
import verifier
import json

# Candidate address and the digest it must hash to.
CANDIDATE = '*****@*****.**'
EXPECTED_DIGEST = 'e005b9d6242eee960b34257203a23d42ac9df09a673c422aa9a9a9bb3e2f4032'

if verifier.verify(CANDIDATE, EXPECTED_DIGEST):
    print("Hurray! Email is right!")
else:
    print("Oh! Email is not real")
r = wrapper.run_test(d) # JSON-encode and decode the result to mimic sandboxed autograder try: r = json.loads(json.dumps(r)) except: print "WARNING: your return value in this test uses an unsupported type!" print "Stick to dictionaries, objects, arrays, strings, numbers, booleans, and null." # Read golden output with open("cases/" + case + '.out', 'r') as f: g = json.loads(f.read().replace("\'", '"').replace("(", '[').replace(")", ']')) # Verify test output ok, message = verifier.verify(r, d, g) # Accounting and grading errors += 0 if ok else 1 print(("OK" if ok else "FAILED") + ": Test \"" + case + ".in\" " + (message if message else (" yields \n" + str(result) + "\n, expecting \n" + str(g)))) print "--------------" if errors == 0: print "Yay! Everything looks correct! Good work." else: print "Oh no! " + str(errors) + " tests failed, so you aren't done yet."
import os
import constant
import prep
import verifier
import emails
from multiprocessing import Process

if __name__ == '__main__':
    for fname in os.listdir(constant.RAW_DIR):
        people = prep.rearrange_info(fname)
        for person in people.values():
            print(person)
            crawled = emails.get_emails(person)
            people[person['id']]['email_list_crawl'] = crawled
        # One inline verify pass in 'server' mode, then a second pass in a
        # child process (started but not joined, as in the original —
        # 'end verify' prints without waiting for it).
        verifier.verify(fname, people, 'server')
        worker = Process(target=verifier.verify, args=(fname, people, False))
        print('start verify')
        worker.start()
        print('end verify')
def post_route():
    """ Verification endpoint.
    This function just responds to the api call in localhost:5000/verify

    Input parameters:
    "orchestrator_id": The ID of the orchestrator responsible of transcoding
    "source": a valid URI to a video source
    "renditions": a list of renditions with the following structure:
    {
        "uri": A valid URI to the transcoded video,
        "resolution": {
            "height": vertical dimension, in pixels
            "width": horizontal dimension, in pixels
        },
        "frame_rate": A value of the expected frames per seconds
        "pixels": The number of expected total pixels
                  (height x width x number of frames)
    },
    "model": The URL to the location of the trained model for verification

    Returns:
    {"orchestrator_id": The ID of the orchestrator responsible of transcoding,
     "source": The URI of the video source
     "results": A list with the verification results, with the following:
     {
        "frame_rate": The ratio between the expected frame rate and the one
            extracted with OpenCv's backend (GStreamer by default)
        "pixels": The number of expected total pixels
            (height x width x number of frames)
        "pixels_pre_verification": The ratio between the expected number of
            total pixels and the one extracted with OpenCv's backend
        "pixels_post_verification": The ratio between the expected number of
            total pixels and the one computed during the decoding
        "resolution": {
            "height": The expected total vertical pixels
            "height_pre_verification": The ratio between the expected height
                and the one extracted with OpenCv's backend
            "height_post_verification": The ratio between the expected height
                and the one computed during the decoding
            "width": The expected total horizontal pixels
            "width_pre_verification": The ratio between the expected width
                and the one extracted with OpenCv's backend
            "width_post_verification": The ratio between the expected height
                and the one computed during the decoding
        },
        "tamper": A float representing a distance to a decision function
            defined by the pre-trained model for verification
        "uri": The URI of the rendition
     }
    }
    """
    if request.method == 'POST':
        data = request.get_json()
        verification = {}
        verification['orchestrator_id'] = data['orchestratorID']
        verification['source'] = data['source']

        model_uri = data['model']
        model_file, model_name = retrieve_model(model_uri)

        # Inform user that model was successfully retrieved
        # (log-message typo "donwloaded" fixed)
        OPERATIONS_LOGGER.info('Model successfully downloaded: %s', model_uri)
        CONSOLE_LOGGER.info('Model successfully downloaded: %s', model_uri)

        # Define whether profiling is needed for logging
        do_profiling = False
        # Define the maximum number of frames to sample
        max_samples = 10

        # Execute the verification
        predictions = verify(verification['source'], data['renditions'],
                             do_profiling, max_samples, model_file, model_name)

        # One prediction per requested rendition, in request order.
        # IDIOM FIX: replaces a manual index counter; still raises IndexError
        # if verify() returned fewer predictions than renditions, as before.
        results = [predictions[i] for i in range(len(data['renditions']))]

        # Append the results to the verification object
        verification['results'] = results
        verification['model'] = model_uri

        VERIFICATIONS_LOGGER.info(verification)
        CONSOLE_LOGGER.info('Verification results: %s', results)
        return jsonify(verification)
# NOTE(review): excerpt begins mid-statement — the matching `try:` for the
# `except` below precedes this chunk.  TODO confirm against the full file.
    os.mkdir(downloadPath)
except:
    # Best-effort: presumably tolerates the directory already existing —
    # the bare except also hides real errors; verify intent.
    pass

# download
dret = None
if len(files) == 0:
    # No payload files listed: download the pars (return value discarded).
    downloader.download(conns, downloadPath, pars, False, guiInfo)
else:
    dret = downloader.download(conns, downloadPath, files, False, guiInfo)

# verify / extract
vret = None
if not len(files) == 0:
    if cfg.get("automation","verify") == "true":
        vret = verifier.verify(conns, downloadPath, pars)
    # TODO: extract download
    if cfg.get("automation","extract") == "true":
        # -1 appears to mean verification failed hard; confirm against verifier.
        if vret != -1:
            extractor.extract(downloadPath)

# generate nfo file
if dret:
    vresult = ""
    # vret codes below: presumably None/2 = unknown, 1 = passed, 3 = repaired
    # — confirm against verifier.verify's return values.
    if vret == None or vret == 2:
        vresult = "???"
    elif vret == 1:
        vresult = "Passed"
    elif vret == 3:
        vresult = "Repaired"
    else:
        # NOTE(review): excerpt ends here mid-branch — the remaining
        # else-body continues past this chunk.
from public import get_public_challenge
from client import mint
from verifier import verify


def main():
    """Demo: mint a proof-of-work token for a public challenge and verify it."""
    hash_function, service, work, desired_token = get_public_challenge(work=10)
    x, i = mint(hash_function, service, work, desired_token)
    print(f'Found x: {x} in {i} attempts')
    print(f'To what does "service + x" hash? {hash_function(service + x)}')
    verify_result = verify(hash_function, service, work, desired_token, x)
    print(f'Verification result: {verify_result}')


# BUG FIX: the original guard was `if __name__ != '__main__': exit(0)`, which
# calls exit() at import time and would terminate any program that imports
# this module.  The standard guard runs the demo only when executed directly.
if __name__ == '__main__':
    main()