def run_actual_test(self, model, train, batch_size, state_dict=None,
                    input=None, use_gpu=True, rtol=0.001, atol=1e-7):
    """
    This is what the user facing version will look like
    """
    # set the training/test mode for the model
    model.train(train)

    # use the pre-trained model params if available
    if state_dict is not None:
        model.load_state_dict(state_dict)

    # Either user specified input or random (deterministic) input
    if input is None:
        input = Variable(torch.randn(batch_size, 3, 224, 224),
                         requires_grad=True)

    # GPU-ize the model, if requested
    if use_gpu:
        model, input = self.convert_cuda(model, input)

    # Verify the model runs the same in Caffe2
    verify.verify(model, input, c2, rtol=rtol, atol=atol)

def main():
    if len(sys.argv) < 3:
        raise ValueError(
            "Give the partitioned matrix and target file as arguments.")
    P = utils.read_matrix(sys.argv[1])
    verify.verify(P, P, debug=False)
    R, C, NZ, M = P
    F = min(10, max(1, MAX // max(R, C)))
    svg = open(sys.argv[2], 'w')
    svg.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
    svg.write(str.format(
        '<svg xmlns:svg="http://www.w3.org/2000/svg" '
        'xmlns="http://www.w3.org/2000/svg" version="1.0" '
        'width="{}" height="{}" id="svg2">\n', C * F, R * F))
    svg.write('\t<g>\n')
    svg.write(str.format(
        '\t\t<rect width="{}" height="{}" x="0" y="0" id="bkg" '
        'style="fill:#ffffff;fill-opacity:1;" />\n', C * F, R * F))
    for r, row in enumerate(M):
        for c, v in row:
            svg.write(str.format(
                '\t\t\t<rect width="{}" height="{}" x="{}" y="{}" '
                'id="r{}-{}" style="fill:{};fill-opacity:1;" />\n',
                F, F, c * F, r * F, c, r, side_to_color(int(v[0]))))
    svg.write('\t</g>\n</svg>\n')
    svg.close()

def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7):
    self.is_script_test_enabled = True
    with torch.onnx.select_model_mode_for_export(model, None):
        graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
        torch._C._jit_pass_lint(graph)
        verify(model, inputs, backend, rtol=rtol, atol=atol)

def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7):
    with torch.onnx.select_model_mode_for_export(model, None):
        graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
        torch._C._jit_pass_lint(graph)
        verify(model, inputs, backend, rtol=rtol, atol=atol,
               opset_version=self.opset_version)

def processing(args):
    csv_data = csv_processing.get_data(args.data_path)
    # print(csv_data)
    model = Model(csv_data, args)           # 1
    # model = Angle_model(csv_data, args)
    # model = Prob_Model(csv_data, args)    # 3
    ans = model.run_raw()
    if args.top5:
        model.run_angle()
    else:
        verify.verify(args, csv_data, model.dist_data, ans)

def test_jumbled_params(self):
    class MyModel(Module):
        def __init__(self):
            super(MyModel, self).__init__()

        def forward(self, x):
            y = x * x
            self.param = Parameter(torch.tensor([2.0]))
            return y

    x = torch.tensor([1, 2])
    with self.assertRaisesRegex(RuntimeError, "state_dict changed"):
        verify(MyModel(), x, backend)

def test_jumbled_params(self):
    class MyModel(Module):
        def __init__(self):
            super(MyModel, self).__init__()

        def forward(self, x):
            y = x * x
            self.param = Parameter(torch.Tensor([2]))
            return y

    x = Variable(torch.Tensor([1, 2]))
    with self.assertRaisesRegex(RuntimeError, "state_dict changed"):
        verify(MyModel(), x, backend)

def assertVerifyExpectFail(self, *args, **kwargs):
    try:
        verify(*args, **kwargs)
    except AssertionError as e:
        if str(e):
            # check only a small substring, because the exact message
            # depends on the system's formatting settings
            self.assertExpected(str(e)[:60])
            return
        else:
            raise
    # Don't put this in the try block; the AssertionError would be caught
    self.assertTrue(False, msg="verify() did not fail when expected to")

def test_modifying_params(self):
    class MyModel(torch.nn.Module):
        def __init__(self):
            super(MyModel, self).__init__()
            self.param = torch.nn.Parameter(torch.tensor([2.0]))

        def forward(self, x):
            y = x * x
            self.param.data.add_(1.0)
            return y

    x = torch.tensor([1, 2])
    verify(MyModel(), x, backend, do_constant_folding=False)

def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, **kwargs):
    import caffe2.python.onnx.backend as backend

    with torch.onnx.select_model_mode_for_export(
        model, torch.onnx.TrainingMode.EVAL
    ):
        graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
        torch._C._jit_pass_lint(graph)
        verify(
            model,
            inputs,
            backend,
            rtol=rtol,
            atol=atol,
            opset_version=self.opset_version,
        )

def verify_sig():
    try:
        signature = request.get_json()["signature"]
        uuid = request.get_json()["uuid"]
        return verify.verify(uuid, signature)
    except Exception as e:
        return data_not_found(e)

def check_copy(src, dst):
    # check that every file was copied and that each copy is correct
    error, err_msg = verify(src, dst, check_hash=True)
    if error:
        print(err_msg)
        return error
    print('checking complete, everything is ok.')

def assertVerifyExpectFail(self, *args, **kwargs):
    try:
        verify(*args, **kwargs)
    except AssertionError as e:
        if str(e):
            # We used to check a substring of the message here:
            #   self.assertExpected(str(e)[:60])
            # but numpy keeps changing the error format and we had to keep
            # updating the expect files, so the constraint was relaxed.
            return
        else:
            raise
    # Don't put this in the try block; the AssertionError would be caught
    self.assertTrue(False, msg="verify() did not fail when expected to")

def testGaussModSpec(self):
    """
    Gaussian objects with model spectra
    """
    import math
    import axesim
    import verify

    # make the simulation
    axesim.simdispim(incat='input_cat_verifyII.dat',
                     config='verificationConfI.conf',
                     dispim_name='test_verify_GaussModspec.fits',
                     model_spectra='input_flat.spc.fits',
                     exptime=10., bck_flux=0.0,
                     detector=self.detectorFlag,
                     silent=self.silentFlag)

    # check that the output image exists
    resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'],
                              'test_verify_GaussModspec.fits')
    self.assertTrue(os.path.isfile(resultFile),
                    'Output file does not exist: %s!' % resultFile)

    # compute the simulated flux and extract the flux values
    # from the simulated image
    simFlux, extrVals = verify.verify(
        resultFile,
        os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_cat_verifyII.dat'),
        os.path.join(os.environ['AXE_CONFIG_PATH'], 'verificationConfI.conf'),
        inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_flat.spc.fits'))

    # go over all objects
    for index in range(len(simFlux)):
        # compute the relative difference between the simulated
        # and the extracted flux
        relDiff1 = math.fabs(simFlux[index] - extrVals[index]['ave']) / simFlux[index]
        relDiff2 = math.fabs(simFlux[index] - extrVals[index]['med']) / simFlux[index]
        relDiff3 = extrVals[index]['maxdev'] / simFlux[index]
        relDiff4 = extrVals[index]['std'] / simFlux[index]

        # make sure the difference is small
        self.assertLess(relDiff1, 2.0E-05)

def __init__(self, **kwargs):
    if 'verb' in kwargs:
        self.verb = kwargs['verb']
    self.change = CHANGE
    self.halve = HALVE
    self.pool = LIMIT
    self.reward = REWARD
    self.diff = DIFFICULTY
    self.confirm = CONFIRMATION
    self.utxo = []
    self.transactions = []
    self.chain = []
    self.nodes = []
    self.temp_spent = []
    try:
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('127.0.0.1', 9168))
        ip = get('https://api.ipify.org').text
        # fetch the chain in pages of 500 blocks, verifying as we go
        i = 500
        sendall(self.s, {'type': 'chain', 'start': 0, 'end': i})
        r = recvall(self.s)
        length, chain = r['length'], r['chain']
        v, c = verify(chain).v_chain()
        if length < i:
            if not v:
                print(json.dumps(chain, indent=4))
                print('received chain was bad, making our own')
                raise socket.error
        while i <= length:
            if not v:
                print('received chain was bad, making our own')
                raise socket.error
            i += 500
            if i >= length:
                break
            sendall(self.s, {'type': 'chain', 'start': i - 500, 'end': i})
            r = recvall(self.s)
            length, chain = r['length'], chain + r['chain']
            v, c = c.v_chain(chain)
        self.chain = chain
        self.utxo = c.utxo
        sendall(self.s, {
            'type': 'new_node',
            'ip': '127.0.0.1',
            'port': kwargs['port']
        })
        r = recvall(self.s)
        for n in r['nodes']:
            self.nodes.append(toTuple(n))
        print('chain accepted')
        self.s.close()
    # `except WindowsError or socket.error` only caught WindowsError, since
    # the `or` is evaluated first; a tuple catches both exception types
    except (WindowsError, socket.error):
        self.genesis()
        print('could not find a node, running our own\n')

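# The wire protocol assumed by the constructor above, sketched for reference;
# the message shapes are inferred from the calls, not from a protocol spec:
#   -> {'type': 'chain', 'start': 0, 'end': 500}
#   <- {'length': <chain length>, 'chain': [<block>, ...]}
#   -> {'type': 'new_node', 'ip': '127.0.0.1', 'port': <port>}
#   <- {'nodes': [<node address convertible by toTuple()>, ...]}
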
def scrape_each_artist(profile_link):
    artist_page = connect(profile_link)
    artist_soup = BS(artist_page, 'lxml')
    try:
        # verifying the artist page
        verify(artist_soup, 'Artist')
    except VerificationError:
        # put the link into a file for later checks
        with open('invalid-artist-links.txt', 'a') as invalid_artist_file:
            invalid_artist_file.write(profile_link + '\n')
        return
    try:
        # if there is info about the artist's birth date
        birth_date = ' '.join(artist_soup.find(
            'span', id='ContentPlaceHolder1_lblBirthInfo').text.split())
    except AttributeError:
        birth_date = None
    try:
        # if there is info about the artist's birthplace
        birth_place = ' '.join(artist_soup.find(
            'span', id='ContentPlaceHolder1_lblBirthPlace').text.split())
    except AttributeError:
        birth_place = None
    try:
        # if there is info about the artist's death date
        death_date = ' '.join(artist_soup.find(
            'span', id='ContentPlaceHolder1_lblDeathInfo').text.split())
    except AttributeError:
        death_date = None
    try:
        # if there is info about the artist's death place
        death_place = ' '.join(artist_soup.find(
            'span', id='ContentPlaceHolder1_lblDeathPlace').text.split())
    except AttributeError:
        death_place = None
    try:
        # if there is info about the artist's education
        education = artist_soup.find(
            'span', id='ContentPlaceHolder1_lblEducation').text
    except AttributeError:
        education = None
    try:
        scrape_past_reg_auctions(birth_date, birth_place, death_date,
                                 death_place, education, artist_soup)
    except AttributeError:
        # if there is no work at past auctions
        pass
    try:
        scrape_past_LTD_auctions(birth_date, birth_place, death_date,
                                 death_place, education, artist_soup)
    except AttributeError:
        pass

def check_chain(self):
    if len(self.chain) >= self.confirm:
        self.confirm += CONFIRMATION
        print('Checking Chain....')
        v, c = verify(self.chain).v_chain()
        if not v:
            print('chain is wrong! Quitting')
            sys.exit()
        else:
            print('chain is right\n')

def gverify(ui, repo, **opts):
    '''verify that a Mercurial rev matches the corresponding Git rev

    Given a Mercurial revision that has a corresponding Git revision in
    the map, this attempts to answer whether that revision has the same
    contents as the corresponding Git revision.
    '''
    ctx = scmutil.revsingle(repo, opts.get('rev'), '.')
    return verify.verify(ui, repo, ctx)

def main():
    mila_URL = "https://www.newdvdreleasedates.com/images/profiles/mila-kunis-01.jpg"
    leo_URL = "https://specials-images.forbesimg.com/imageserve/558c0172e4b0425fd034f8ba/440x0.jpg?fit=scale&background=000000"
    # mila = getFaceIdandGender(mila_URL)
    # leo = getFaceIdandGender(leo_URL)
    leo = StudentInfo("leo", leo_URL)
    mila = StudentInfo("mila", mila_URL)
    compare = verify(mila, leo)

def thread():
    db_session = DBSession()
    solution = db_session.query(Solution).filter(
        Solution.id == solution_id).first()
    with Solution._verify_sem:
        solution.verification = verify.verify(
            solution.language, solution.source,
            solution.problem.test_input, solution.problem.test_output,
            solution.problem.timeout)
    db_session.add(solution)
    db_session.commit()
    db_session.close()

def login():
    if request.method == "GET":
        return render_template("login.html", message="")
    else:
        name = request.form["username"].encode("ascii", "ignore")
        pw = request.form["password"].encode("ascii", "ignore")
        if verify.verify(name, pw):
            session['username'] = name
            # the redirect must be returned, otherwise the view returns None
            return redirect(url_for('hidden'))
        else:
            return render_template("login.html",
                                   message="Invalid Username or Password")

def run(name):
    root = os.path.dirname(os.path.abspath(__file__)) + os.sep + name + os.sep
    res = os.system(sys.argv[1] + ' %spatch.dat > nul' % root)
    if res:
        print('running for patch.dat for element %s failed!' % name)
        sys.exit(1)
    res = verify.verify(name)
    if res:
        print('patch test for element %s passed.' % name)
    else:
        print('patch test failed for element %s!' % name)
        sys.exit(1)

def test_something_is_less_than_one_fails(self):
    algo_result = {
        "__qpis": {
            "number_of_args": 4
        },
        "first_dataframe": [{
            "something": 0
        }]
    }
    results = verify.verify(algo_result)
    self.assertIsNotNone(results)

def decode(self, m_part):
    m_ver = []
    for i in m_part:
        if verify(str(i[0][1]), i[1][0], i[1][1], i[1][2], self.bitlen,
                  self.generator, self.prime, self.pub_key):
            m_ver.append(i[0])
        else:
            print("removing from consideration:")
    x = [i for i, j in m_ver]
    y = [j for i, j in m_ver]
    bits = [round(i) for i in polyfit(x, y, self.k - 1)]
    print(bits)
    return strdecode(bits)

def bond_redeem(bond, address):
    """
    Given a bond and an address, verifies the bond and then adds the bond
    and address to the RedeemerDB for later redemption. Returns True on
    success, raises an exception on error.
    """
    if not valid_address(address):
        raise rpc_lib.RPCException("Invalid address.")
    if not bond_sane(bond):
        raise rpc_lib.RPCException("Bond not sane.")
    if not verify.verify(bond=bond):
        raise rpc_lib.RPCException("Invalid bond.")
    if RedeemerDB.try_to_redeem(bond=bond, address=address):
        return True  # Success!
    else:
        raise rpc_lib.RPCException("Bond already used.")

def token_parse(stok):
    stoklen = len(stok)
    pad = ['', '===', '==', '='][stoklen % 4]
    tok2 = (stok + pad).decode('base64')
    # the token starts with a decimal prefix giving the JSON body length
    i = 0
    while i < len(tok2) and tok2[i] >= '0' and tok2[i] <= '9':
        i += 1
    end = int(tok2[0:i]) + i
    jtok = json.loads("{%s}" % (tok2[i:end]))
    o = jtok.get('o', 0)
    vdata = tok2[i:end + o]
    sig = tok2[end + o:]
    if not verify.verify(vdata, sig):
        raise Exception("Verification failed.")
    if o > 0:
        return jtok, tok2[end:end + o]
    return jtok, None

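# A hedged sketch of the inverse operation, shown only to document the token
# layout that token_parse assumes: unpadded base64 over
#   <decimal body length> <json body without braces> [<extra data>] <signature>
# where an optional 'o' key in the JSON gives len(<extra data>). The `sign`
# callable is hypothetical, standing in for whatever produced the signature
# that verify.verify checks; written in Python 3 for clarity.
import base64
import json

def token_build(fields, extra='', sign=lambda data: 'SIGNATURE'):
    if extra:
        fields = dict(fields, o=len(extra))
    body = json.dumps(fields)[1:-1]     # strip the braces; token_parse re-wraps with "{%s}"
    vdata = body + extra                # the signed portion
    tok = str(len(body)) + vdata + sign(vdata)
    return base64.b64encode(tok.encode()).decode().rstrip('=')
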
def test_baidu_search(self):
    '''Baidu search settings'''
    self.driver.get("https://www.baidu.com")  # open the Baidu home page
    setUp = findmethod(self, "link_text", u"设置")
    ActionChains(self.driver).move_to_element(setUp).perform()  # hover over "Settings"
    search = findmethod(self, "link_text", u"搜索设置")
    search.click()  # click "Search Settings"
    sleep(1)
    select = findmethod(self, "id", "nr")
    select.click()
    sleep(1)
    Select(select).select_by_visible_text("每页显示20条")  # select "20 results per page"
    save = findmethod(self, "link_text", u"保存设置")
    save.click()  # click "Save Settings"
    alert = self.driver.switch_to_alert().text
    a = verify.verify(self.driver, alert, u"已经记录下您的使用偏")
    sleep(5)
    print(a)
    # self.assertEqual(u"已经记录下您的使用偏好", alert)
    self.driver.switch_to_alert().accept()
    sleep(3)

def testModImgSpec_tips(self):
    """
    Model images with input spectra at TIPS level
    """
    import math
    import axesim
    import verify

    inCat = os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_cat_verifyIV.fits')
    inSpc = os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_flat.spc.fits')
    inThm = os.path.join(os.environ['AXE_IMAGE_PATH'], 'galaxyThumbs.fits')

    obs = tips.Observation(inCat, inSpc, inCatForm='TIPS',
                           inSpcForm='aXeSIM', norm=True, inThmDir=inThm)
    obs.loadFromFile(os.path.join(os.environ['AXE_CONFIG_PATH'],
                                  'verificationConfI.fits'))
    obs.runSimulation(workDir=self.tipsDir)

    # check that the output image exists
    resultFile = os.path.join(
        self.tipsDir, 'OUTSIM',
        'input_cat_verifyIV_WFC3_IR_00_v1_verify_d300914_IMG.fits')
    self.assertTrue(os.path.isfile(resultFile),
                    'Output file does not exist: %s!' % resultFile)

    axesimCat = os.path.join(self.tipsDir, 'DATA',
                             'input_cat_verifyIV_WFC3_IR_00.cat')
    verify.getInitialModIndex(inCat, axesimCat)

    # compute the simulated flux and extract the flux values
    # from the simulated image
    simFlux, extrVals = verify.verify(
        resultFile,
        axesimCat,
        os.path.join(self.tipsDir, 'CONF', 'WFC3_IR_00_v1_verify_d300914.conf'),
        inSpec=inSpc, inModel=inThm)

    # go over all objects
    for index in range(len(simFlux)):
        # compute the relative difference between the simulated
        # and the extracted flux
        relDiff1 = math.fabs(simFlux[index] - extrVals[index]['ave']) / simFlux[index]
        relDiff2 = math.fabs(simFlux[index] - extrVals[index]['med']) / simFlux[index]
        relDiff3 = extrVals[index]['maxdev'] / simFlux[index]
        relDiff4 = extrVals[index]['std'] / simFlux[index]

        # make sure the difference is small
        self.assertLess(relDiff1, 2.0E-05)

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for the -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), ".hgsubstate"))
    if revs:
        repo.ui.status(_("checking subrepo links\n"))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                repo.ui.warn(_(".hgsubstate is corrupt in revision %s\n")
                             % node.short(ctx.node()))
    return ret

def verify(repo):
    """verify the consistency of a repository"""
    return verifymod.verify(repo)

def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7):
    trace = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
    torch._C._jit_pass_lint(trace.graph())
    verify(model, inputs, backend, rtol=rtol, atol=atol)

    data_value = str.encode("Initial block")
    block_data_format = struct.Struct('14s')
    packed_head_values = block_head_format.pack(*head_values)
    packed_data_values = block_data_format.pack(data_value)
    curr_block_head = block_head._make(
        block_head_format.unpack(packed_head_values))
    curr_block_data = block_data._make(
        block_data_format.unpack(packed_data_values))
    # print(curr_block_head)
    # print(curr_block_data)
    fp = open(file_path, 'wb')
    fp.write(packed_head_values)
    fp.write(packed_data_values)
    fp.close()
    # Initiated
    prev_hash = hashlib.sha1(packed_head_values + packed_data_values).digest()
else:
    # Verify
    count = 0                      # Number of Transactions
    block_chain_state = "CLEAN"    # CLEAN or ERROR
    verify(file_path)
    # display(file_path)  # For trial and error purposes
    sys.exit(0)

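# The fragment above relies on struct formats and namedtuples defined in a
# truncated part of the file. A hedged reconstruction of the data-block half,
# whose shape the fragment does pin down ('14s' holding one value); the field
# name is illustrative, and block_head's format string is unknown, so it is
# not reconstructed here.
import collections
import struct

block_data_format = struct.Struct('14s')
block_data = collections.namedtuple('block_data', ['value'])

packed = block_data_format.pack("Initial block".encode())  # null-padded to 14 bytes
curr = block_data._make(block_data_format.unpack(packed))  # block_data(value=b'Initial block\x00')
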
def scrape_all_works(birth_date, birth_place, death_date, death_place,
                     education, past_auctions_link, work_type):
    # append to the csv file
    # go to the page of all the works from past auctions
    all_works_page = connect(past_auctions_link)
    all_works_soup = BS(all_works_page, 'lxml')
    try:
        verify(all_works_soup, 'Search Results')
    except VerificationError:
        # If the page fails the verification test, put the link into a file
        # for later checks and re-raise the VerificationError. The error is
        # handled by scrape_past_reg_auctions() or scrape_past_LTD_auctions()
        # (see scrapeEachArtist.py). This means the first four works from
        # past auctions have already been scraped >>> beware of duplicates!
        with open('invalid-all-works-links.txt', 'a') as invalid_all_works_file:
            invalid_all_works_file.write(past_auctions_link + '\n')
        raise
    try:
        # finding the number of pages
        num_of_pages = int(all_works_soup.find_all('dd')[-1].text.strip('…'))
    except ValueError:
        # if there is only 1 page
        num_of_pages = 1
    csv_file = open('saffronart.csv', 'a')
    csv_writer = csv.writer(csv_file)
    # get the correct scrape_each_work function from work_type
    scrape_each_work_mappings = {
        'scrape_each_reg_work': scrape_each_reg_work,
        'scrape_each_LTD_work': scrape_each_LTD_work
    }
    scrape_each_work = scrape_each_work_mappings[
        'scrape_each_' + work_type + '_work']
    for page in range(1, num_of_pages + 1):
        page_content = connect(past_auctions_link, {'pu': page})
        page_soup = BS(page_content, 'lxml')
        # list of all the works on the page
        works_list = page_soup.find_all('a', text='Details')
        for work in works_list:
            work_link = work['onclick'].split("'")[1]
            try:
                (artist_name, title, winning_bid, lo_est, hi_est,
                 auction_name, auction_date, category, style, provenance,
                 exhibition, details) = scrape_each_work(work_link)
                csv_writer.writerow([
                    artist_name, birth_date, birth_place, death_date,
                    death_place, education, title, winning_bid, lo_est,
                    hi_est, auction_name, auction_date, category, style,
                    provenance, exhibition, details
                ])
            except TypeError:
                continue
    csv_file.close()

def apply(self, fn):
    if config.print_transform_timings:
        start_time = time.time()

    transform_name = self.__class__.__name__
    if config.print_transform_names:
        print "-- Running %s on %s" % (transform_name, fn.name)

    if config.print_functions_before_transforms == True or \
       (isinstance(config.print_functions_before_transforms, list) and
        transform_name in config.print_functions_before_transforms):
        print
        print "Running transform %s" % transform_name
        print "--- before ---"
        print repr(fn)
        print

    self.fn = fn
    self.type_env = fn.type_env

    # push an extra block onto the stack just in case one of the
    # pre_apply methods wants to put statements somewhere
    self.blocks.push()
    pre_fn = self.pre_apply(self.fn)
    pre_block = self.blocks.pop()

    if pre_fn is not None:
        fn = pre_fn
    self.fn = fn
    self.type_env = fn.type_env

    # transform the outermost block, which pre_apply may have written to
    new_body = self.transform_block(fn.body)
    if len(pre_block) > 0:
        new_body = pre_block + new_body
    fn.body = new_body
    fn.type_env = self.type_env

    self.blocks.push()
    new_fn = self.post_apply(fn)
    post_block = self.blocks.pop()
    if new_fn is None:
        new_fn = fn
    if len(post_block) > 0:
        new_fn.body = new_fn.body + post_block

    if config.print_functions_after_transforms == True or \
       (isinstance(config.print_functions_after_transforms, list) and
        transform_name in config.print_functions_after_transforms):
        print
        print "Done with %s" % transform_name
        print "--- after ---"
        print repr(new_fn)
        print

    if self.verify:
        try:
            verify.verify(new_fn)
        except:
            print "ERROR after running %s on %s" % (transform_name, new_fn)
            raise

    if config.print_transform_timings:
        end_time = time.time()
        c = self.__class__
        total_time = transform_timings.get(c, 0)
        transform_timings[c] = total_time + (end_time - start_time)
        transform_counts[c] = transform_counts.get(c, 0) + 1
    return new_fn

    untouched = [l for l in g.neighbors(node) if l not in touched]
    score = len(untouched)
    maxAdd = 0
    for neighbor in untouched:
        u = [l for l in g.neighbors(neighbor) if l not in touched]
        maxAdd = max(maxAdd, (len(u) - 1) * 0.5)
    score += maxAdd
    return score

if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print "Input graph required"
    else:
        heur = "oracle"
        if len(sys.argv) > 2:
            heur = sys.argv[2]

        # Read graph in
        g = nx.read_adjlist('graphs/' + sys.argv[1] + '.adjlist')
        print "Solving", sys.argv[1], "with", heur + "Heuristic"

        # Solve for the graph with the greedy algorithm
        solution = solve(g, heuristic=eval(heur + "Heuristic"), alg="greedy")
        print "Size of solution:", len(solution)
        print "Solution:", solution

        # Verify the correctness of the solution
        if verify(g, solution):
            print "Valid!"
        else:
            print "Invalid"

def pending_work():
    conn = sqlite3.connect(os.path.join(carpetbag_root, 'carpetbag.db'))
    pending = list(conn.execute(
        "SELECT id, srcpkg FROM jobs WHERE status = 'pending'"))[:1]

    for jobid, name in pending:
        built = False
        valid = None
        build_logfile = None

        # start logging (of this thread only) to the job logfile
        this_thread = threading.get_ident()

        def threadFilter(record):
            return (record.thread == this_thread)

        job_logfile = os.path.join('/var/log/carpetbag', '%d.log' % jobid)
        fh = logging.FileHandler(job_logfile, mode='w')
        fh.addFilter(threadFilter)
        logging.getLogger().addHandler(fh)

        logging.info('jobid %d: processing %s' % (jobid, name))

        # reldir is needed below, so the assignment must be live
        reldir = os.path.dirname(name)
        outdir = tempfile.mkdtemp(prefix='carpetbag_')
        indir = os.path.join(UPLOADS, reldir)

        # update in database
        conn.execute("UPDATE jobs SET status = ?, log = ?, start_timestamp = ? "
                     "WHERE id = ?",
                     ('in-progress', job_logfile, datetime.datetime.now(), jobid))
        conn.commit()

        status = 'exception'
        try:
            arch = name.split(os.sep)[0]
            srcpkg = os.path.join(UPLOADS, name)

            # examine the source package
            package = analyze(srcpkg, indir)

            if package.kind:
                # build the packages
                build_logfile = os.path.join('/var/log/carpetbag',
                                             'build_%d.log' % jobid)
                built = build(srcpkg, os.path.join(outdir, arch, 'release'),
                              package, jobid, build_logfile, arch)
                if built:
                    # verify the built package
                    valid = verify(indir, os.path.join(outdir, reldir))

            # one-line summary of this job
            logging.info('jobid %d: processed %s, build %s, verify %s'
                         % (jobid, name, color_result(built), color_result(valid)))

            # clean up
            if not debug:
                logging.info('removing %s' % outdir)
                shutil.rmtree(outdir)
                logging.info('removing %s' % indir)
                shutil.rmtree(indir)

            status = 'processed'
        except:
            logging.exception('')
            raise
        finally:
            # stop logging to the job logfile
            logging.getLogger().removeHandler(fh)

            # update in database
            conn.execute("UPDATE jobs SET status = ?, buildlog = ?, built = ?, "
                         "valid = ?, end_timestamp = ? WHERE id = ?",
                         (status, build_logfile, built, valid,
                          datetime.datetime.now(), jobid))
            conn.commit()

import random

from generate import generate
from sign import sign
from verify import verify

length = int(input("Enter number of bits: "))
generator, prime = generate(length)
print(generator, prime)

# Key generation
pri_key = random.randint(1, prime - 1)  # private key
print("Private Key =", pri_key)
pub_key = pow(generator, pri_key, prime)  # public key
print("Public Key =", pub_key)

message = input("Enter message text: ")
z, c, t = sign(message, pri_key, length, generator, prime)
print(verify(message, z, c, t, length, generator, prime, pub_key))

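# A non-interactive variant of the same flow, as a hedged sketch: it assumes
# generate/sign/verify keep the call signatures used above and that verify
# returns a truthy value on success; the default length is illustrative.
def demo(length=64, message="hello"):
    generator, prime = generate(length)
    pri_key = random.randint(1, prime - 1)
    pub_key = pow(generator, pri_key, prime)
    z, c, t = sign(message, pri_key, length, generator, prime)
    assert verify(message, z, c, t, length, generator, prime, pub_key)
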
for ptfield in ptform_xml.xpath('//field[field_attributes/cid_mapping]'):
    if ptfield.attrib['id'] in alist:
        continue
    altid = (ptfield.xpath("./field_attributes/image/refined_image/"
                           "alternate_field_id/@value") or [''])[0]
    cid = (ptfield.xpath("./field_attributes/cid_mapping/"
                         "formml_field_id/@value") or [''])[0]
    f.write("\t<tps id='{0}' type='{1}' {2} {3}/>\n".format(
        ptfield.attrib['id'],
        (ptfield.xpath("./field_attributes/main/type/@value") or [''])[0],
        "" if not altid else "altid='%s' " % altid.upper(),
        "" if not cid else "cid='%s' " % cid))
f.write("</Document>")

verify(settings, mefform, output, documentName)

with open('output/%s/%s.xml' % (settings.get('mefformset'), mefform), 'w') as f:
    f.write(get_header(settings.get("mefformset"), mefform))
    for each in output.getchildren():
        if 'tps' in each.attrib:
            each.attrib.pop('tps')
        if 'func' in each.attrib:
            each.attrib.pop('func')
        if 'field_on_value' in each.attrib:
            each.attrib.pop('field_on_value')
        f.write('\t' + etree.tostring(each) + '\n')
    f.write("</Document>")

##### INPUT CHECKING ######

# exactly one of the three options must be set
if not (options.install ^ options.create ^ options.verify) or \
   not (options.install or options.create or options.verify):
    print parser.get_usage()
    sys.exit(1)

# filter options that work only with install
if not options.install and (options.force or options.confs or options.repos):
    print parser.get_usage()
    sys.exit(2)

# create, install and verify take at least one argument
if (options.create and len(args) != 1) or \
   (options.install and len(args) < 2) or \
   (options.verify and len(args) == 0):
    print parser.get_usage()
    sys.exit(3)

##### EXECUTE #######

if options.install:
    wconf = args[0]
    rconfs = split_args(options.confs)
    repos = split_args(options.repos)
    packages = args[1:]
    install.install(packages, repos, rconfs, "/", wconf, options.force)

if options.create:
    create.create(args[0])

if options.verify:
    verify.verify(args)
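
# Example invocations under the option scheme checked above (the program and
# package names are hypothetical, for illustration only):
#   prog --create world.conf
#   prog --install world.conf pkg-a pkg-b --confs a.conf,b.conf --repos r1,r2
#   prog --verify pkg-a pkg-b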