def test_simply_supported_beam_unequal_non_symmetric_loads():
    """Simple beam: two unequal concentrated loads, non-symmetrically placed.

    Load case 11.
    """
    load_1 = -900
    load_2 = -1200
    dist_a = L / 4  # placement distance for the first load (load case 11)
    dist_b = L / 5  # placement distance for the second load, measured from the far end
    react_1 = -(load_1 * (L - dist_a) + load_2 * dist_b) / L
    react_2 = -(load_1 * dist_a + load_2 * (L - dist_b)) / L
    moment_1 = react_1 * dist_a
    mid = L / 2
    moment_mid = react_1 * mid + load_1 * (mid - dist_a)
    moment_2 = react_2 * dist_b

    applied_loads = [
        PointLoad(magnitude=load_1, location=dist_a),
        PointLoad(magnitude=load_2, location=L - dist_b),
    ]
    supports = [PinnedReaction(pos) for pos in (0, L)]
    beam = Beam(length=L, loads=applied_loads, reactions=supports, E=E, Ixx=Ixx)
    beam.solve()

    # verify reactions and moment at each point of interest
    for moment, where in zip((moment_1, moment_mid, moment_2),
                             (dist_a, L / 2, L - dist_b)):
        validate(beam, loc=where, R=[(react_1, 0), (react_2, 0)],
                 M_loc=moment, d_loc=None)
def msls_whitenapply(params, m, P, dimensions=(2048, 1024, 512, 256, 128, 64, 32)):
    """Apply whitening to per-city MSLS features and write retrieval CSVs.

    :param params: run configuration (expects .dataset, .subset, .name, .root_dir)
    :param m: whitening mean
    :param P: whitening projection matrix
    :param dimensions: output dimensionalities to evaluate. The default is a
        tuple instead of the original mutable list default, which is shared
        between calls (classic Python pitfall); callers may still pass a list.
    """
    cities = msls_cities[params.subset]
    features_dir = "results/" + params.dataset + "/" + params.subset + "/"
    if not os.path.exists(features_dir):
        os.makedirs(features_dir)
    # truncate (or create) one result file per dimensionality
    for d in dimensions:
        result_file = features_dir + params.name + "_retrieved_whiten_" + str(d) + ".csv"
        f = open(result_file, "w+")
        f.close()
    for c in cities:
        db_file = features_dir + params.name + "_" + c + "_mapfeats.npy"
        q_file = features_dir + params.name + "_" + c + "_queryfeats.npy"
        # "test" features live under test/; train and val both under train_val/
        ds_folder = params.subset if params.subset == "test" else "train_val"
        q_idx_file = params.root_dir + ds_folder + "/" + c + "/query.json"
        m_idx_file = params.root_dir + ds_folder + "/" + c + "/database.json"
        m_raw_file = params.root_dir + ds_folder + "/" + c + "/database/raw.csv"
        # NOTE(review): q_raw_file is computed but never used — kept in case a
        # caller of mapquery_whitenapply is meant to receive it; confirm.
        q_raw_file = params.root_dir + ds_folder + "/" + c + "/query/raw.csv"
        mapquery_whitenapply(params.dataset, params.name, params.root_dir,
                             params.subset, db_file, q_file, m, P,
                             m_idx_file=m_idx_file, q_idx_file=q_idx_file,
                             m_raw_file=m_raw_file, dimensions=dimensions)
    if params.subset == "val":
        # score the retrieved lists for the validation subset
        for d in tqdm(dimensions):
            result_file = features_dir + params.name + "_retrieved_whiten_" + str(d) + ".csv"
            validate(result_file, params.root_dir,
                     result_file.replace("retrieved", "result").replace(".csv", ".txt"))
def test_simply_supported_beam_offset_load():
    """Simple beam with a concentrated load at arbitrary points (load case 8)."""
    for a in (2, 3, 5, 7, 8):
        b = L - a
        # closed-form solution for load case 8
        react_1 = -P * b / L
        react_2 = -P * a / L
        moment_at_load = -P * a * b / L
        deflection_at_load = P * a**2 * b**2 / (3 * EI * L)

        # numerical result
        beam = Beam(
            L,
            loads=[PointLoad(P, a)],
            reactions=[PinnedReaction(x) for x in (0, L)],
            E=E,
            Ixx=Ixx,
        )
        beam.solve()
        validate(beam, loc=a, R=[(react_1, 0), (react_2, 0)],
                 M_loc=moment_at_load, d_loc=deflection_at_load)
def main():
    """Load the input file, optimize it, validate, and export the result."""
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    data = load_file(in_path)
    result = optimize(data)
    validate(data, result)
    export(out_path, result)
def validate_mdl(filename_v17_mdl, filename_v19_mdl, target_version):
    """Validate a generated MDL against an XSLT conversion of the source MDL.

    :param filename_v17_mdl: the v0_8_17 MDL file
    :param filename_v19_mdl: the v0_8_19 MDL file
    :param target_version: which of the two provided files was generated
        (17 or 19); the other one is the source of the generation
    """
    # choose transform direction based on the target version
    if target_version == 17:
        source = filename_v19_mdl
        stylesheet = V19_TO_V17_XSLT
        schema = V17_FILENAME_XSD
        reference = filename_v17_mdl
    else:
        source = filename_v17_mdl
        stylesheet = V17_TO_V19_XSLT
        schema = V19_FILENAME_XSD
        reference = filename_v19_mdl

    # apply the stylesheet and normalize the output for comparison
    transform = etree.XSLT(etree.parse(stylesheet))
    transformed = transform(etree.parse(source))
    transformed_file = StringIO(strip_default_ns(str(transformed)))

    # invoke validation
    validate(schema, reference, transformed_file)
def main(opt):
    """Main process of test.py: load the model, dehaze, then score the output."""
    model = utils.loadModel(opt.checkpoint, ImproveNet(opt.rb), dataparallel=True)
    device = utils.selectDevice() if opt.cuda and torch.cuda.is_available() else 'cpu'

    if opt.normalize:
        # undo the input normalization on the network output
        model = nn.Sequential(
            model,
            InverseMeanShift(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
        ).to(device)

    # inference over the hazy test images
    predict(opt, model, opt.hazy, opt.dehazy, device)

    # score dehazed output against ground truth
    gts = sorted(utils.load_all_image(opt.gt))
    dehazes = sorted(utils.load_all_image(opt.dehazy))
    if opt.record is None:
        validate(dehazes, gts)
    else:
        validate(dehazes, gts, os.path.join(opt.dehazy, opt.record))
    return
def handle_upload(self, blob):
    """Accumulate uploaded chunks into a temp file; on the final (empty) chunk,
    validate the tarball and push the content into the database.

    An empty first chunk means no file was uploaded at all.
    """
    if self.file is None:
        if len(blob) == 0:
            # No file is uploaded
            self.close()
            return
        self.file = NamedTemporaryFile(mode="wb", dir=TEMP_PATH)
    if len(blob) != 0:
        self.file.write(blob)
        return
    # empty chunk after data: upload complete
    self.file.flush()
    # BUG FIX: if tarfile.open() itself raised, the old bare `except` handler
    # called tar.close() on an unbound name, raising NameError and masking
    # the real error. Guard with None and always close in `finally`.
    tar = None
    try:
        tar = tarfile.open(self.file.name, mode='r:gz')
        validate(self.metadata, tar)
    finally:
        if tar is not None:
            tar.close()
    # And push it into the database and such
    add_content(self.user, self.metadata, self.file.name, self.db_conn)
    # Only close the file afterwards as this close removes the file
    self.file.close()
    self.file = None
    self.send("all okay, content uploaded into bananas")
    self.close()
def test_validate_errors_with_no_files(capsys):
    """validate() must exit with code 1 and complain when no files are given."""
    sys.argv = [""]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert "No files given to process" in err
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 1]
def run(participants, output_folder: str = __DEFAULT_OUTPUT_FOLDER__) -> None:
    """Create groups from the participants, assign names, then validate picks."""
    create_output_folder(output_folder)
    for group in create_groups(participants):
        assign_names(group, output_folder)
    validate(participants, output_folder)
    print(f"Picks complete. See {output_folder}/*.txt")
def get_options():
    """Parse command-line options and dispatch to evaluate/validate/train."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--epochs", type=int, dest="epochs",
                        help="number of epochs", default=100)
    parser.add_argument("-lr", type=float, dest="lr",
                        help="learning rate", default=0.001)
    # boolean switches share the same store_true action
    for flag in ("--display", "--save", "--load", "--eval", "--validate"):
        parser.add_argument(flag, action='store_true')
    args = parser.parse_args()

    if args.eval:
        evaluate()
    elif args.validate:
        validate(args.display)
    else:
        train(args.epochs, args.lr, args.display, save=args.save, load=args.load)
def main(filename, flags):
    """Parse the GEDCOM file, display the requested output, then validate it."""
    gedcom = Gedcom(filename)
    gedcom.displayOutput(flags)
    print(
        "\n\n\n========================= Error informations ==========================\n"
    )
    validate.validate(gedcom)
    return 0
def inspectBoard(move, b, heading, pieces):
    # Walk outward from `move` on board `b` in one heading (or all headings
    # when heading is None), collecting opponent pieces in `pieces` and
    # flipping them when a friendly piece closes the line (Othello-style).
    y = move[0]
    x = move[1]
    player = move[2]
    compass = {
        # visual of compass heading on grid
        # 0,1,2
        # 7,*,3
        # 6,5,4
        0: (y - 1, x - 1, player),
        1: (y - 1, x, player),
        2: (y - 1, x + 1, player),
        3: (y, x + 1, player),
        4: (y + 1, x + 1, player),
        5: (y + 1, x, player),
        6: (y + 1, x - 1, player),
        7: (y, x - 1, player)
    }
    if (heading is None):
        # NOTE(review): iterates range(len(b)) — the board size — but compass
        # has exactly 8 keys. This only works when the board is 8x8; presumably
        # range(len(compass)) was intended. Confirm before changing.
        heading = range(len(b))
        for h in heading:
            target = compass[h]
            ## check for array out of bounds
            if validate(target, b):
                dst_sq = b[target[0]][target[1]]
            else:
                continue
            if (dst_sq == player):
                # friendly piece closes the line: flip everything collected
                flipColors(b, pieces)
            elif (dst_sq == '-'):
                # empty square: the line is open, discard collected pieces
                pieces = []
            else:
                # opponent piece: collect it and keep walking this heading
                pieces.append(target)
                inspectBoard(target, b, h, pieces)
    else:
        target = compass[heading]
        ## check for array out of bounds
        if validate(target, b):
            dst_sq = b[target[0]][target[1]]
            if (dst_sq == player):
                flipColors(b, pieces)
            elif (dst_sq == '-'):
                pieces = []
            else:
                pieces.append(target)
                inspectBoard(target, b, heading, pieces)
    return b, pieces
def main():
    """Build the network, train it, then run validation."""
    net = Network()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.RMSprop(net.parameters(), lr=0.001)
    train(
        net=net,
        criterion=criterion,
        optimizer=optimizer,
        num_epochs=NUM_EPOCHS,
    )
    validate(net)
def test_validate_fails_invalid_json(prep_data, capsys):
    """A file with invalid JSON is reported invalid and gets no marker file."""
    target = os.path.join(prep_data["dir"], "invalid_json.log")
    sys.argv = ["", target]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert "invalid_json.log: invalid" in err
    # NOTE(review): checks "invalid.log.validated", not "invalid_json.log.validated";
    # preserved from the original — confirm which filename was intended.
    assert not os.path.isfile(
        os.path.join(prep_data["dir"], "invalid.log.validated"))
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 0]
def run_test_scenario(scenario_dir: str, output: str, endpoint: str):
    """Run the identified test scenario and store the results."""
    validate(scenario_dir, endpoint)
    # scenario name is the second path component, e.g. "scenarios/<name>"
    scenario_name = scenario_dir.split("/")[1]
    for src_name, dst_fmt in (("result.json", "%s.json"), ("m1.png", "%s_m1.png")):
        os.rename(join(scenario_dir, src_name),
                  join(output, dst_fmt % scenario_name))
def get_cfg(cfile=TEST_FILE, verbose=False, template_vars=None,
            template_vars_file=None):
    """Parse, validate and expand the configuration, caching it in the module
    global ``cfg``.

    :param cfile: path to the config file
    :param verbose: pass verbose flag through to parse()
    :param template_vars: optional list of template variables. Defaults to
        None instead of the original ``[]`` — a mutable default is shared
        between calls (classic Python pitfall); behavior is unchanged because
        an empty list is substituted internally.
    :param template_vars_file: optional file of template variables
    :return: the parsed, validated and expanded config
    """
    global cfg
    if not cfg:
        if template_vars is None:
            template_vars = []
        cfg = parse(
            cfile,
            verbose=verbose,
            template_vars=template_vars,
            template_vars_file=template_vars_file,
        )
        validate(cfg, SPEC)
        cfg = expand(cfg, SPEC)
    return cfg
def MusicXML_tree_to_MusicXML_file(tree, filename=None):
    """Serialize a MusicXML element tree to a file, optionally DTD-validating."""
    assert ElementTree.iselement(tree)
    encoding = config.get("xml_encoding")
    writer = FileWriter(filename, encoding)
    # XML prolog, provenance comment, and the MusicXML 1.0 partwise DOCTYPE
    writer.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
    writer.write("<!-- %s -->\n" % created)
    writer.write('<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN" "http://www.musicxml.org/dtds/partwise.dtd">\n')
    ElementTree.ElementTree(tree).write(writer)
    if config.get("dtd"):
        validate(filename)
def test_validate_will_not_overwrite(prep_data, capsys):
    """If the .validated marker already exists, the file is not re-reported."""
    source = os.path.join(prep_data["dir"], "valid.log")
    shutil.copy(source, source + ".validated")
    sys.argv = ["", source]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert "valid.log: valid" not in err
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 0]
def test_validate_respects_dry_run(prep_data, capsys):
    """--dry-run reports validity but leaves the filesystem untouched."""
    target = os.path.join(prep_data["dir"], "valid.log")
    sys.argv = ["", target, "--dry-run"]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert "valid.log: valid" in err
    # dry-run must neither create the marker nor remove the source file
    assert not os.path.isfile(target + ".validated")
    assert os.path.isfile(target)
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 0]
def train(num_epochs, model, dataloader, optimizer, loss_fn,
          number_of_showed_predictions=0, is_cuda_available=False,
          validate_frequency=1):
    """Train ``model`` for ``num_epochs`` epochs over ``dataloader``.

    Runs validation every ``validate_frequency`` epochs and prints per-epoch
    accuracy and loss. ``number_of_showed_predictions`` is accepted for
    interface compatibility (unused in the visible code).

    Fixes vs. original: removed unused locals (best_acc, images_to_show,
    predictions, ground_truths) and accumulate accuracy via ``.item()`` so
    ``train_acc`` is a plain number rather than a tensor.
    """
    if is_cuda_available:
        model.cuda()
    for epoch in range(num_epochs):
        model.train()
        train_acc = 0.0
        train_loss = 0.0
        for image, labels in tqdm.tqdm(dataloader):
            optimizer.zero_grad()
            if is_cuda_available:
                image = torch.autograd.Variable(image).cuda()
                labels = torch.autograd.Variable(labels).cuda()
            output = model(image)
            loss = loss_fn(output, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.cpu().item()
            prediction = torch.argmax(output, dim=1)
            for pred, lab in zip(list(prediction), list(labels)):
                print("PREDICTED: {}, GT: {}".format(
                    ODIRDataset.Pathology(pred.item()).name,
                    ODIRDataset.Pathology(lab.item()).name))
            # .item() keeps the accumulator a plain number instead of a tensor
            train_acc += torch.sum(prediction == labels).item()
            torch.cuda.empty_cache()
        if epoch % validate_frequency == 0:
            validate(model, dataloader, loss_fn, is_cuda_available)
        print("Epoch {}, Train Accuracy: {} , Train Loss: {}.".format(
            epoch, train_acc, train_loss))
def test_validate_fails_duplicate_id(prep_data, capsys):
    """Duplicate ids are reported (verbosely) and no marker file is created."""
    target = os.path.join(prep_data["dir"], "duplicate.log")
    sys.argv = ["", target, "--verbose"]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert "Duplicate" in err
    assert "duplicate.log: invalid" in err
    assert not os.path.isfile(target + ".validated")
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 0]
def get_cfg(cfile=TEST_FILE, verbose=False, template_vars=None,
            template_vars_file=None):
    """Parse, validate and expand the configuration, caching it in the module
    global ``cfg``.

    ``template_vars`` defaults to None instead of the original ``[]`` — a
    mutable default argument is shared between calls (classic Python pitfall);
    an empty list is substituted internally so behavior is unchanged.
    """
    global cfg
    if not cfg:
        if template_vars is None:
            template_vars = []
        cfg = parse(cfile, verbose=verbose, template_vars=template_vars,
                    template_vars_file=template_vars_file)
        validate(cfg, SPEC)
        cfg = expand(cfg, SPEC)
    return cfg
def test_validate_handles_success_and_failure(prep_data, capsys):
    """A mixed batch of files yields both valid and invalid reports."""
    base = prep_data["dir"]
    sys.argv = [
        "",
        os.path.join(base, "valid.log"),
        os.path.join(base, "duplicate.log"),
        os.path.join(base, "invalid_json.log"),
    ]
    with pytest.raises(SystemExit) as excinfo:
        validate()
    _, err = capsys.readouterr()
    assert ": invalid" in err
    assert ": valid" in err
    assert [excinfo.type, excinfo.value.code] == [SystemExit, 0]
def validate_params(self, params):
    # Check request parameters for this handler (Python 2 code: has_key,
    # comma except-clause). Returns False on any failure.
    # NOTE(review): the visible code falls off the end after the try/except —
    # the success path (e.g. `return valid`) appears to be truncated; confirm
    # against the full file.
    validator = validate.validate()
    if len(params.keys()) == 0:
        self.help()
        return False
    #
    # I am a blank canvas
    #
    valid = {'output': 'png'}
    #
    # La la la - I can't hear you
    #
    if not params.has_key('method'):
        params['method'] = ['center']
    #
    # Everyone needs a provider...
    #
    try:
        validator.ensure_args(params, ('provider', ))
    except Exception, e:
        self.error(101, e)
        return False
def test_plate_validation_british(self):
    """A mixed-case British plate is upper-cased and flagged non-matching."""
    validator = validate()
    plate, matched = validator.plate('YS56 Bhc')  # British number plate
    # plate() upper-cases its input before returning it
    self.assertEqual('YS56 BHC', plate)
    self.assertFalse(matched)
def main():
    # PAC-learn a one-clock timed automaton (OTA) from a target system built
    # from modelFile, then validate the learned model's quality.
    # build target system
    targetSys = buildSystem(modelFile)
    makeOTA(targetSys, filePath, '/results/targetSys')

    # get prior information required for learning
    with open(preconditionFile, 'r') as fr:
        information = json.load(fr)
    inputs = information["inputs"]
    upperGuard = information["upperGuard"]
    epsilon = information["epsilon"]  # accuracy
    delta = information["delta"]  # confidence
    stateNum = information["stateNum"]

    # pac learning OTA
    startLearning = time.time()
    print("********** start learning *************")
    comparatorFlag = True
    learnedSys, mqNum, eqNum, testNum = learnOTA(targetSys, inputs, upperGuard,
                                                 epsilon, delta, stateNum,
                                                 comparatorFlag, debugFlag=False)
    endLearning = time.time()

    # verify model quality
    correctFlag, passingRate = validate(learnedSys, targetSys, upperGuard,
                                        stateNum, eqNum, delta, epsilon)

    # learning result
    if learnedSys is None:
        print("Error! Learning Failed.")
        return {"result": "Failed"}
    else:
        print("---------------------------------------------------")
        print("Learning Succeed! \nThe result is as follows.")
        learnedSys = structSimpleHypothesis(learnedSys)
        # NOTE(review): `i` is not defined in this function — presumably a
        # module-level loop counter; confirm against the full file.
        makeLearnedOTA(learnedSys, filePath, '/results/learnedSys_' + str(i))
        print("Total time of learning: " + str(endLearning - startLearning))
        print("Total number of MQs (no-cache): " + str(mqNum))
        print("Total number of EQs (no-cache): " + str(eqNum))
        print("Total number of test (no-cache): " + str(testNum))
        print('accuracy', str(1 - epsilon), ' passingRate', str(passingRate))
        # flatten the learned transitions into a serializable form
        trans = []
        for t in learnedSys.trans:
            trans.append([str(t.tranId), str(t.source), str(t.input),
                          t.showGuards(), str(t.isReset), str(t.target)])
        resultObj = {
            "result": "Success",
            "learningTime": endLearning - startLearning,
            "mqNum": mqNum,
            "eqNum": eqNum,
            "testNum": testNum,
            "passingRate": passingRate,
            "correct": correctFlag,
            "learnedState": len(learnedSys.states),
            "Model": {
                "inputs": learnedSys.inputs,
                "states": learnedSys.states,
                "initState": learnedSys.initState,
                "acceptStates": learnedSys.acceptStates,
                "sinkState": learnedSys.sinkState,
                "trans": trans
            }
        }
        return resultObj
def validate_params (self, params) :
    # Check request parameters for this handler (Python 2 code: has_key,
    # comma except-clause). Returns False on any failure.
    # NOTE(review): the visible code falls off the end after the try/except —
    # the success path appears to be truncated; confirm against the full file.
    validator = validate.validate()
    if len(params.keys()) == 0 :
        self.help()
        return False
    #
    # I am a blank canvas
    #
    valid = {'output' : 'png'}
    #
    # La la la - I can't hear you
    #
    if not params.has_key('method') :
        params['method'] = ['center']
    #
    # Everyone needs a provider...
    #
    try :
        validator.ensure_args(params, ('provider',))
    except Exception, e :
        self.error(101, e)
        return False
def webhook(): res = request.get_json(silent=True) event = checkEvent(res).get_event() menu = menusFB(url_send,os.environ['TOKEN'],res,usuarios_faltantes) req_backend = reqsbackend() validate_obj = validate(url_send,os.environ['TOKEN'],res) print res print event if event is 'postback': print event postback_obj = postback(res,menu,validate_obj) postback_obj.derivar_postback() elif event is 'message': if menu.guardar_y_solicitar_dato(): menu.pedirDato() if menu.contieneTexto('menu'): menu.menu_principal() elif event is 'optin': if not req_backend.existeUser(res['sender']['id']): url = url_user_datos.replace('<USER_ID>', res['sender']['id']) + os.environ['TOKEN'] datos_user = request.get(url) datos_user['user_fb'] = res['sender']['id'] req_backend.crearUser(datos_user) else: print "error event" return "asd"
def test_project(self):
    """Validation of the sample project must report at least one issue."""
    repo = models.Repository()
    project = repo.users[user_to_test].projects[project_to_test]
    issues = validate(project)
    pprint(issues)
    assert len(issues) > 0
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: run validation and append metrics to the history log.

    Fixes vs. original: ``logs`` defaults to None instead of the mutable
    ``{}`` (shared between calls — classic Python pitfall), and the log file
    is opened with a context manager so it is closed even if write() fails.
    """
    if logs is None:
        logs = {}
    print('logging!')
    print(type(self.model))
    valTr, valRo, _ = validate(self.model)
    # append-mode so successive epochs accumulate in one file
    with open(historyloglocation, "a+") as f:
        f.write('{},{},{},{}\n'.format(
            logs.get('val_loss'), logs.get('loss'), valTr, valRo))
def test():
    """Run one identification round against the current request's IP."""
    print('Running Test..')
    message = identify(validate(request()))
    # identify() signals failure by returning False
    if message == False:
        print('Invalid IP Address! Try Again!')
    else:
        print(message)
def validation(cluster_labels, distances):
    """Compute a validation ratio for each clustering method's label set."""
    return {
        method: validate.validate(labels, distances)
        for method, labels in cluster_labels.items()
    }
def initialize(self, f, n, bounds): """ Description of the minimization problem :param 'f': the name of the python function where the cost function\ is implemented (type: str) :param 'n': the number of optimization variables (type: int) :param 'bounds': a numpy array for the lower and upper bounds of\ optimization variables (type: int) :return: error code = 0 if successful (type: int) """ # Test the arguments error = validate(f, n, bounds, self.verbose) if error: if self.verbose: print('--> genop Error!') print('\tIncorrect arguments.') return -1 # Save all the information self.function = f self.nvar = n self.bounds = bounds # Initialize the population and the cost function self._pop = np.zeros((self.popsize, self.nvar)) self._fvalpop = np.zeros((self.popsize, 1)) # Job done self.__ready = True return 0
def main(): parser = argparse.ArgumentParser(description="Remove duplicate exception aliases.") parser.add_argument('file', metavar='FILE', type=str, help="File to remove duplicates from.") parser.add_argument('--output', dest='output', action='store', default='FILE', help="File to write result to. (default: %(default)s)") parser.add_argument('--write', dest='write', action='store_true', help="Disable prompt, write to OUTPUT file automatically") args = parser.parse_args() if args.output == 'FILE': args.output = args.file print "file", '"' + args.file + '"' print "output", '"' + args.output + '"' print "write", args.write print if not os.path.exists(args.file): print "ERROR: file", '"' + args.file + '"', "does not exist" return data = remove_duplicates(args.file) output_original = get_text(args.output) data_len = len(data.replace('\n', '').replace('\r', '')) output_original_len = len(output_original.replace('\n', '').replace('\r', '')) print abs(data_len - output_original_len), "bytes changed" if data_len - output_original_len == 0: print "nothing to save" return print 'validating data:', if not validate(data, trace=True): return if not args.write: args.write = raw_input('Write to "' + args.output + '"? [no]: ') == 'yes' if args.write: f = open(args.output, 'wb') f.write(data) f.close() print "done" else: print 'not writing data'
def authR(self,args):
    # Handle the client's reply to an authentication challenge.
    # `args` is expected to be "<cookie> <key>" as sent by the client.
    cookie,key = args.split(' ')
    if not self.user or not self.cookie:
        # authR without a preceding authQ: protocol violation
        self.error('unexpectedMessage','Your software seems to be malfunctioning<br />Technical note: it sent message authR without a previous authQ')
    elif cookie != self.cookie:
        # cookie mismatch: the reply does not match our challenge
        self.error('unexpectedMessage','Your software seems to be malfunctioning<br />Technical note: It replied with a different cookie')
    elif not validate(_Authentication_Method,self.cookie,self.user.passwd,key):
        # the key does not verify against the stored password
        self.error('authenticationError','Authentication error. Please check your user name and password')
    else:
        self.authenticated=True
        # announce the newly authenticated user to all connected players
        for p in _players:
            p.userStatus(self.name,userstatus.ONLINE)
def gof(p):
    """Goodness-of-fit: run a SUMO simulation with parameter vector ``p`` and
    return the resulting RMSE, logging the evaluation to ``fpLog``.
    """
    para = [('vMax', p[0]), ('aMax', p[1]), ('bMax', p[2]),
            ('lCar', p[3]), ('sigA', p[4]), ('tTau', p[5])]
    print('# simulation with:', *["%s:%.3f" % i for i in para])
    # FIX: context manager guarantees the vehicle-type file is flushed and
    # closed before SUMO reads it (original used open/write/close unguarded)
    with open('data/input_types.add.xml', 'w') as fType:
        fType.write(('<routes>\n <vType accel="%(aMax)s" decel="%(bMax)s" id="pass"' +
                     ' length="%(lCar)s" minGap="2.5" maxSpeed="%(vMax)s"' +
                     ' sigma="%(sigA)s" tau="%(tTau)s" />\n</routes>') % dict(para))
    result = validate.validate(checkBinary('sumo'))
    print('#### yields rmse: %.4f' % result)
    # append this evaluation to the shared optimization log
    print("%s %s" % (" ".join(["%.3f" % pe for pe in p]), result), file=fpLog)
    fpLog.flush()
    return result
def calculateAshGlobally():
    # Backfill ASH values for entries that do not have one yet (Python 2 code).
    tried, succeded = 0, 0
    for e in dao.ixr_getEntriesWithoutAsh(5000000):
        # validate first; failures are counted but otherwise ignored
        r = validate.validate(e, propagate_exceptions=False)
        tried += 1
        if r['success']:
            ash = yacpdb.entry.ash(e)
            dao.ixr_updateEntryAsh(e["id"], ash)
            succeded += 1
            print "%s: %s" % (e["id"], ash)
        else:
            pass # print str(e["id"]) + ": failed - " + "; ".join(r['errors'])
    print "tried: %d, succeeded: %d" % (tried, succeded)
def __init__(self, request_data):
    # Validate an incoming plate-recognition request and process each
    # plate reading it contains.
    self.request_data = request_data
    self.valid = validate()
    # validate the individual request fields
    site_id = self.valid.site(request_data['site_id'])
    self.site_cam_id = self.valid.cam(request_data['camera_id'])
    self.uuid = self.valid.uuid(request_data['uuid'])
    self.time_2 = self.valid.time(request_data['epoch_time'])
    self.sqlite = database()
    self.calc = calculate()
    # resolve database ids for the site and camera
    self.s_id, self.s_limit = self.sqlite.find_site(site_id)
    self.cam_id = self.sqlite.get_cam_id(self.site_cam_id, self.s_id)
    self.curr_cam_m = self.sqlite.get_cam_m(self.cam_id, self.s_id)
    # process every plate reading included in the request
    for plate_no in range(0,len(self.request_data['results'])):
        self.plate(plate_no)
def main(): parser = argparse.ArgumentParser(description="Merge files by parsing them into dictionaries and adding any new aliases from the REMOTE file.") parser.add_argument('local', metavar='LOCAL', type=str, help="LOCAL file to merge REMOTE into.") parser.add_argument('remote', metavar='REMOTE', type=str, help="REMOTE file to merge.") parser.add_argument('--output', dest='output', action='store', default='exceptions.merged.txt', help="File to write result to. (default: \"%(default)s\")") parser.add_argument('--write', dest='write', action='store_true', help="Disable prompt, write to OUTPUT file automatically") args = parser.parse_args() print "local", '"' + args.local + '"' print "remote", '"' + args.remote + '"' print "output", '"' + args.output + '"' print "write", args.write print if not os.path.exists(args.local): print "ERROR: local file", '"' + args.local + '"', "does not exist" return if not os.path.exists(args.remote): print "ERROR: remote file", '"' + args.remote + '"', "does not exist" return data = merge( args.local, args.remote ) print 'validating data:', if not validate(data, trace=True): return if not args.write: args.write = raw_input('Write to "' + args.output + '"? [no]: ') == 'yes' if args.write: f = open(args.output, 'wb') f.write(data) f.close() print "done" else: print 'not writing data'
def gof(p):
    # Goodness-of-fit (Python 2 variant): run a SUMO simulation with
    # parameter vector p and return the resulting RMSE.
    para = {'vMax': p[0], 'aMax': p[1], 'bMax': p[2],
            'lCar': p[3], 'sigA': p[4], 'tTau': p[5]}
    print '# simulation with:',
    for k, v in para.items():
        print "%s:%.3f" % (k,v),
    print
    # write the vehicle-type definition that the simulation reads
    fType = open('data/input_types.add.xml', 'w')
    fType.write(('<routes>\n <vType accel="%(aMax)s" decel="%(bMax)s" id="pass"' +
                 ' length="%(lCar)s" minGap="2.5" maxSpeed="%(vMax)s"' +
                 ' sigma="%(sigA)s" tau="%(tTau)s" />\n</routes>') % para)
    fType.close()
    result = validate.validate(checkBinary('sumo'))
    print '#### yields rmse: %.4f' % result
    # append this evaluation to the shared optimization log
    print >> fpLog, "%s %s" % (" ".join(["%.3f" % pe for pe in p]), result)
    fpLog.flush()
    return result
def __init__(self, pipeline, package_manager):
    # Parse and hold a validated pipeline definition.
    # Refuse to load a pipeline that fails schema validation.
    if not validate(pipeline):
        raise RuntimeError('Validation failed')
    tree = ET.parse(pipeline)
    self._root = tree.getroot()
    # parsed-pipeline state, filled in by later parsing passes
    self._imports = {}
    self._inputs = {}
    self._outputs = {}
    self._options = {}
    self._step_pipelines = {}
    self._dependencies = {}
    self._steps = {}
    self._shell_steps = []
    self._package_manager = package_manager
    # NOTE(review): attribute name is misspelled ("avaliable"); renaming
    # could break external users, so it is left as-is.
    self._test_avaliable = None
def validate_params (self, params) :
    # Extend the shared handler validation with marker parsing
    # (Python 2 code: has_key, comma except-clause).
    # NOTE(review): the visible code falls off the end after the try/except —
    # the success path (e.g. `return valid`) appears truncated; confirm
    # against the full file.
    valid = wscompose.handler.validate_params(self, params)
    if not valid :
        return False
    #
    validator = validate.validate()
    #
    # markers
    #
    if params.has_key('marker') :
        try :
            valid['markers'] = validator.markers(params['marker'])
        except Exception, e :
            self.error(141, e)
            return False
def test_time_validation_valid1(self):
    """A valid integer epoch time is returned unchanged."""
    validator = validate()
    timestamp = 1457543875  # valid epoch time
    self.assertEqual(timestamp, validator.time(timestamp))
def test_given(self):
    """Known card numbers with their expected checksum validity."""
    cases = (
        ('6011 5940 0319 9511', False),
        ('5537 0213 6797 6815', True),
        ('5574 8363 8022 9735', False),
        ('3044 8507 9391 30', False),
        ('6370 1675 9034 6211 774', True),
    )
    for number, expected in cases:
        if expected:
            self.assertTrue(validate(number))
        else:
            self.assertFalse(validate(number))
def test_example(self):
    """One passing and one failing checksum example."""
    good_number = '1556 9144 6285 339'
    bad_number = '6363 1811 2857 7650'
    self.assertTrue(validate(good_number))
    self.assertFalse(validate(bad_number))
def test_uuid_validation_valid(self):
    """A well-formed UUID passes validation and is returned unchanged."""
    validator = validate()
    good_uuid = "30414be5-0924-4d7e-860a-113c5e039e30"
    self.assertEqual(good_uuid, validator.uuid(good_uuid))
def test_uuid_validation_invalid2(self):
    """A truncated UUID is rejected."""
    validator = validate()
    bad_uuid = "9dc680e2-f8bc-11e5-9ce9-5e551"  # too short to be a UUID
    self.assertFalse(validator.uuid(bad_uuid))
def test_confidence_validation_valid3(self):
    valid = validate()
    valid_in = 100.000
    result = valid.confidence(valid_in)  # valid confidence (upper bound) — original comment wrongly said "invalid"
    self.assertEqual(valid_in, result)
def test_time_validation_valid2(self):
    """A numeric string epoch time is coerced to int by the validator."""
    validator = validate()
    time_str = '1234567890'  # valid epoch time given as a string
    result = validator.time(time_str)
    self.assertEqual(int(time_str), result)
#!/usr/bin/env python import json import optparse from validate import validate, elementExists if __name__ == "__main__": doc = validate() elementExists("ComputingManager",doc) elementExists("ComputingService",doc) elementExists("ExecutionEnvironment",doc) elementExists("Location",doc) elementExists("ComputingShare",doc) print("run 'qstat' or similar to verify that the following information is correct:") shares = doc["ComputingShare"] print(" %d computing shares" % len(shares)) print("compare the information in the JSON file to the detailed job information from 'qstat -Q -f' and 'pbsnodes' or similar")
def test_time_validation_invalid(self):
    """Garbage input must be rejected by time validation."""
    validator = validate()
    self.assertFalse(validator.time('34--vbghjn_._J.'))
def test_confidence_validation_valid1(self):
    """An in-range confidence value is returned unchanged."""
    validator = validate()
    confidence = 87.657
    self.assertEqual(confidence, validator.confidence(confidence))
def test_cam_validation_invalid(self):
    """A non-numeric camera id must be rejected."""
    validator = validate()
    self.assertFalse(validator.cam('765ghgt'))
def test_cam_validation_valid(self):
    """A numeric camera id is accepted and returned unchanged."""
    validator = validate()
    cam_id = 5
    self.assertEqual(cam_id, validator.cam(cam_id))
def test_confidence_validation_invalid1(self):
    """A non-numeric confidence must not be echoed back unchanged."""
    validator = validate()
    bad_value = "Ggkjdhb86.gge"
    self.assertNotEqual(bad_value, validator.confidence(bad_value))
def test_plate_validation_invalid(self):
    """A fake number plate yields falsy plate and match results."""
    validator = validate()
    plate, matched = validator.plate('7-ki-666777')
    self.assertFalse(plate)
    self.assertFalse(matched)
def test_plate_validation_usa(self):
    """A Californian plate is accepted unchanged and flagged as matching."""
    validator = validate()
    plate_in = '7LOV391'
    plate, matched = validator.plate(plate_in)
    self.assertEqual(plate_in, plate)
    self.assertTrue(matched)
def test_road_validation_valid(self):
    """Site names are returned lower-cased by the validator."""
    validator = validate()
    site_name = "A site Name"
    result = validator.site(site_name)
    self.assertEqual(str(site_name).lower(), result)
def test_confidence_validation_valid2(self):
    valid = validate()
    valid_in = 0
    result = valid.confidence(valid_in)  # NOTE(review): named "valid2" but asserts a falsy result for 0 — confirm whether 0 is meant to be rejected or merely falsy
    self.assertFalse(result)