def getType():
    tests = []
    tests.append(["anata ha BAKA desu.", "r"])
    tests.append(["あなたはバカです。", "k"])
    tests.append([["a", "na", "ta", "ha", "ba", "ka", "de", "su", "."], "i"])
    for x in tests:
        result = JapaneseTools.Conversion.getType(x[0])
        if result != x[1]:
            return Tester.Result(False, "Failed on " + str(x))
    return Tester.Result(True, "All Types were properly detected")
def ktor():
    tests = collections.OrderedDict()
    tests["ぼく"] = "boku"
    tests["あたい"] = "atai"
    tests["しんぶん"] = "shin'bun'"
    tests["しんおさか"] = "shin'osaka"
    tests["ネイセン"] = "NEISEN'"
    tests["てゐ"] = "tewi"
    tests["ハニュー"] = "HANYU-"
    for x in tests:
        result = JapaneseTools.Conversion.convert(x, sourcetype="k", targettype="r")
        if result != tests[x]:
            return Tester.Result(False, "Failed on '" + x + "'")
    return Tester.Result(True, "All Kana to Romaji worked properly")
def rtok():
    tests = collections.OrderedDict()
    tests["boku"] = "ぼく"
    tests["atai"] = "あたい"
    tests["shinbun"] = "しんぶん"
    tests["shin'osaka"] = "しんおさか"
    tests["NEISEN"] = "ネイセン"
    tests["tewi"] = "てゐ"
    tests["HANYU-"] = "ハニュー"
    for x in tests:
        result = JapaneseTools.Conversion.convert(x, sourcetype="r", targettype="k")
        if result != tests[x]:
            return Tester.Result(False, "Failed on '" + x + "'")
    return Tester.Result(True, "All Romaji to Kana worked properly")
def learn():
    # splits()
    print("Done Splitting...")
    tester = Tester()
    test_set = tester.getTestSet()
    print("Finished tester Stuff")
    answers = []
    reader = FileReader("training.txt")
    X = reader.read_file()
    print("Starting SVD..")
    svd = TruncatedSVD(n_components=10, n_iter=10, random_state=42)
    dense = svd.fit_transform(X)
    print("Done with SVD, starting K Means...")
    km = KMeans(n_clusters=100)
    ans = km.fit_predict(dense)
    print("Done with K Means...")
    inverseAns = {cluster: [] for cluster in range(100)}
    # centroids = svd.inverse_transform(km.cluster_centers_)
    for trainingProdKey, trainingProdIndex in reader.product.items():
        inverseAns[ans[trainingProdIndex]].append(trainingProdKey)
    print('Done inverting clusters')
    i = 0
    for prod in test_set:
        # print("Inside Loop")
        answers.append(predict(prod, reader.product, ans, inverseAns))
        if i % (len(test_set) // 100) == 0:
            # report progress as a percentage of the test set
            print("\rDone with {}% of predicting...".format(100 * i // len(test_set)), end='')
        i = i + 1
    print()
    print(tester.checkAnswers(answers))
def gen_medium(iSeq1_len, sSeq1_len):
    return Tester.MediumData(smallData=gen_small(iSeq1_len),
                             n3=random.randint(1, 1000),
                             n4=random.randint(1, 1000),
                             d1=random.uniform(1, 100.0),
                             d2=random.uniform(1, 100.0),
                             s2=random_string(10),
                             b2=random_bool(),
                             sSeq1=random_s_list(sSeq1_len))
def runTests(self):
    tester = Tester.Tester(self.stop, self.color, self.verbose)
    tester.offline = self.offline
    try:
        for test in self.test.getTests():
            test.run(tester)
    except KeyboardInterrupt:
        sys.stderr.write('\n%s\n' % ('=' * 72))
        sys.stderr.write('\nTesting interrupted\n')
    if self.report:
        tester.report()
    return
def process_input():
    ticker = e1.get()
    start_date = e2.get()
    end_date = e3.get()
    candle_interval = e4.get()
    ma_period = int(e5.get())
    path = e6.get()
    df = tr.MAIN_Tester(ticker, start_date, end_date, candle_interval, ma_period).run()
    writer = pd.ExcelWriter(path)
    sheet_name = ticker + " " + start_date + " " + end_date
    df.to_excel(writer, sheet_name)
    writer.save()
def configure(logging=None, noLogEvents=None):
    # """Sets the global event manager's logging options."""
    # if g_eventManager:
    #     g_eventManager.setLogging(logging)
    #     g_eventManager.setNoLogEvents(noLogEvents)
    # else:
    #     BugUtil.error("BugEventManager - BugEventManager not setup before configure()")
    # K-Mod. I've expanded the purpose of this function.
    """Sets the global event manager's logging options and registers some BUG event handlers."""
    if not g_eventManager:
        BugUtil.error("BugEventManager - BugEventManager not setup before configure()")
        return
    g_eventManager.setLogging(logging)
    g_eventManager.setNoLogEvents(noLogEvents)
    # K-Mod. Don't register BUG events for a PitBoss host.
    # (Note: if this is a PitBoss host, this function won't even be called,
    # because the BUG core will not initialize any mod components in PitBoss mode.)
    if CyGame().isPitbossHost():
        BugUtil.debug("BugEventManager - skipping event registration for PitBoss host")
        return
    # K-Mod end
    # --------- Better BTS AI (2/2) (moved by K-Mod) -------------
    # K-Mod, only enable these features if the cheat mode is enabled.
    # if getChtLvl():
    # advc.127: Replacing the above. ChtLvl is always 0 in multiplayer.
    if getChtLvl() or (CyGame().isGameMultiPlayer()
            and gc.getDefineINT("ENABLE_AUTOPLAY_MULTIPLAYER") > 0):
        AIAutoPlay.AIAutoPlay(g_eventManager)
        ChangePlayer.ChangePlayer(g_eventManager)
        Tester.Tester(g_eventManager)
    # advc.106c: Changed OnLoad handler
    g_eventManager.addEventHandler("kbdEvent", g_eventManager.onKbdEvent)
    g_eventManager.addEventHandler("OnLoad", g_eventManager.resetActiveTurnAfterLoad)
    g_eventManager.addEventHandler("GameStart", g_eventManager.resetActiveTurn)
    g_eventManager.addEventHandler("gameUpdate", g_eventManager.onGameUpdate)
def main():
    net = YoloNet.YOLONet()
    criterion = Loss.MyLoss()
    if input('Do you want to load network?').upper() == 'N':
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
        train_data = Dataset.DetectionDataSet()
        trainer = Trainer.Trainer(net=net, data_set=train_data,
                                  optimizer=optimizer, criterion=criterion)
        trainer.train()
    else:
        net.load_state_dict(torch.load('yolo_cpu.pt'))
        test_data = Dataset.DetectionDataSet(paths='numpy_test/paths.txt',
                                             label_dir='numpy_test/',
                                             root_dir='test/')
        tester = Tester.Tester(net=net, test_criterion=criterion, data_set=test_data)
        tester.test()
def configure(logging=None, noLogEvents=None):
    # """Sets the global event manager's logging options."""
    # if g_eventManager:
    #     g_eventManager.setLogging(logging)
    #     g_eventManager.setNoLogEvents(noLogEvents)
    # else:
    #     BugUtil.error("BugEventManager - BugEventManager not setup before configure()")
    # K-Mod. I've expanded the purpose of this function.
    """Sets the global event manager's logging options and registers some BUG event handlers."""
    if not g_eventManager:
        BugUtil.error("BugEventManager - BugEventManager not setup before configure()")
        return
    g_eventManager.setLogging(logging)
    g_eventManager.setNoLogEvents(noLogEvents)
    # K-Mod. Don't register BUG events for a PitBoss host.
    # (Note: if this is a PitBoss host, this function won't even be called,
    # because the BUG core will not initialize any mod components in PitBoss mode.)
    if CyGame().isPitbossHost():
        BugUtil.debug("BugEventManager - skipping event registration for PitBoss host")
        return
    # K-Mod end
    # --------- Better BTS AI (2/2) (moved by K-Mod) -------------
    AIAutoPlay.AIAutoPlay(g_eventManager)
    ChangePlayer.ChangePlayer(g_eventManager)
    Tester.Tester(g_eventManager)
    # advc.106c: Changed OnLoad handler
    g_eventManager.addEventHandler("kbdEvent", g_eventManager.onKbdEvent)
    g_eventManager.addEventHandler("OnLoad", g_eventManager.resetActiveTurnAfterLoad)
    g_eventManager.addEventHandler("GameStart", g_eventManager.resetActiveTurn)
    g_eventManager.addEventHandler("gameUpdate", g_eventManager.onGameUpdate)
def gen_big(iSeq1_len, sSeq1_len, iSeq2_len, sSeq2_len, dSeq1_len, dSeq2_len):
    return Tester.BigData(mediumData=gen_medium(iSeq1_len, sSeq1_len),
                          n5=random.randint(1, 1000),
                          n6=random.randint(1, 1000),
                          n7=random.randint(1, 1000),
                          n8=random.randint(1, 1000),
                          n9=random.randint(1, 1000),
                          n10=random.randint(1, 1000),
                          s3=random_string(10),
                          s4=random_string(10),
                          s5=random_string(10),
                          s6=random_string(10),
                          s7=random_string(10),
                          s8=random_string(10),
                          d3=random.uniform(1, 100.0),
                          d4=random.uniform(1, 100.0),
                          d5=random.uniform(1, 100.0),
                          b3=random_bool(),
                          b4=random_bool(),
                          b5=random_bool(),
                          iSeq2=random_i_list(iSeq2_len),
                          sSeq2=random_s_list(sSeq2_len),
                          dSeq1=random_d_list(dSeq1_len),
                          dSeq2=random_d_list(dSeq2_len))
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0)

if name == 'densenet':
    model = models.densenet161(pretrained=False)
    model.classifier = nn.Sequential(
        nn.Dropout(0.3),
        nn.Linear(model.classifier.in_features, num_classes))
    model = torch.nn.DataParallel(model).module
    model_path = os.path.join(workspace, 'checkpoints', trained + str(num_classes), 'densenet.pth')
    model.load_state_dict(torch.load(model_path, map_location='cuda:0'))
elif name == 'inception':
    model = models.inception_v3(pretrained=True, aux_logits=False)
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model_path = os.path.join(workspace, 'checkpoints', trained + str(num_classes), 'inception.pth')
    model.load_state_dict(torch.load(model_path, map_location='cuda:0'))
else:
    model = models.resnext50_32x4d(pretrained=True)
    model.fc = nn.Sequential(nn.Dropout(0.3), nn.Linear(model.fc.in_features, num_classes))
    model_path = os.path.join(workspace, 'checkpoints', trained + str(num_classes), 'resnext.pth')
    model.load_state_dict(torch.load(model_path, map_location='cuda:0'))

test_acc, f1_test, cm, out, labels, preds = tester.test(
    model, device, test_loader, test_size, name)
# utils.plot_confusion_matrix(cm, classes, name + '_' + str(num_classes) + '_' + trained, workspace, name + ' - acc: ' + str(test_acc.item()), save=False)
    rms = audioop.rms(data, 2)  # here's where you calculate the volume
    # print("yo: ", audioop.avg(data, 2), rms)
    # difference between current volume and last volume
    diff = rms - lastRms
    percentage = rms / 100
    # checks if |diff| exceeds DIFFERENCE_THRESHOLD
    if abs(diff) >= DIFFERENCE_THRESHOLD:
        # sets the brightness value
        bri = int(BRI_MODIFIER * percentage)
        if bri < 0:
            bri = 0
        elif bri > 254:
            bri = 254
        print("Difference: ", diff, "\nBrightness: ", bri)
        t.change_bri(bri)
    lastRms = rms

print("* done")
stream.stop_stream()
stream.close()
p.terminate()
def target_spotted(direction):
    gamefile.pointgun(direction * 1.4)
    gamefile.fire()
    gamefile.fire()
    gamefile.fire()
test_labs[n_label_0:, 1] = 1
test_cls = np.argmax(test_labs, axis=1)
print len(paths_0)
print len(paths_1)

# Image
for i, path in enumerate(paths_0 + paths_1):
    print i, os.path.split(path)[-1]
    img = np.asarray(Image.open(path).convert('RGB'))
    imgs.append(img)
test_imgs = np.asarray(imgs)
assert len(test_labs) == len(test_imgs)
print np.shape(test_imgs)

tester = Tester.Tester(None)
tester._reconstruct_model(restore_model)
tester.n_classes = 2
test_imgs = test_imgs / 255.
tester.validate(test_imgs, test_labs, 60, 0, False)
for i, path in enumerate(paths_0 + paths_1):
    print os.path.split(path)[-1].split('_')[0], tester.pred_all[i], test_cls[i]

inspect_cam(tester.sess, tester.classmap_op, tester.top_conv, test_imgs, test_labs, 0,
            tester.x_, tester.y_, tester.is_training, tester.logits_)
actmap = tester.sess.run(tester.classmap_op, {tester.x_: test_imgs[0:1], tester.is_training: False})
actmap = np.squeeze(actmap)
print np.shape(actmap)
plt.imsave('actmap_0.png', actmap)
print ''
parser.add_argument('--lrde', default=20, type=int,
                    help='[net] divide the learning rate by 10 every lrde epochs')
parser.add_argument('--mom', default=0.9, type=float, help='[net] momentum')
parser.add_argument('--wd', default=1e-3, type=float, help='[net] weight decay')
parser.add_argument('--lr', default=0.01, type=float, help='[net] learning rate')
parser.add_argument('--ep', default=60 * 1, type=int, help='[net] epoch')
parser.add_argument('--beta', default=0.3, type=float,
                    help='[net] hyperparameter for per-class loss weight')
parser.add_argument('--pmp', default=pmp, type=str, help='[net] pre-trained model path')
args = parser.parse_args()
print args
print int((args.sr * args.msc) / args.hs)

Trer = Tester(args)
pred = Trer.run()
print("Error: missing files") exit(-1) classes = 3 training_data = numpy.genfromtxt(sys.argv[1], delimiter=',', dtype="|U5") training_labels = numpy.genfromtxt(sys.argv[2], delimiter=',') test_data = numpy.genfromtxt(sys.argv[3], delimiter=',', dtype="|U5") training_data, test_data = Preparations.Preparations( training_data, test_data).prepare(1) perceptron_weights, svm_weights, pa_weights = Trainer.Trainer( training_data, training_labels, classes).train_all_simul() tester = Tester.Tester(test_data, perceptron_weights, svm_weights, pa_weights) tester.test() if len(sys.argv) == 5 and True: # debug mode perceptron_success_rate, svm_success_rate, pa_success_rate = tester.calculate_statistics( numpy.genfromtxt(sys.argv[4], delimiter=',')) print("succeeds rate: per: {}, svm:{}, pa: {}".format( perceptron_success_rate, svm_success_rate, pa_success_rate)) Grapher.Grapher(training_data, training_labels, test_data, numpy.genfromtxt(sys.argv[4]), classes).perceptron_graph() else: # testing mode tester.test()
tests["しんおさか"] = "shin'osaka" tests["ネイセン"] = "NEISEN'" tests["てゐ"] = "tewi" tests["ハニュー"] = "HANYU-" for x in tests: result = JapaneseTools.Conversion.convert(x, sourcetype="k", targettype="r") if result != tests[x]: return Tester.Result(False, "Failed on '" + x + "'") return Tester.Result(True, "All Kana to Romaji worked properly") #TODO : Make n/n' more intelligent in the k to r converter #print(JapaneseTools.Conversion.convert("しんぶん", targettype="r")) #print(JapaneseTools.Conversion.convert("ハニュー", targettype="r")) t = Tester.Tester() t.addTest("Romaji To Kana", "Tests Romaji to Kana conversion", rtok) t.addTest("Type Detection", "Tests the capacity to determine the type of a string", getType) t.addTest("Kana To Romaji", "Tests Kana to Romaji conversion", ktor) Tester.makeGUI(t) t.doAllTests() print("t") r = t.getAllTests() for x in r: print(str(x)) print("")
def commands():
    gamefile.move(180)
    gamefile.fire()
    gamefile.stop(180)
    gamefile.fire()
    gamefile.turn_left(120)
    gamefile.fire()
    gamefile.turn_right(120)
    gamefile.fire()
    gamefile.turn_right(120)
    gamefile.fire()
    gamefile.turn_left(120)
    gamefile.fire()
    gamefile.done()
)
powershellRunner.t1(username, password)
print("Analyzing Process stage 1 [OK - NO ERROR]")
print("Analyzing Process stage 2: Extracts IP addresses [START]")
csvAnalyzer.analyzer()
print("Analyzing Process stage 2: [OK - NO ERROR]")
print("Analyzing Process stage 3: Ip Analyzing - Finding Ips GeoLocation etc [START]")
Tester.ipAnalyzer()
print("Analyzing Process stage 3 [OK - NO ERROR]")
print("Analyzing Process stage 4: Ip Analyzing - Extracting rules [START]")
powershellRunner.t2(username, password)
print("Analyzing Process stage 4: [OK - NO ERROR]")
print("Analyzing Process stage 5: Logon Analyzing [START]")
GeoLogonalyzer.geoLogonAnalyer()
powershellRunner.t3()
def main():
    workspace = os.path.abspath("../")
    num_classes = 5
    trained = 'last'
    # test_fold = 1
    data_dir = os.path.join(workspace, 'Datasets', str(num_classes) + '-classes')

    # ResNeXt and DenseNet
    transform0 = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # Inception
    transform1 = transforms.Compose([
        transforms.Resize(299),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    test_dir = os.path.join(data_dir, 'Val')
    test_set_0 = datasets.ImageFolder(test_dir, transform0)
    test_set_1 = datasets.ImageFolder(test_dir, transform1)
    test_size = len(test_set_0)
    classes = test_set_0.classes
    print(classes)

    test_loader0 = torch.utils.data.DataLoader(test_set_0, batch_size=1, shuffle=False, num_workers=0)
    test_loader1 = torch.utils.data.DataLoader(test_set_1, batch_size=1, shuffle=False, num_workers=0)
    loaders = [test_loader0, test_loader1, test_loader0]

    model_path = os.path.join(workspace, 'checkpoints', trained + str(num_classes))
    nets = [os.path.join(model_path, x) for x in os.listdir(model_path)]
    model_list, model_name = load_model(model_path, nets, num_classes)

    test_accs, test_f1s, cms, outs = [], [], [], []
    softmax = nn.Softmax(dim=0)
    for i in range(len(model_list)):
        test_acc, f1_test, cm, out, labels, _ = tester.test(
            model_list[i], device, loaders[i], test_size, model_name[i])
        out = [softmax(x).cpu().numpy() for x in out]
        out = np.asmatrix(out)
        test_accs.append(test_acc)
        test_f1s.append(f1_test)
        cms.append(cm)
        outs.append(out)
        # utils.plot_confusion_matrix(cm, classes, model_name[i] + '_' + str(num_classes), workspace, model_name[i] + ' - Acc: ' + str(round(test_acc.item(), 3)) + '%', save=False)
        # utils.create_test_log(workspace, cm, test_acc, f1_test, model_name[i], test_fold)

    start = datetime.datetime.now()
    preds, labels, total_correct = predict_with_ensemble(outs, utils.list_toTorch(labels))
    end = datetime.datetime.now()
    elapsed = end - start

    # for i in range(len(preds)):
    #     if labels[i] == 1 and preds[i] != labels[i]:
    #         sample_fname, _ = test_loader0.dataset.samples[i]
    #         print(sample_fname.split('\\')[-1])
    #         print('pred', preds[i].item(), 'label', labels[i].item())

    total_acc = total_correct.numpy() / len(preds.numpy())
    total_fscore = f1_score(labels, preds, average='micro')
    total_cm = confusion_matrix(labels, preds)
    print('\n[INFO] ensemble model testing complete')
    print('- total accuracy = ', total_acc)
    print('- total F1-score = ', total_fscore)
    print('- elapsed time (microsec) = ', elapsed.microseconds)
    # utils.compute_AUC_scores(labels, preds, classes)
    # timestamp = str(datetime.datetime.now()).split('.')[0]
    # utils.plot_confusion_matrix(total_cm, classes, 'ensamble_' + str(num_classes) + '_' + trained, workspace, 'Ensamble - acc: ' + str(round(total_acc.item(), 3)) + '%', save=False)
    return labels, preds, classes, total_cm
def run(self):
    for i in self.data["Insurance"]:
        ins = Insurance(i["insuranceId"])
        ins.setType(i["type"])
        ins.setInsuranceCeiling(i["ceil"])
        ins.setInsuranceRate(i["rate"])
        self.insurances.append(ins)
    for i in self.data["Patient"]:
        p = Patient(i["id"])
        p.setName(i["name"])
        p.setUsername(i["username"])
        p.setLastName(i["family"])
        p.setGender(i["gender"])
        p.setPhoneNumber(i["phone"])
        p.setPassword(i["pass"])
        p.setAge(i["age"])
        p.setDisease(i["disease"])
        for ins in self.insurances:
            if ins.getInsuranceId() == i["insurance"]["insuranceId"]:
                p.setInsurance(ins)
        p.setPrescription(i["prescription"])
        self.patients.append(p)
    for i in self.data["Test"]:
        t = Test(i["id"])
        t.setTestDescription(i["description"])
        t.setTestPreCondition(i["preCondition"])
        t.setBasePrice(i["basePrice"])
        self.tests.append(t)
    for i in self.data["TimeSlot"]:
        t = TimeSlot(i["year"], i["month"], i["day"], i["start"], i["end"], i["id"], i["status"])
        self.timeSlots.append(t)
    for i in self.data["Tester"]:
        t = Tester(i["id"])
        t.setName(i["name"])
        t.setLastName(i["family"])
        t.setGender(i["gender"])
        t.setPhoneNumber(i["phone"])
        allTimes = []
        for atid in i["available_time"]:
            for ts in self.timeSlots:
                if atid == ts.getId():
                    allTimes.append(ts)
        t.setAllTimes(allTimes)
        self.testers.append(t)
    for i in self.data["Labratory"]:
        l = Labratory(i["id"])
        l.setName(i["name"])
        l.setAvailableTests(i["availableTests"])
        l.setPriceRate(i["priceRate"])
        allTesters = []
        for t in i["testers"]:
            for tester in self.testers:
                if t == tester.getId():
                    allTesters.append(tester)
        l.setTesters(allTesters)
        self.labratories.append(l)
def gen_small(iSeq1_len):
    return Tester.SmallData(n1=random.randint(1, 1000),
                            n2=random.randint(1, 1000),
                            s1=random_string(10),
                            b1=random_bool(),
                            iSeq1=random_i_list(iSeq1_len))
    if outcome.upper() == 'PASS':
        result = 1
    else:
        result = 0
    output = 'Result: '
    if result == 1:
        output += 'PASS\n'
    else:
        output += 'FAIL\n'
    return result, output


def test_01():
    output = ('test_01\n'
              '\t1. Run the test\n'
              '\t2. Check if it passes\n')
    print output
    result, passed = isPassed()
    output += passed
    return (result, output)


def test_02():
    output = ('test_01\n'
              '\t1. Run the test\n'
              '\t2. Check if it passes\n')
    print output
    result, passed = isPassed()
    output += passed
    return (result, output)


if __name__ == '__main__':
    Tester.run('SampleTest')
def menu():
    continueGame = Button((255, 255, 255), "Buttons/ContinueButton.png", (0, 200))
    newGame = Button((255, 255, 255), "Buttons/NewGameButton.png", (0, 250))
    instructions = Button((255, 255, 255), "Buttons/InstructionButton.png", (200, 200))
    sentences = Button((255, 255, 255), "Buttons/SentencesButton.png", (200, 250), (100, 40))
    customizations = Button((255, 255, 255), "Buttons/CustomizationsButton.png", (400, 200))
    credits = Button((255, 255, 255), "Buttons/creditsButton.png", (400, 250))
    quit = Button((255, 255, 255), "Buttons/QuitButton.png", (200, 450))
    jetpackMode = False
    jetpack = Button((255, 255, 255), "res/testButton.png", (450, 450), (50, 50))
    twoPlayerMode = False
    twoPlayer = Button((255, 255, 255), "res/testButton.png", (0, 450), (50, 50))
    Tester.restart()
    state = 0
    while state == 0:
        screen.fill([255, 255, 255])
        screen.blit(background, backgroundRect)
        screen.blit(continueGame.image, continueGame)
        screen.blit(newGame.image, newGame)
        screen.blit(instructions.image, instructions)
        screen.blit(sentences.image, sentences)
        screen.blit(customizations.image, customizations)
        screen.blit(credits.image, credits)
        screen.blit(jetpack.image, jetpack)
        screen.blit(twoPlayer.image, twoPlayer)
        screen.blit(quit.image, quit)
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                sys.exit("quit game")
            if event.type == MOUSEBUTTONDOWN:
                loc = pygame.mouse.get_pos()
                if continueGame.clicked(loc[0], loc[1]):
                    Tester.setBoatImage(Customizations.getSelectedBoat())
                    Tester.resume()
                elif newGame.clicked(loc[0], loc[1]):
                    Tester.setSentenceFile(SentenceSelector.getSelected())
                    Tester.restart()
                    Tester.setBoatImage(Customizations.getSelectedBoat())
                    Tester.resume()
                elif instructions.clicked(loc[0], loc[1]):
                    print("instruction menu")
                    Instructions.load()
                elif sentences.clicked(loc[0], loc[1]):
                    SentenceSelector.load()
                elif customizations.clicked(loc[0], loc[1]):
                    Customizations.load()
                elif credits.clicked(loc[0], loc[1]):
                    Credits.load()
                elif jetpack.clicked(loc[0], loc[1]):
                    jetpackMode = not jetpackMode
                    print "Jetpack mode: " + str(jetpackMode)
                    Tester.setJetpackMode(jetpackMode)
                elif twoPlayer.clicked(loc[0], loc[1]):
                    twoPlayerMode = not twoPlayerMode
                    print "Two player mode: " + str(twoPlayerMode)
                    Tester.setTwoPlayerMode(twoPlayerMode)
                elif quit.clicked(loc[0], loc[1]):
                    return
def test_01():
    testName = 'Camera Connection'
    output = (testName + '\n'
              '\t1. Connect the camera via USB\n'
              '\t2. Verify log includes "Camera Connected!" message\n'
              '\t3. Disconnect the camera\n'
              '\t4. Verify log includes "Camera Disconnected!" message\n')
    print output
    result, passed = isPassed()
    output += passed
    return (result, output)


def test_02():
    testName = 'Downloaded Images stay on Camera'
    output = (testName + '\n'
              '\t1. Connect the camera via USB\n'
              '\t2. Open up the directory specified in the config file\n'
              '\t3. Take a new photo\n'
              '\t4. Verify photo appears in specified directory\n'
              '\t5. Open up the directory the filesystem gives the camera\n'
              '\t6. Verify photo appears in camera memory\n')
    print output
    result, passed = isPassed()
    output += passed
    return (result, output)


if __name__ == '__main__':
    Tester.run('ReaderTest')
import Tester
import csv
import json

typetester = Tester.Tester()
lineswithpoints = {}
longeststrings = {}
linespecialchars = {}
outputdictlist = []

text = typetester.retrieveText()
text = typetester.addMarkers(text)
text = text.replace("\n", " ")
lines = text.split("<>")
longestwords = typetester.longestWords(lines, longeststrings)
lineratings = typetester.rateSpecialChars(lines, linespecialchars)

for line in lines:
    line.rstrip("\n")
    linedifficulty = typetester.calculateLineComplexity(line, longeststrings, linespecialchars)
    lineswithpoints[line] = linedifficulty

with open("Difficulties.json", 'w') as outfile:
    for item in lineswithpoints.items():
        # Creates a new dictionary for each tuple because in the test file there is
        # a dictionary for each line and its difficulty
        outputdict = {}
        # Stores the line under the "text" key, ready for uploading to the database
        outputdict["text"] = item[0]
        # Stores the line's difficulty under the "difficulty" key, ready for uploading to the database
        outputdict["difficulty"] = item[1]
        outputdictlist.append(outputdict