# Build one dataset/loader per split (batch_size=None: the dataset yields
# pre-batched items, so the DataLoader does no collation of its own).
train_set = NonterminalFeaturesDataset(os.path.join(args.data_dir, 'train'))
train_loader = DataLoader(train_set, batch_size=None)
val_set = NonterminalFeaturesDataset(os.path.join(args.data_dir, 'val'))
# BUG FIX: val_loader and gen_loader previously wrapped `train_set`, so
# validation and generalization metrics were silently computed on training
# data. They now wrap their own splits.
val_loader = DataLoader(val_set, batch_size=None)
gen_set = NonterminalFeaturesDataset(os.path.join(args.data_dir, 'gen'))
gen_loader = DataLoader(gen_set, batch_size=None)

classifier = Classifier(args.features, args.nonterminals)
classifier = classifier.to(device)
optimizer = torch.optim.SGD(classifier.parameters(), lr=args.lr)

# Trainer and metrics: running averages of cross-entropy loss and accuracy,
# both read from the step output dict via its 'y_pred'/'y_true' keys.
save_dict = {'classifier': classifier}
trainer = Engine(step_train(classifier, optimizer))
metric_names = ['loss', 'accuracy']
RunningAverage(Loss(F.cross_entropy,
                    lambda x: (x['y_pred'], x['y_true']))).attach(trainer, 'loss')
RunningAverage(Accuracy(lambda x: (x['y_pred'], x['y_true']))).attach(
    trainer, 'accuracy')

# Evaluator and metrics: same step function run without an optimizer and with
# train=False, tracking plain (non-running-average) accuracy.
evaluator = Engine(step_train(classifier, None, train=False))
Accuracy(lambda x: (x['y_pred'], x['y_true'])).attach(evaluator, 'accuracy')

# Begin training; 'accuracy' is the metric used for model selection.
run(args.run_name, save_dict, metric_names, trainer, evaluator, train_loader,
    val_loader, gen_loader, args.epochs, 'accuracy')
# NOTE(review): this chunk begins mid-way through the keyword-argument list of
# an earlier `training.run(...)` call; the opening of that call and its
# enclosing `if len(sys.argv) == ...:` branch lie outside the visible source,
# so the fragment below is kept exactly as found.
                 DataSet=int(sys.argv[2]),
                 TestID=int(sys.argv[3]),
                 NewC=int(sys.argv[4]),
                 NetworkType=int(sys.argv[5]),
                 runs=int(sys.argv[6]),
                 Module=int(sys.argv[7]),
                 Mfit=boolArgv(sys.argv[8]),
                 FusionNType=int(sys.argv[9]),
                 FusionMfit=boolArgv(sys.argv[10]),
                 FLdata=boolArgv(sys.argv[11]),
                 FUSION=True)
# 7-argument form: no fusion options, ValidID supplied explicitly.
elif len(sys.argv) == 8:
    training.run(GPU_Device_ID=int(sys.argv[1]),
                 DataSet=int(sys.argv[2]),
                 ValidID=int(sys.argv[3]),
                 TestID=int(sys.argv[4]),
                 NetworkType=int(sys.argv[5]),
                 runs=int(sys.argv[6]),
                 Module=int(sys.argv[7]))
else:
    # Any other argument count is an error; show the expected CLI shape.
    print(
        "argument errors, try\npython runfile.py <GPU_Device_ID> <DataSet> <ValidID> <TestID> <NetworkType> <runs> <Module>"
    )
# NOTE(review): the usage string below is a runtime string literal; its
# original internal line breaks were lost in the source mangling, so it is
# preserved on a single line rather than guessed at.
''' Usage: python Runscript.py 0 1 0 0 1 0 python Runscript.py 0 2 3 3 2 5 python Runscript.py 1 4 3 3 2 8 python Runscript.py 1 6 5 5 3 3 '''
import utils
import training
import testing
import sys
# BUG FIX: pprint was called below but never imported, which raised a
# NameError as soon as the script started.
from pprint import pprint

from config import Configuration

if __name__ == '__main__':
    # Load configuration from file or arguments
    config = Configuration()
    print("\nRun torcs env with this configuration?\n")
    pprint(config.args)

    # Create basic directories if not exists
    utils.mkdir_resources(config.args['resources'])

    # Simple interactive menu: 1 = train, 2 = test, 0 = exit.
    while True:
        question = input(
            "\n\nPlease, press one key:\n 1. TRAIN\n 2. TEST\n 0. EXIT\n")
        # Robustness fix: non-numeric input used to crash with ValueError;
        # now we just re-prompt.
        try:
            choice = int(question)
        except ValueError:
            continue
        if choice == 1:
            # Persist the active configuration before training.
            utils.to_json(config.args)
            training.run()
        elif choice == 2:
            testing.run()
        elif choice == 0:
            sys.exit(0)
from get_train_config import training_configs

# -o D:\Projects\Myeloma_BM_mapping\Data\20200401\models -d D:\Projects\Myeloma_BM_mapping\Data\20200401\combined_polyscope-rectangles\patch -n 4


def _run_numbered_config(configs, n):
    """Echo a banner for configuration *n* and run it with its full params.

    `run` is expected to be in scope from elsewhere in this module
    (not visible in this chunk) — TODO confirm its import.
    """
    print('*' * 50)
    print('running configuration number: {}'.format(n))
    print('*' * 50)
    run(**configs[str(n)], all_params=configs[str(n)])


if __name__ == '__main__':
    configs, args = training_configs()
    if args.cluster:
        print('*' * 50)
        print('job started running on a cluster')
        print('*' * 50)
        print('*' * 50)
        print('running configuration number: {}'.format(args.config_id))
        print('*' * 50)
        if args.config_id == '-1':
            # Sentinel '-1': sweep every configuration in order.
            for n in range(1, len(configs) + 1):
                _run_numbered_config(configs, n)
        else:
            # CONSISTENCY FIX: this was the only run(...) call site that did
            # not pass all_params; every other call supplies the full config
            # dict. Presumed oversight — confirm run()'s signature tolerates
            # (or previously defaulted) the missing argument.
            run(**configs[args.config_id],
                all_params=configs[args.config_id])
    else:
        # Local (non-cluster) execution always sweeps all configurations.
        for n in range(1, len(configs) + 1):
            _run_numbered_config(configs, n)
# NOTE(review): this chunk begins mid-way through an earlier
# parser.add_argument(...) call (presumably the "--bs" option, given the
# mutations block below) whose opening lies outside the visible source.
                    default=None,
                    help="Override train batch size")
parser.add_argument("--lr",
                    type=float,
                    default=None,
                    help="Override train learning rate")
parser.add_argument("--ep",
                    type=int,
                    default=None,
                    help="Override number of epochs")

args = parser.parse_args()
# NOTE(review): asserts are stripped under `python -O`; consider raising an
# explicit error for a missing/nonexistent config path instead.
assert args.config is not None
assert args.config.exists()

# Define configuration mutations if certain cmd args are defined
mutations = {}
if args.bs is not None:
    mutations["train_batch_size"] = args.bs
if args.lr is not None:
    mutations["learning_rate"] = args.lr
if args.ep is not None:
    mutations["num_epochs"] = args.ep

# Pass configuration file into py_config_runner.ConfigObject
# and fetch configuration parameters as attributes
# see inside run() function
config = ConfigObject(args.config, mutations=mutations)
run(config)
# NOTE(review): this chunk begins inside a model-selection if/elif chain; the
# opening `if args.model == ...:` branch (presumably 'syntax_tree_network')
# lies outside the visible source.
    model = SyntaxTreeNetwork(train_set.input_size, args.hidden_size,
                              train_set.num_nonterminal_rules,
                              train_set.num_nonterminals)
elif args.model == 'tree_network':
    model = TreeNetwork(train_set.input_size, args.hidden_size)
elif args.model == 'rnn':
    model = RNN(train_set.input_size, args.hidden_size)
else:
    raise NotImplementedError('unknown model type {}'.format(args.model))
model = model.to(device)

classifier = Classifier(args.hidden_size, train_set.n_classes)
classifier = classifier.to(device)
# One optimizer over the union of encoder and classifier parameters.
optimizer = torch.optim.SGD(list(model.parameters()) +
                            list(classifier.parameters()),
                            lr=args.lr)

# Trainer and metrics: running averages of cross-entropy loss and accuracy,
# both read from the step output dict via its 'y_pred'/'y_true' keys.
save_dict = {'model': model, 'classifier': classifier}
trainer = Engine(step_train(model, optimizer))
metric_names = ['loss', 'accuracy']
RunningAverage(Loss(F.cross_entropy,
                    lambda x: (x['y_pred'], x['y_true']))).attach(trainer, 'loss')
RunningAverage(Accuracy(lambda x: (x['y_pred'], x['y_true']))).attach(
    trainer, 'accuracy')

# Begin training. No evaluator or val/gen loaders are wired up here — the
# corresponding run(...) arguments are deliberately passed as None.
run(args.run_name, save_dict, metric_names, trainer, None, train_loader, None,
    None, args.epochs, 'accuracy')
logging.info('All data from raster will be kept in %s', allDataPreProcessedPath)

# Name the output paths up front so each pipeline stage below reads cleanly.
samples_list_path = dataset_folder_path + processName + "SamplesList"
training_batch_name = processName + "TrainingBatch"
training_batch_path = dataset_folder_path + training_batch_name
all_data_batch_path = allDataPreProcessedPath + processName + "AllDataBatch"

# Stage 1: cut the raster into 32x32 samples (written under samplesPaths) and
# build the master list [name, classId, % of pixels of the class], which is
# also persisted next to the dataset folder.
samples_list = raster.sampleriseRaster(mainRasterPath, samplesPaths,
                                       samples_list_path, saveSamplesToDisc)
# If the stage above is skipped, reload the persisted list instead:
# with open(dataset_folder_path + processName + "SamplesList.pkl", mode='rb') as file:
#     samples_list = pickle.load(file, encoding='latin1')

# Stage 2: convert samples into CNN-ready batches — one training batch ...
dataConversion.prepBatches(samples_list, samplesPaths, 7000,
                           training_batch_path)
# ... and one batch covering the entire data set.
dataConversion.prepBatches(samples_list, samplesPaths, 61168,
                           all_data_batch_path)

# Stage 3: train the network on the prepared training batch.
training.run(dataset_folder_path, save_model_path, training_batch_name)

# Stage 4: evaluate the trained model on the test data.
evalTrainings.test_model(save_model_path, dataset_folder_path)

# Stage 5: evaluate the network on all data. allDataPreProcessedPath must be a
# path to a folder; every '.p' file in it is used during evaluation.
processAllDataForValidation.preprocess_and_save_data(allDataPreProcessedPath +
                                                     processName)
evaluating.test_model(save_model_path, allDataPreProcessedPath)
def loop():
    """Run one pass of the SikuliX screen-automation state machine.

    Checks a fixed sequence of on-screen patterns/labels and clicks the first
    one that matches. Returns False only when the game window (windowTitle)
    is not found; every other path returns True (presumably so the caller
    keeps invoking loop() — confirm against the caller).

    NOTE(review): the source arrived with all indentation collapsed onto one
    line; the nesting below (notably which rest-button checks sit inside the
    gageHalf branch) was reconstructed from the control-flow logic and should
    be confirmed against the original script.
    """
    switchApp("umamusume")
    # Game window not present: signal the caller to stop/skip.
    if not reg.has(windowTitle):
        return False
    if reg.has(
            Pattern("1623306979891.png").similar(0.95).targetOffset(-226, 0)):
        reg.click()
        return True
    if reg.has("1616475107941.png"):
        reg.click()
        return True
    if reg.has(nextLabel):
        reg.click()
        return True
    # not in main menu -> skip
    if not reg.has(mainMenuHeader):
        return True
    if (reg.has("1615928744521.png")
            or reg.has(Pattern("1626317018337.png").similar(0.85))
        ) and reg.has("1616367046236.png"):
        reg.click("1616367046236.png")
        waitClick(okLabel)
        return True
    if reg.has("1626393239326.png"):
        reg.click()
        if reg.has("1616366183098.png", 2):
            waitClick(okLabel)
            return True
        else:
            # The expected popup did not appear, so the click was a
            # misclick — wait a moment and fall through.
            sleep(1)
    if reg.has(raceDayLabel):
        reg.click(raceDayLabel)
        race.runRace(True)
        try:
            waitClick(nextLabel)
            sleep(2)
            waitClick(nextLabel)
            return True
        except FindFailed:
            print("Final")
            return True
    isRaceDay = race.isRaceDay()
    if (isRaceDay or objectiveRegion.has(notYetLabel)
        ) and not reg.has(raceKeyLabel) and reg.has(raceLabel):
        reg.click(raceLabel)
        if reg.has("1615978551852.png", 5):
            reg.click("1615978573903.png")
            sleep(2)
            # Fall through to the next check without returning.
        else:
            result = race.runRace(False)
            if result:
                return True
            else:
                # Fall through to the next check without returning.
                sleep(2)
    if reg.has(gageHalf):
        if reg.has(restLabel):
            reg.click(restLabel)
            waitClick(okLabel)
            return True
        if reg.has("1616407518547.png"):
            reg.click("1616407518547.png")
            waitClick(okLabel)
            return True
    if reg.has(trainingLabel):
        reg.click(trainingLabel)
        reg.wait("1626318791352.png")
        training.run([1, 0.6, 1, 0.3, 0.4])
        return True
    # Nothing matched this pass; loop once more.
    return True