def train_model(train_dir, test_dir, savefile):
    """Train several models and save the checkpoint with the best test ROC AUC."""
    loss_fn = nn.BCELoss()
    runner = ExperimentRunner(loss_fn, train_dir, test_dir,
                              get_train_transforms(), get_test_transforms(),
                              batch_size=64)
    num_models_to_train = 5
    lr = 0.00001
    num_epochs = 200
    best_roc_auc = 0.0
    for i in range(num_models_to_train):
        print("Model #{}: Training {} layers for {} epochs with lr={}...".format(
            i, NUM_LAYERS_TO_TRAIN, num_epochs, lr))
        model = get_model(NUM_LAYERS_TO_TRAIN)
        optimizer = optim.SGD(model.module.parameters(), lr=lr, momentum=0.9)
        model = runner.train(model, optimizer, num_epochs)
        paths, labels, probs = runner.test(model)
        roc_auc = roc_auc_score(labels, probs)
        print("ROC auc: {}".format(roc_auc))
        if roc_auc > best_roc_auc:
            best_roc_auc = roc_auc
            print("Best model so far; saving...")
            torch.save(model.state_dict(), savefile)
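# A minimal, hypothetical invocation of train_model(); the directory paths and
# checkpoint filename are placeholders, not names from the original project.
train_model('data/train', 'data/test', 'best_model.pt')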
def run_trials(args):
    """ Runs experiments for multiple trials with random ground truth. """
    from experiment_runner import ExperimentRunner

    if args.start < 1:
        raise ValueError('trial # must be a positive integer')
    if args.end < args.start:
        raise ValueError("end trial can't be less than start trial")

    exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
    for exp_desc in exp_descs:
        runner = ExperimentRunner(exp_desc)
        for trial in xrange(args.start, args.end + 1):
            try:
                runner.run(trial=trial, sample_gt=True, rng=np.random)
            except misc.AlreadyExistingExperiment:
                print 'EXPERIMENT ALREADY EXISTS'
    print 'ALL DONE'
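# Minimal sketch of the command-line wiring run_trials() expects. Only the
# attributes the function actually reads (files, start, end) are declared;
# the flag names and defaults here are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+', help='experiment description files')
parser.add_argument('--start', type=int, default=1, help='first trial number')
parser.add_argument('--end', type=int, default=1, help='last trial number')
run_trials(parser.parse_args())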
def get_predictions(test_dir, model_savefiles):
    """Run every saved model over the test set; collect per-model probabilities."""
    runner = ExperimentRunner(None, None, test_dir, None,
                              get_test_transforms(), batch_size=64)
    probs = {}
    for i, savefile in enumerate(model_savefiles):
        model = get_model(NUM_LAYERS_TO_TRAIN)
        model.load_state_dict(torch.load(savefile))
        paths, labels, probs[i] = runner.test(model)
    return paths, labels, probs
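# Sketch of one way to combine the per-model probabilities returned by
# get_predictions() into a single ensemble score. Mean averaging and the 0.5
# decision threshold are assumptions, not taken from the source.
import numpy as np

def average_ensemble(probs):
    """Average the per-model probability arrays from get_predictions()."""
    stacked = np.stack([np.asarray(probs[i]) for i in sorted(probs)], axis=0)
    avg = stacked.mean(axis=0)        # mean probability per test sample
    preds = (avg >= 0.5).astype(int)  # hard labels at a 0.5 threshold
    return avg, preds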
def RunCrosperf(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--noschedv2',
                        dest='noschedv2',
                        default=False,
                        action='store_true',
                        help=('Do not use new scheduler. '
                              'Use original scheduler instead.'))
    parser.add_argument('-l',
                        '--log_dir',
                        dest='log_dir',
                        default='',
                        help='The log_dir, default is under <crosperf_logs>/logs')
    SetupParserOptions(parser)
    options, args = parser.parse_known_args(argv)

    # Convert the relevant options that are passed in into a settings
    # object which will override settings in the experiment file.
    option_settings = ConvertOptionsToSettings(options)
    log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
    logger.GetLogger(log_dir)

    if len(args) == 2:
        experiment_filename = args[1]
    else:
        parser.error('Invalid number of arguments.')

    working_directory = os.getcwd()
    if options.dry_run:
        test_flag.SetTestMode(True)
    experiment_file = ExperimentFile(
        open(experiment_filename, 'rb'), option_settings)
    if not experiment_file.GetGlobalSettings().GetField('name'):
        experiment_name = os.path.basename(experiment_filename)
        experiment_file.GetGlobalSettings().SetField('name', experiment_name)
    experiment = ExperimentFactory().GetExperiment(experiment_file,
                                                   working_directory, log_dir)
    json_report = experiment_file.GetGlobalSettings().GetField('json_report')

    signal.signal(signal.SIGTERM, CallExitHandler)
    atexit.register(Cleanup, experiment)

    if options.dry_run:
        runner = MockExperimentRunner(experiment, json_report)
    else:
        runner = ExperimentRunner(experiment,
                                  json_report,
                                  using_schedv2=(not options.noschedv2))
    runner.Run()
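# Hedged sketch of an entry point for RunCrosperf(). Because the function
# takes the experiment filename from args[1] after parse_known_args(), it
# appears to expect the full sys.argv list, program name included.
import sys

if __name__ == '__main__':
    RunCrosperf(sys.argv)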
def run_experiment(args):
    """ Runs experiments. """
    from experiment_runner import ExperimentRunner

    # The last two characters of each file argument encode the seed; the rest
    # is the path to the experiment description file.
    exp_descs = sum([ed.parse(util.io.load_txt(f[:-2])) for f in args.files], [])
    seed_descs = [int(float(f[-2:])) for f in args.files]

    for exp_desc, seed in zip(exp_descs, seed_descs):
        print '\nseed: {}'.format(seed)
        # Bind exp_desc and seed as default arguments so the override keeps
        # the current iteration's values rather than the loop's last ones.
        exp_desc.get_dir = lambda ed=exp_desc, s=seed: os.path.join(
            ed.sim + str(s), ed.inf.get_dir())
        print exp_desc.get_dir()
        try:
            er = ExperimentRunner(exp_desc)
            er.run(trial=0, sample_gt=False, rng=np.random.RandomState(seed))
        except misc.AlreadyExistingExperiment:
            print 'EXPERIMENT ALREADY EXISTS'
    print 'ALL DONE'
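# Illustration of the filename convention assumed above (the name is
# invented): a file argument 'descs/mog07' loads the description from
# 'descs/mog' and runs it with seed 7, since the last two characters of
# each filename are parsed as the seed.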
def run_experiment(args):
    """ Runs experiments. """
    from experiment_runner import ExperimentRunner

    exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
    for exp_desc in exp_descs:
        try:
            ExperimentRunner(exp_desc).run(trial=0, sample_gt=False,
                                           rng=np.random.RandomState(42))
        except misc.AlreadyExistingExperiment:
            print 'EXPERIMENT ALREADY EXISTS'
    print 'ALL DONE'
def runCurrentExperiment(self, expType="Standard", isLoad=False):
    """
    Creates an experiment runner for the current model and starts running
    the model in a separate thread
    """
    if self.experimentRunner:
        self.stopCurrentExperiment()
    self.datasets[self.currentDataset].rewind()

    if isLoad:
        modelInfo = json.loads(ExperimentDB.get(self.name)['metadata'])
        modelDescriptionText = modelInfo['modelDescriptionText']
        subDescriptionText = modelInfo['subDescriptionText']
        self.loadDescriptionFile(modelDescriptionText, subDescriptionText)
    else:
        data = dict(
            modelDescriptionText=self.descriptionText,
            subDescriptionText=self.subDescriptionText
        )
        ExperimentDB.add(self.name, data)

    self.__currentModelData = []
    if expType == "Standard":
        self.experimentRunner = ExperimentRunner(
            name=self.name,
            modelDescription=self.models[self.currentModel],
            control=self.control,
            dataset=self.datasets[self.currentDataset])
    elif expType == "Anomaly":
        self.experimentRunner = AnomalyRunner(
            name=self.name,
            modelDescription=self.models[self.currentModel],
            control=self.control,
            dataset=self.datasets[self.currentDataset])

    if isLoad:
        self.experimentRunner.load()
    else:
        self.experimentRunner.run()
    return self.getExperimentInfo(self.models[self.currentModel])
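# Hypothetical call site; the variable holding the enclosing object and the
# scenario are invented. Resumes a previously saved anomaly experiment.
info = session.runCurrentExperiment(expType="Anomaly", isLoad=True)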
test_transforms = []
test_transforms.append(transforms.Resize((1024, 768)))
test_transforms.append(transforms.CenterCrop((512, 512)))
test_transforms.append(transforms.Resize((224, 224)))
test_transforms.append(transforms.ToTensor())
test_transforms.append(
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
)

runner = ExperimentRunner(
    loss_fn, 'melanoma_dataset', train_transforms, test_transforms,
    batch_size=64)

ensemble_preds = np.array([])
num_ensembles = 5
for e in range(num_ensembles):
    for i in [9]:  # [num_layers for num_layers in range(2, 10)]
        for lr in [0.00001]:
            for j in [200]:
                print("Model #{}: Training {} layers for {} epochs with lr={}...".format(
                    e, i, j, lr))
parser.add_argument('--model', default='base_vae',
                    help='model definition')
parser.add_argument('--freeze_embeddings', default=False, action='store_true',
                    help='freeze the embedding layer')
parser.add_argument('--pretrained_embeddings', default=None,
                    help='path to pretrained embeddings')
parser.add_argument('--weight_kl', type=float, default=0.01,
                    help='weight for kl loss term')
parser.add_argument('--kloss_max', type=float, default=0.15,
                    help='maximum weight for the kl loss term')
parser.add_argument('--kl_anneal_start', type=int, default=3000,
                    help='step at which kl annealing starts')
args = parser.parse_args()

text_dataset = TextDataset(emb_dim=args.emb_dim,
                           batch_size=args.batch_size,
                           fix_length=args.fix_length)

if args.model == "base_vae":
    args.freeze_embeddings = False
    model = VAE_base(args, text_dataset)

experiment_runner = ExperimentRunner(text_dataset, model, args)
experiment_runner.train()
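# Hypothetical command line (the script name is assumed; emb_dim, batch_size
# and fix_length come from options defined earlier in the file):
#   python train_vae.py --model base_vae --weight_kl 0.01 --kl_anneal_start 3000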