def main(threshold=0.63):
    """Produce Grad-CAM visualizations for one image given on the CLI.

    Runs the image through a pretrained densenet161 and writes three files
    to the current directory: ``orig.png`` (the input image),
    ``boundbox.png`` (heatmap overlay with a bounding box) and
    ``croped.png`` (the heat-cropped region).

    Args:
        threshold: heat threshold used both for cropping and for the
            bounding box.  Previously hard-coded to 0.63 in two separate
            call sites; a single defaulted parameter is backward
            compatible and keeps the two uses in sync.
    """
    usage = "usage: %prog [option] img_path"
    parser = OptionParser(usage)
    options, args = parser.parse_args()
    print('options', options)
    print('args', args)
    if len(args) != 1:
        parser.error('incorrect number of arguments')
    path = [args[0]]
    model = get_pretrained_model('densenet161').to(device)
    img_pil = list(map(pil_loader, path))
    img_tensor = list(map(_preprocess, img_pil))
    img_variable = torch.stack(img_tensor).to(device)
    # p: predictions, x: Grad-CAM heatmaps (per gcam's contract — defined elsewhere).
    p, x = gcam(model, img_variable)
    t = crop_heat(x, img_variable, threshold=threshold)
    print('Saving images at current directory...')
    print('orig.png ...')
    img_pil[0].save('./orig.png')
    print('boundbox.png ...')
    Image.fromarray(
        add_boundedbox(x[0],
                       add_heatmap_ts(x[0], img_variable[0],
                                      need_transpose_color=False),
                       threshold=threshold,
                       need_transpose_color=True)).save('./boundbox.png')
    # NOTE(review): "croped" filename typo kept — downstream tooling may expect it.
    print('croped.png ...')
    tsimg2img(t[0]).save('./croped.png')
def main():
    """Train one model per cross-validation fold and collect fold metrics.

    Saves each fold's dataloader under ``loaders/<i>.pth`` and each trained
    model's state dict under ``models/folds/<fold>``.  Every fold starts
    from the same initial weights loaded from ``models/inits/best.pth``.

    Relies on module-level names: ``k``, ``BATCH_SIZE``, ``EPOCHS``,
    ``setting``, ``device``, ``dataloader``, ``model``, ``traintest``,
    ``get_acc_from_conf``, ``normalize``.

    Returns:
        (accs, confmats, norm_confmats): per-fold final-epoch validation
        accuracy, raw confusion matrix, and normalized confusion matrix.
        Previously these lists were accumulated but never exposed;
        returning them is backward compatible (a caller that ignores the
        result still works).
    """
    dls = dataloader.get_fold_loaders(k, BATCH_SIZE)
    for i, d in enumerate(dls):
        torch.save(d, "loaders/" + str(i) + ".pth")
    accs = []
    norm_confmats = []
    confmats = []
    best_state_dict_init = torch.load("models/inits/best.pth")
    for fold in range(k):
        # Fresh model per fold, re-initialized to the shared starting weights
        # so folds are comparable.
        mod = model.get_pretrained_model(layer_names=setting["layers"],
                                         type_init=setting["init"]).to(device)
        mod.load_state_dict(best_state_dict_init)
        optim = model.get_optimizer(mod, feature_extract=True,
                                    lr=setting["lr"], mom=setting["mom"])
        criterion = nn.CrossEntropyLoss()
        for e in range(EPOCHS):
            mod, _valloss, _, confmat = traintest.trainepoch(
                mod, dls[fold], criterion, optim, device)
            valacc = get_acc_from_conf(confmat)
            # Only the final epoch's metrics are recorded for this fold.
            if e == EPOCHS - 1:
                confmats.append(confmat)
                norm_confmat = normalize(confmat)
                norm_confmats.append(norm_confmat)
                accs.append(valacc)
        torch.save(mod.state_dict(), "models/folds/" + str(fold))
    return accs, confmats, norm_confmats
def main():
    """Find the top-5 most similar images to a query image.

    Command line: ``img_path dir_path``.  With ``-d/--draw`` the matches
    are displayed (``findTOP5pic``); otherwise only their locations are
    reported (``findTOP5addr``).
    """
    parser = OptionParser("usage: %prog [option] img_path dir_path")
    parser.add_option('-d', '--draw',
                      help='show the images',
                      action='store_true',
                      default=False,
                      dest='draw')
    options, args = parser.parse_args()
    print('options', options)
    print('args', args)
    if len(args) != 2:
        parser.error('incorrect number of arguments')

    query_path, data_root = args
    loaders, _sizes = get_dataloaders(study_name=None,
                                      data_dir=data_root,
                                      batch_size=30,
                                      batch_eval_ten=15,
                                      shuffle=False)
    net = get_pretrained_model('densenet161').to(device)

    # Dispatch on the --draw flag: picture output vs. address-only output.
    top5 = findTOP5pic if options.draw else findTOP5addr
    top5(net, loaders, query_path)
def main():
    """Evaluate a pretrained model on one phase of a dataset.

    Command line: ``data_dir`` plus options selecting the phase
    (train/valid), the model architecture, an optional single study, and
    whether to draw ROC curves.  Validates the model and study names
    against ``common`` before building the dataloader and calling
    ``evaluate``.
    """
    usage = 'usage: %prog [options] data_dir'
    parser = OptionParser(usage)
    parser.add_option(
        '-p', '--phase',
        help='valid or train, assume the same directory structure with the training set, '
        'where there should be a file named <PHASE>.csv in the data directory',
        action='store', type='string', default='valid', dest='phase')
    parser.add_option(
        '-m', '--model',
        help='the model to evaluate, one of [densenet161, densenet169, resnet50, vgg19, agnet]',
        action='store', type='string', default='agnet', dest='model')
    parser.add_option('-s', '--study',
                      help='for evaluating on a specific study',
                      action='store', type='string', default='all', dest='study')
    parser.add_option('-d', '--draw',
                      help='draw roc curves',
                      action='store_true', default=False, dest='draw')
    options, args = parser.parse_args()
    print('options', options)
    print('args', args)
    if len(args) != 1:
        parser.error('incorrect number of arguments')
    data_dir = args[0]
    # Idiom fix: `x not in xs` instead of `not x in xs`.
    if options.model not in common.model_names:
        parser.error('Unknown model name')
    if options.study not in common.study_names + ['all']:
        parser.error('Unknown study name')
    # 'all' means no study filter.
    study_name = None if options.study == 'all' else options.study
    # Use the data_dir local (it was assigned but args[0] was passed again).
    dataloader = any_dataloader(
        options.phase,
        study_name=study_name,
        data_dir=data_dir,
    )
    model = get_pretrained_model(options.model).to(device)
    evaluate(model, dataloader, options.phase, options.draw)
def run(setting, n, save_dir, folder, early_stop=True, split=0.75, init_dict=None):
    # Train a single model configuration and record its validation history.
    #
    # setting: dict with keys "layers", "init", "lr", "mom" describing the model
    #     and optimizer configuration.
    # n: run identifier (string) used as a subdirectory name under the setting's
    #     save directory.
    # save_dir / folder: output locations for checkpoints and plots.
    # early_stop: if True, train until the module-level `stopcrit` criterion
    #     signals a stop; otherwise train exactly EPOCHS epochs.
    # split: train/validation split fraction; split == 1.0 means no validation
    #     set (validation and best-model bookkeeping are skipped).
    # init_dict: optional state dict to initialize the model weights from.
    #
    # Returns (history, best_acc, best_epoch); best_acc/best_epoch are None
    # when split == 1.0 (no validation was run).
    #
    # NOTE(review): depends on module-level mutable state `stopcrit` (reset at
    # the end of the validated path) — not safe to call concurrently.
    name = convert_to_name(setting)
    model_save_dir = save_dir + '/'
    # Per-epoch validation metrics; the commented-out entries below suggest
    # normacc/ipacc/npacc were tracked at some point and disabled.
    history = {
        "loss": [],
        "acc": [],
        "normacc": [],
        "ipacc": [],
        "npacc": [],
        "confmat": [],
        "best_avg": 0
    }
    mod = model.get_pretrained_model(layer_names=setting["layers"],
                                     type_init=setting["init"]).to(device)
    if init_dict is not None:
        mod.load_state_dict(init_dict)
    optim = model.get_optimizer(mod,
                                feature_extract=True,
                                lr=setting["lr"],
                                mom=setting["mom"])
    criterion = nn.CrossEntropyLoss()
    # Checkpoint the untrained weights as epoch_0.
    Path(model_save_dir + name + "/" + n).mkdir(parents=True, exist_ok=True)
    torch.save(mod.state_dict(), model_save_dir + name + "/" + n + '/epoch_0')
    stop = False
    if early_stop:
        dataloaders = dataloader.get_loaders(BATCH_SIZE, split)
        # Train until the stopping criterion fires.
        while not stop:
            print(stopcrit.checks)
            mod, valloss, valacc, confmat = traintest.trainepoch(
                mod, dataloaders, criterion, optim, device)
            #normalacc, ipacc, npacc = accs_from_confmat(confmat)
            history["loss"].append(valloss)
            history["acc"].append(valacc)
            #history["normacc"].append(normalacc)
            #history["ipacc"].append(ipacc)
            #history["npacc"].append(npacc)
            history["confmat"].append(confmat)
            # stopcrit also tracks the best model state dict seen so far.
            stop = stopcrit.check(valacc, mod.state_dict())
    else:
        dataloaders = dataloader.get_loaders(BATCH_SIZE, split)
        # Fixed-length training; validate only when a validation split exists.
        for epoch in range(EPOCHS):
            if split == 1.0:
                validate = False
            else:
                validate = True
            mod, valloss, valacc, confmat = traintest.trainepoch(
                mod, dataloaders, criterion, optim, device, validate)
            # valloss is None when validation was skipped.
            if valloss is not None:
                #normalacc, ipacc, npacc = accs_from_confmat(confmat)
                history["loss"].append(valloss)
                history["acc"].append(valacc)
                #history["normacc"].append(normalacc)
                #history["ipacc"].append(ipacc)
                #history["npacc"].append(npacc)
                history["confmat"].append(confmat)
                # NOTE(review): `stop` is assigned here but never read in this
                # branch — the for loop runs all EPOCHS regardless.
                stop = stopcrit.check(valacc, mod.state_dict())
    # NOTE(review): source was whitespace-mangled; this tail is placed at
    # function level so both training branches reach it — confirm against the
    # original layout.
    if split != 1.0:
        # Validation ran: persist the best checkpoint and plot the run, then
        # reset the shared stopping criterion for the next run.
        history["best_avg"] = stopcrit.last_avg
        torch.save(
            stopcrit.best_model_dict,
            model_save_dir + name + "/" + n + '/epoch_' + str(stopcrit.best_check))
        plot_run(name, n, history, folder)
        best_acc = stopcrit.best_val
        best_epoch = stopcrit.best_check
        stopcrit.reset()
    else:
        # No validation: save the final weights; no notion of "best" epoch.
        torch.save(mod.state_dict(),
                   model_save_dir + name + "/" + n + "/epoch_" + str(EPOCHS))
        best_acc = None
        best_epoch = None
    return history, best_acc, best_epoch