from api import config_fun  # needed for config_fun.config() below
# from api import prob_map_fcn as prob_map
from api import prob_map_fcn_kb as prob_map
# from api import prob_map_cls as prob_map
from api import heat_map_fun
from api import slide_fun
import openslide
import numpy as np
import train_helper
import os
import torch
from PIL import Image
# import matplotlib.pyplot as plt

cfg = config_fun.config()
# torch.cuda.set_device(0)
model = train_helper.get_model(cfg, load_param_from_ours=True)
# model.cuda()
model.eval()

f = open(cfg.test_file, 'r')
for s in f.readlines():
    if s.split() == []:
        continue
    # file_name, label = s.split('*')
    file_name = s.split('\n')[0]
    if os.path.exists(file_name + '.heatmap.jpg'):
        continue
    try:
        data = slide_fun.AllSlide(file_name)
def main():
    cfg = config_fun.config()
    best_prec1 = 0
    best_confusion_mat = None  # so the final print never hits a NameError
    # only used when we resume training from a checkpoint model
    resume_epoch = 0
    if cfg.resume_training and os.path.exists(cfg.init_model_file):
        model = train_helper.get_model(cfg, load_param_from_ours=True)
    else:
        model = train_helper.get_model(cfg, pretrained=cfg.model_pretrain)
    print('model: ')
    print(model)
    # multiple gpu
    # model.cuda()

    # optimizer
    optimizer = optim.SGD(model.parameters(), cfg.lr,
                          momentum=cfg.momentum,
                          weight_decay=cfg.weight_decay)

    # if we resume from a checkpoint, we also need the optimizer state here
    if cfg.resume_training and os.path.exists(cfg.optim_state_file):
        print('loading optim epoch prec from {0}'.format(cfg.optim_state_file))
        optim_state = torch.load(cfg.optim_state_file)
        resume_epoch = optim_state['epoch'] + 1
        best_prec1 = optim_state['best_prec1']
        best_confusion_mat = optim_state['best_confusion_matrix']
        optimizer.load_state_dict(optim_state['optim_state_best'])
        del optim_state

    criterion = nn.CrossEntropyLoss()
    print('shift model and criterion to GPU .. ')
    # model = model.cuda()
    # define loss function (criterion) and optimizer
    criterion = criterion.cuda()

    train_loader = None
    val_loader = None
    if not cfg.train_file_wise and not cfg.train_slide_wise:
        train_loader = train_helper.get_dataloader(True, cfg.train_patch_frac, cfg)
        val_loader = train_helper.get_dataloader(False, cfg.val_patch_frac, cfg)

    for epoch in range(resume_epoch, cfg.max_epoch):
        if cfg.train_slide_wise:
            # train_helper.train_slide_wise(train, model, criterion, optimizer, epoch, cfg)
            prec1, confusion_mat = train_helper.validate_slide_wise(
                validate, model, criterion, epoch, cfg)
        elif cfg.train_file_wise:
            train_helper.train_file_wise(train, model, criterion, optimizer, epoch, cfg)
            prec1, confusion_mat = train_helper.validate_file_wise(
                validate, model, criterion, epoch, cfg)
        else:
            train(train_loader, model, criterion, optimizer, epoch, cfg)
            prec1, confusion_mat = validate(val_loader, model, criterion, epoch, cfg)

        if best_prec1 < prec1:
            # save checkpoints
            best_prec1 = prec1
            best_confusion_mat = confusion_mat
            train_helper.save_model_and_optim(cfg, model, optimizer, epoch,
                                              best_prec1, best_confusion_mat)

    print('best accuracy: ', best_prec1)
    print('best confusion matrix:')
    print(best_confusion_mat)
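# --- Sketch: checkpoint saving assumed by the resume logic above ----------------
# train_helper.save_model_and_optim is not shown in this excerpt. The version below
# is only an assumption: it mirrors the keys the resume branch reads back
# ('epoch', 'best_prec1', 'best_confusion_matrix', 'optim_state_best') and reuses
# cfg.init_model_file / cfg.optim_state_file as the output paths.
import torch


def save_model_and_optim(cfg, model, optimizer, epoch, best_prec1, best_confusion_mat):
    # weights file checked by the cfg.resume_training branch
    torch.save(model.state_dict(), cfg.init_model_file)
    optim_state = {
        'epoch': epoch,
        'best_prec1': best_prec1,
        'best_confusion_matrix': best_confusion_mat,
        'optim_state_best': optimizer.state_dict(),
    }
    # read back with torch.load(cfg.optim_state_file) when training resumes
    torch.save(optim_state, cfg.optim_state_file)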
def main():
    cfg = config.config()
    best_prec1 = 0
    best_confusion_mat = None  # so the final print never hits a NameError
    # only used when we resume training from a checkpoint model
    resume_epoch = 0
    if cfg.resume_training and os.path.exists(cfg.modelnet_init_cls_model_file):
        model = train_helper.get_model(cfg, dataset='modelnet40', resume=True)
    else:
        model = train_helper.get_model(cfg, dataset='modelnet40')
    print('model: ')
    print(model)
    # multiple gpu
    # model.cuda()

    # optimizer
    optimizer = optim.SGD(model.parameters(), cfg.lr,
                          momentum=cfg.momentum,
                          weight_decay=cfg.weight_decay)

    # if we resume from a checkpoint, we also need the optimizer state here
    if cfg.resume_training and os.path.exists(cfg.modelnet_init_cls_optim_file):
        print('loading optim epoch prec from {0}'.format(cfg.modelnet_init_cls_optim_file))
        optim_state = torch.load(cfg.modelnet_init_cls_optim_file)
        resume_epoch = optim_state['epoch'] + 1
        best_prec1 = optim_state['best_prec1']
        best_confusion_mat = optim_state['best_confusion_matrix']
        optimizer.load_state_dict(optim_state['optim_state_best'])
        del optim_state

    criterion = nn.CrossEntropyLoss()
    print('shift model and criterion to GPU .. ')
    # model = model.cuda()
    # define loss function (criterion) and optimizer
    criterion = criterion.cuda()

    train_data = point_datasets.point_modelnet40_Dataset_cls(mode='train')
    val_data = point_datasets.point_modelnet40_Dataset_cls(mode='test')
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=cfg.batch_size,
                                               shuffle=True,
                                               num_workers=int(cfg.workers))
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=cfg.batch_size,
                                             shuffle=True,
                                             num_workers=int(cfg.workers))

    for epoch in range(resume_epoch, cfg.max_epoch):
        train(train_loader, model, criterion, optimizer, epoch, cfg)
        prec1, confusion_mat = validate(val_loader, model, criterion, epoch, cfg)

        if best_prec1 < prec1:
            # save checkpoints
            best_prec1 = prec1
            best_confusion_mat = confusion_mat
            train_helper.save_model_and_optim(cfg, model, optimizer, epoch,
                                              best_prec1, best_confusion_mat)

    print('best accuracy: ', best_prec1)
    print('best confusion matrix:')
    print(best_confusion_mat)
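# --- Sketch: per-epoch train loop ------------------------------------------------
# The train()/validate() functions called in main() are defined elsewhere in the
# repo and are not shown here. This is an illustrative version only; the
# (points, labels) batch layout returned by point_modelnet40_Dataset_cls is an
# assumption.
def train(train_loader, model, criterion, optimizer, epoch, cfg):
    model.train()
    running_loss, correct, total = 0.0, 0, 0
    for points, labels in train_loader:
        points, labels = points.cuda(), labels.cuda()
        optimizer.zero_grad()
        logits = model(points)              # class scores for ModelNet40
        loss = criterion(logits, labels)    # CrossEntropyLoss created in main()
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * labels.size(0)
        correct += (logits.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    print('epoch {0}: loss {1:.4f}, acc {2:.4f}'.format(
        epoch, running_loss / total, correct / total))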
#!/usr/bin/env python
import train_helper

n_epochs = 20
batch_size = 32

dataloaders, class_names = train_helper.load_data('../../data', batch_size=batch_size)
model, criterion, optimizer, scheduler = train_helper.get_model(dataloaders, n_epochs)
model = train_helper.train_model(model, criterion, optimizer, scheduler,
                                 dataloaders, n_epochs=n_epochs)
train_helper.save_model(model, 'model.pt')
train_helper.save_mobile_model(model, 'model_mobile.pt')
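# --- Sketch: mobile export --------------------------------------------------------
# train_helper.save_mobile_model is not shown in this excerpt; a typical TorchScript
# mobile export looks like the sketch below. The input_size default and the use of
# the lite interpreter are assumptions, not the repo's confirmed behaviour.
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile


def save_mobile_model(model, path, input_size=(1, 3, 224, 224)):
    model = model.eval().cpu()
    example = torch.rand(*input_size)          # dummy input used only for tracing
    traced = torch.jit.trace(model, example)   # record a static graph
    mobile = optimize_for_mobile(traced)       # fuse/fold ops for the mobile runtime
    mobile._save_for_lite_interpreter(path)    # e.g. 'model_mobile.pt'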
from api import config_fun
# from api import prob_map_fcn as prob_map
from api import prob_map_fcn_kb as prob_map
# from api import prob_map_cls as prob_map
from api import heat_map_fun
from api import slide_fun
import openslide
import numpy as np
import train_helper
import os
import torch
from PIL import Image

cfg = config_fun.config()
# torch.cuda.set_device(int(cfg.gpu_id))
model = train_helper.get_model(cfg, load_param_from_folder=True)
model.cuda()
model.eval()

f = open(cfg.test_file, 'r')
for s in f.readlines():
    if s.split() == []:
        continue
    file_name = s.split('\n')[0]
    if os.path.exists(file_name + '.heatmap.jpg'):
        continue
    try:
        data = slide_fun.AllSlide(file_name)
        level = data.level_count - 1
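        # --- Illustrative continuation (not the repo's actual code) --------------
        # The prob_map / heat_map_fun interfaces are not shown in this excerpt, so
        # the rest of the loop is sketched with plain numpy/PIL: read the coarsest
        # level as a thumbnail, blend a probability map onto it, and write the
        # <slide>.heatmap.jpg that the loop checks for above. AllSlide is assumed
        # to expose the usual OpenSlide read_region()/level_dimensions.
        size = data.level_dimensions[level]            # (width, height) at the coarsest level
        thumb = data.read_region((0, 0), level, size).convert('RGB')

        # placeholder standing in for the patch probabilities produced by prob_map
        probs = np.zeros((size[1], size[0]), dtype=np.float32)

        overlay = np.zeros((size[1], size[0], 3), dtype=np.uint8)
        overlay[..., 0] = (probs * 255).astype(np.uint8)   # probability -> red channel
        Image.blend(thumb, Image.fromarray(overlay), 0.5).save(file_name + '.heatmap.jpg')
    except Exception as e:
        # the original error handling is not shown; skip slides that fail to open
        print('failed to process {0}: {1}'.format(file_name, e))
        continue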