import pickle

import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler

from config import config
from utils.utils import get_device, generate_embeddings, build_annoy_index
from model.model import SentenceTransformer
from data.data import SentenceTransformerDataset

device = get_device()
# device = 'cpu'

df = pd.read_csv('inputs/data.csv')
texts = df['text'].values
labels = df['label'].values

model = SentenceTransformer().to(device)

batch_dataset = SentenceTransformerDataset(text=texts, target=labels)
batch_data_loader = DataLoader(batch_dataset,
                               sampler=SequentialSampler(batch_dataset),
                               batch_size=config.BATCH_SIZE,
                               num_workers=4)

print("Generating embeddings")
embeddings, texts, labels = generate_embeddings(batch_data_loader, model, device)

with open('embeddings.pkl', 'wb') as f:
    pickle.dump(embeddings, f)
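# The script imports build_annoy_index but never calls it, and its signature
# is not shown here. As a minimal sketch (using the `annoy` package directly,
# an assumption rather than the project's own helper), the embeddings
# generated above could be indexed for nearest-neighbour search like this:
from annoy import AnnoyIndex

embedding_dim = len(embeddings[0])
index = AnnoyIndex(embedding_dim, 'angular')  # angular ~ cosine distance
for i, vector in enumerate(embeddings):
    index.add_item(i, vector)
index.build(10)  # 10 trees; more trees trade build time for recall
index.save('embeddings.ann')
# A lookup would then be index.get_nns_by_vector(query_vector, 5).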
                    help='The name of the training json file to load.')
parser.add_argument(
    '--upsample_multiplier',
    type=int,
    default=0,
    help='Multiplier used to increase the amount of confounders in training data')
parser.add_argument('--note',
                    type=str,
                    default='',
                    help='Add a note that can be seen in wandb')
args, unparsed = parser.parse_known_args()
config = args.__dict__
wandb.config.update(config)

config['device'] = get_device()
config['n_classes'] = 2 if config['loss_func'] == 'ce' else 1

# Check all provided paths:
if not os.path.exists(config['data_path']):
    raise ValueError("[!] ERROR: Dataset path does not exist")
else:
    LOGGER.info("Data path checked..")

if not os.path.exists(config['model_path']):
    LOGGER.warning("Creating checkpoint path for saved models at: {}\n".format(
        config['model_path']))
    os.makedirs(config['model_path'])
else:
    LOGGER.info("Model save path checked..")

if 'config' in config:
def train(config):
    cfg, cfg_data, cfg_model, cfg_optim = read_config(config)

    device, n_gpu = utils.get_device()
    utils.set_seeds(cfg.seed, n_gpu)

    train_batch_size = int(cfg_optim.train_batch_size /
                           cfg_optim.gradient_accumulation_steps)

    processor = get_class(cfg.task.lower())
    tokenizer = BertTokenizer.from_pretrained(cfg.bert_model,
                                              do_lower_case=cfg.do_lower_case)

    train_examples = None
    num_train_steps = None
    if cfg.do_train:
        train_examples = processor.get_train_examples(cfg_data.data_dir)
        num_train_steps = int(
            len(train_examples) / train_batch_size /
            cfg_optim.gradient_accumulation_steps * cfg_optim.num_train_epochs)

    label_list = processor.get_labels()

    # Prepare model
    print(PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1))
    model = BertForSequenceClassification.from_pretrained(
        cfg.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1),
        num_labels=len(label_list))
    model.to(device)

    # Prepare optimizer
    if cfg_optim.optimize_on_cpu:
        param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_())
                           for n, param in model.named_parameters()]
    else:
        param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [{
        'params': [p for n, p in param_optimizer
                   if not any(nd in n for nd in no_decay)],
        'weight_decay_rate': 0.01
    }, {
        'params': [p for n, p in param_optimizer
                   if any(nd in n for nd in no_decay)],
        'weight_decay_rate': 0.0
    }]
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=cfg_optim.learning_rate,
                         warmup=cfg_optim.warmup_proportion,
                         t_total=t_total)

    global_step = 0
    if cfg.do_train:
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      cfg_optim.max_seq_length,
                                                      tokenizer,
                                                      show_exp=False)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        train_dataloader = convert_features_to_tensors(train_features,
                                                       train_batch_size)

        model.train()
        best_score = 0
        flags = 0
        for _ in trange(int(cfg_optim.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu
                if cfg_optim.fp16 and cfg_optim.loss_scale != 1.0:
                    # Rescale loss for fp16 training; see
                    # https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
                    loss = loss * cfg_optim.loss_scale
                if cfg_optim.gradient_accumulation_steps > 1:
                    loss = loss / cfg_optim.gradient_accumulation_steps
                loss.backward()

                if (step + 1) % cfg_optim.gradient_accumulation_steps == 0:
                    if cfg_optim.optimize_on_cpu:
                        if cfg_optim.fp16 and cfg_optim.loss_scale != 1.0:
                            # Scale down gradients for fp16 training
                            for param in model.parameters():
                                if param.grad is not None:
                                    param.grad.data = param.grad.data / cfg_optim.loss_scale
                        is_nan = utils.set_optimizer_params_grad(
                            param_optimizer, model.named_parameters(), test_nan=True)
                        if is_nan:
                            logger.info("FP16 TRAINING: Nan in gradients, "
                                        "reducing loss scaling")
                            cfg_optim.loss_scale = cfg_optim.loss_scale / 2
                            model.zero_grad()
                            continue
                        optimizer.step()
                        utils.copy_optimizer_params_to_model(
                            model.named_parameters(), param_optimizer)
                    else:
                        optimizer.step()
                    model.zero_grad()

            # Evaluate once per epoch; stop early after six epochs without improvement.
            f1 = evaluate(model, processor, cfg_optim, label_list, tokenizer, device)
            if f1 > best_score:
                best_score = f1
                print('*f1 score = {}'.format(f1))
                flags = 0
                checkpoint = {'state_dict': model.state_dict()}
                torch.save(checkpoint, cfg_optim.model_save_pth)
            else:
                print('f1 score = {}'.format(f1))
                flags += 1
                if flags >= 6:
                    break

    model.load_state_dict(torch.load(cfg_optim.model_save_pth)['state_dict'])
    test(model, processor, cfg_optim, label_list, tokenizer, device)
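# Worked example of the batch arithmetic above (illustrative values): with
# cfg_optim.train_batch_size = 32 and gradient_accumulation_steps = 4, each
# forward pass sees 32 / 4 = 8 examples, gradients accumulate over 4
# micro-batches, and optimizer.step() runs once per 32 examples, so
# num_train_steps = len(train_examples) / 8 / 4 * num_train_epochs counts
# optimizer steps -- the same horizon BertAdam uses for t_total warmup.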
import os

import torch
from torchvision import transforms, models
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR

from datasets.FASDataset import FASDataset
from utils.transform import RandomGammaCorrection
from utils.utils import read_cfg, get_optimizer, get_device, build_network
from trainer.FASTrainer import FASTrainer
from models.loss import DepthLoss

cfg = read_cfg(cfg_file="config/CDCNpp_adam_lr1e-3.yaml")

device = get_device(cfg)
network = build_network(cfg)
optimizer = get_optimizer(cfg, network)
lr_scheduler = StepLR(optimizer=optimizer, step_size=30, gamma=0.1)
criterion = DepthLoss(device=device)
writer = SummaryWriter(cfg['log_dir'])

dump_input = torch.randn(
    (1, 3, cfg['model']['input_size'][0], cfg['model']['input_size'][1]))
writer.add_graph(network, dump_input)

train_transform = transforms.Compose([
    def __init__(self, n_class, arch, use_CBAM=False):
        super(ISICModel_singleview_reid, self).__init__()
        self.mode = 'singleview_reid'
        cfg = gl.get_value('cfg')
        self.cfg = cfg

        if arch == 'resnet50':
            if cfg.MODEL.USE_ADL is True:
                model_backbone = resnet50_adl(pretrained=True,
                                              num_classes=n_class,
                                              ADL_position=cfg.MODEL.ADL_POSITION,
                                              drop_rate=cfg.MODEL.ADLRATE,
                                              drop_thr=cfg.MODEL.ADLTHR)
            else:
                model_backbone = models.resnet50(pretrained=True)
            # in_features = 4096
            self.backbone = nn.Sequential(*list(model_backbone.children())[:-2])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'sk_resnet50':
            model_backbone = sk_resnet50(pretrained=True)
            # in_features = 4096
            self.backbone = nn.Sequential(*list(model_backbone.children())[:-2])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'resnet50d':
            model_backbone = resnet50d(pretrained=True)
            # in_features = 4096
            self.backbone = nn.Sequential(*list(model_backbone.children())[:-2])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'sge_resnet50':
            model_backbone = sge_resnet50(pretrained=True)
            # in_features = 4096
            self.backbone = nn.Sequential(*list(model_backbone.children())[:-2])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'resnext50_32x4d':
            model_backbone = models.resnext50_32x4d(pretrained=True)
            self.backbone = nn.Sequential(*list(model_backbone.children())[:-2])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'se_resnext50':
            model_backbone = SENet(block=SEResNeXtBottleneck,
                                   layers=[3, 4, 6, 3],
                                   groups=32,
                                   reduction=16,
                                   dropout_p=None,
                                   inplanes=64,
                                   input_3x3=False,
                                   downsample_kernel_size=1,
                                   downsample_padding=0,
                                   last_stride=2)
            param_dict = torch.load('../models/se_resnext50_32x4d-a260b3a4.pth')
            for i in param_dict:
                if 'classifier' in i or 'last_linear' in i:
                    continue
                model_backbone.state_dict()[i].copy_(param_dict[i])
            self.backbone = model_backbone
            # self.backbone = nn.Sequential(*list(model_backbone.children())[:-3])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip
        elif arch == 'effnetb4':
            model_backbone = EfficientNet.from_pretrained('efficientnet-b4')
            self.backbone = model_backbone
            # self.backbone = nn.Sequential(*list(model_backbone.children())[:-3])
            self.backbone_lc = nn.ReLU(inplace=True)  # skip

        self.imfeat_dim = cfg.MODEL.IMG_FCS  # (4096, 512)
        self.use_fc = cfg.MODEL.REID_USE_FC
        self.num_classes = n_class
        self.pdrop_lin = cfg.MODEL.REID_PDROP_LIN
        self.neck_feat = cfg.MODEL.REID_NECK_FEAT

        if self.use_fc is True:
            self.in_planes = self.imfeat_dim[1]
            self.after_backbone = nn.Sequential(
                layers.AvgPool(), nn.Dropout(p=self.pdrop_lin),
                nn.Linear(self.imfeat_dim[0] // 2, self.in_planes, bias=False))
            self.bottleneck = nn.BatchNorm1d(self.imfeat_dim[1])
            self.classifier = nn.Linear(self.in_planes,
                                        self.num_classes,
                                        bias=False)
        else:
            self.in_planes = self.imfeat_dim[0] // 2
            self.after_backbone = layers.AvgPool()
            self.bottleneck = nn.BatchNorm1d(self.in_planes)
            self.classifier = nn.Sequential(
                nn.Dropout(p=self.pdrop_lin),
                nn.Linear(self.in_planes, self.num_classes, bias=False))
        self.bottleneck.bias.requires_grad_(False)  # no shift

        self.center_feat = torch.zeros(n_class, self.in_planes)
        device = get_device(self.cfg)
        self.center_feat = self.center_feat.to(device)

        # self.head_im = nn.Sequential(self.after_backbone, self.bottleneck)
        init_cnn(self.after_backbone)
        init_cnn(self.bottleneck)
        init_cnn(self.classifier)

        self.meta_fc = nn.ReLU(inplace=True)
        self.final_conv = nn.ReLU(inplace=True)  # skip

        # if cfg.MODEL.BACKBONE_PRETRAIN_PATH is not None and os.path.exists(cfg.MODEL.BACKBONE_PRETRAIN_PATH):
        #     self.backbone.load_state_dict(torch.load(cfg.MODEL.BACKBONE_PRETRAIN_PATH))
        if cfg.MODEL.PRETRAIN_PATH is not None and os.path.exists(
                cfg.MODEL.PRETRAIN_PATH):
            self.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
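        # Note (editor's sketch, not this repo's code): torch.load restores
        # tensors onto the devices they were saved from, so loading a
        # GPU-trained checkpoint on a CPU-only host fails without a
        # map_location argument. A hedged variant of the load above:
        #
        #     state = torch.load(cfg.MODEL.PRETRAIN_PATH, map_location='cpu')
        #     self.load_state_dict(state)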
def main(cfg, model_cfg):
    # Load Configuration
    cfg = configuration.params.from_json(cfg)             # Train or Eval cfg
    model_cfg = configuration.model.from_json(model_cfg)  # BERT cfg
    set_seeds(cfg.seed)

    # Load Data & Create Criterion
    data = load_data(cfg)
    if cfg.uda_mode:
        unsup_criterion = nn.KLDivLoss(reduction='none')
        data_iter = [data.sup_data_iter(), data.unsup_data_iter()] if cfg.mode == 'train' \
            else [data.sup_data_iter(), data.unsup_data_iter(), data.eval_data_iter()]  # train_eval
    else:
        data_iter = [data.sup_data_iter()]
    sup_criterion = nn.CrossEntropyLoss(reduction='none')

    # Load Model
    model = models.Classifier(model_cfg, len(data.TaskDataset.labels))

    # Create trainer
    trainer = train.Trainer(cfg, model, data_iter,
                            optim.optim4GPU(cfg, model), get_device())

    # Training
    def get_loss(model, sup_batch, unsup_batch, global_step):
        # logits -> prob (softmax) -> log_prob (log_softmax)

        # batch
        input_ids, segment_ids, input_mask, label_ids = sup_batch
        if unsup_batch:
            ori_input_ids, ori_segment_ids, ori_input_mask, \
                aug_input_ids, aug_segment_ids, aug_input_mask = unsup_batch

            input_ids = torch.cat((input_ids, aug_input_ids), dim=0)
            segment_ids = torch.cat((segment_ids, aug_segment_ids), dim=0)
            input_mask = torch.cat((input_mask, aug_input_mask), dim=0)

        # logits
        logits = model(input_ids, segment_ids, input_mask)

        # sup loss
        sup_size = label_ids.shape[0]
        sup_loss = sup_criterion(logits[:sup_size], label_ids)  # shape: train_batch_size
        if cfg.tsa:
            tsa_thresh = get_tsa_thresh(cfg.tsa,
                                        global_step,
                                        cfg.total_steps,
                                        start=1. / logits.shape[-1],
                                        end=1)
            # prob = exp(log_prob), prob > tsa_threshold
            larger_than_threshold = torch.exp(-sup_loss) > tsa_thresh
            # larger_than_threshold = torch.sum(F.softmax(pred[:sup_size]) * torch.eye(num_labels)[sup_label_ids], dim=-1) > tsa_threshold
            loss_mask = torch.ones_like(label_ids, dtype=torch.float32) * (
                1 - larger_than_threshold.type(torch.float32))
            sup_loss = torch.sum(sup_loss * loss_mask, dim=-1) / torch.max(
                torch.sum(loss_mask, dim=-1), torch_device_one())
        else:
            sup_loss = torch.mean(sup_loss)

        # unsup loss
        if unsup_batch:
            # ori
            with torch.no_grad():
                ori_logits = model(ori_input_ids, ori_segment_ids, ori_input_mask)
                ori_prob = F.softmax(ori_logits, dim=-1)  # KLdiv target
                # ori_log_prob = F.log_softmax(ori_logits, dim=-1)

                # confidence-based masking
                if cfg.uda_confidence_thresh != -1:
                    unsup_loss_mask = torch.max(
                        ori_prob, dim=-1)[0] > cfg.uda_confidence_thresh
                    unsup_loss_mask = unsup_loss_mask.type(torch.float32)
                else:
                    unsup_loss_mask = torch.ones(len(logits) - sup_size,
                                                 dtype=torch.float32)
                unsup_loss_mask = unsup_loss_mask.to(get_device())

            # aug
            # softmax temperature controlling
            uda_softmax_temp = cfg.uda_softmax_temp if cfg.uda_softmax_temp > 0 else 1.
            aug_log_prob = F.log_softmax(logits[sup_size:] / uda_softmax_temp,
                                         dim=-1)

            # KLdiv loss
            """
            nn.KLDivLoss (kl_div)
            input: log_prob (log_softmax)
            target: prob (softmax)
            https://pytorch.org/docs/stable/nn.html

            unsup_loss is divided by the number of masked-in examples
            (the sum of unsup_loss_mask), which differs from the official
            Google UDA code, where unsup_loss is divided by the total count:
            https://github.com/google-research/uda/blob/master/text/uda.py#L175
            """
            unsup_loss = torch.sum(unsup_criterion(aug_log_prob, ori_prob), dim=-1)
            unsup_loss = torch.sum(unsup_loss * unsup_loss_mask, dim=-1) / torch.max(
                torch.sum(unsup_loss_mask, dim=-1), torch_device_one())
            final_loss = sup_loss + cfg.uda_coeff * unsup_loss

            return final_loss, sup_loss, unsup_loss
        return sup_loss, None, None

    # evaluation
    def get_acc(model, batch):
        # input_ids, segment_ids, input_mask, label_id, sentence = batch
        input_ids, segment_ids, input_mask, label_id = batch
        logits = model(input_ids, segment_ids, input_mask)
        _, label_pred = logits.max(1)
        result = (label_pred == label_id).float()
        accuracy = result.mean()
        # output_dump.logs(sentence, label_pred, label_id)  # output dump
        return accuracy, result

    if cfg.mode == 'train':
        trainer.train(get_loss, None, cfg.model_file, cfg.pretrain_file)

    if cfg.mode == 'train_eval':
        trainer.train(get_loss, get_acc, cfg.model_file, cfg.pretrain_file)

    if cfg.mode == 'eval':
        results = trainer.eval(get_acc, cfg.model_file, None)
        total_accuracy = torch.cat(results).mean().item()
        print('Accuracy :', total_accuracy)
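# get_tsa_thresh is not shown in this snippet. A minimal sketch of the
# training-signal-annealing schedules from the UDA paper (an assumption about
# the helper's behavior, not its actual source):
import torch

def tsa_thresh_sketch(schedule, global_step, num_train_steps, start, end):
    progress = torch.tensor(float(global_step) / float(num_train_steps))
    if schedule == 'linear_schedule':
        coeff = progress
    elif schedule == 'exp_schedule':
        coeff = torch.exp((progress - 1) * 5)  # releases hard examples late
    elif schedule == 'log_schedule':
        coeff = 1 - torch.exp(-progress * 5)   # releases hard examples early
    else:
        coeff = torch.tensor(1.0)
    return coeff * (end - start) + start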
        for i in range(n_images * n_images):
            y_a = NC.G(x_A,
                       x_B,
                       train=False,
                       a_c=a_c_[i].to(device).unsqueeze(0),
                       a_s=a_s_[i].to(device).unsqueeze(0))
            file_path = path + '_' + str(a_s[i]) + '_' + str(a_c[i]) + ext
            save_image(normalize_tensor(y_a), file_path)
    else:
        # basic transition
        a = torch.linspace(start=0, end=1.0, steps=n_images)
        # y_a = torch.zeros(n_images, 3, config['img_size'], config['img_size'])
        for i in range(n_images):
            y_a = NC.G(x_A,
                       x_B,
                       train=False,
                       a_c=a[i].to(device).unsqueeze(0),
                       a_s=a[i].to(device).unsqueeze(0))
            file_path = path + '_' + str(a[i]) + ext
            save_image(normalize_tensor(y_a), file_path)

    save_image(normalize_tensor(x_A), out_dir + '/x_A' + ext)
    save_image(normalize_tensor(x_B), out_dir + '/x_B' + ext)
    print('Generated images are saved at %s' % out_dir)
    # y_a = vutils.make_grid(y_a, nrow=n_images, normalize=True, scale_each=True)
    # save_image(y_a, out_path)


if __name__ == '__main__':
    args, config = options()
    device, _ = get_device(args)
    test(args, config)
def init_processes(rank, size, args, fn, port, backend='gloo'):
    """Initialize the distributed environment."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(port)
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(args)


if __name__ == "__main__":
    args = argparser.parse_args()
    size = args.world_size

    # Force CPU
    backend = 'gloo' if get_device('cpu') == 'cpu' else 'nccl'

    processes = []
    # https://stackoverflow.com/questions/3671666/sharing-a-complex-object-between-python-processes
    BaseManager.register('ExponentialMovingAvg', ExponentialMovingAvg)
    BaseManager.register('TBWrapper', TBWrapper)
    manager = BaseManager()
    manager.start()
    reward_ema = manager.ExponentialMovingAvg(args.reward_eam_factor)
    writer = manager.TBWrapper(experiment_name)

    vanilla_policy_gradient_mt = partial(vanilla_policy_gradient,
                                         reward_ema=reward_ema,
                                         writer=writer)

    for rank in range(size):
        p = Process(target=init_processes,
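# The snippet above is cut off mid-call. For reference, the conventional
# continuation of this torch.distributed + multiprocessing pattern (a sketch
# assuming init_processes' signature above and an arbitrary example port,
# not the project's actual code) would be:
#
#         p = Process(target=init_processes,
#                     args=(rank, size, args, vanilla_policy_gradient_mt,
#                           29500, backend))
#         p.start()
#         processes.append(p)
#     for p in processes:
#         p.join()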
def train(model: PlantModel,
          optimizer,
          criterion,
          lr_scheduler,
          data_loader: DataLoader,
          data_loader_test: DataLoader,
          num_epochs: int = 10,
          use_cuda: bool = True,
          epoch_save_ckpt: Union[int, list] = None,
          dir: str = None):
    """Train a PlantModel.

    Args:
        data_loader (torch.utils.data.DataLoader): data loader to train the model on
        data_loader_test (torch.utils.data.DataLoader): data loader to evaluate the model on
        num_epochs (int = 10): number of epochs to train the model
        use_cuda (bool = True): whether to use CUDA
        epoch_save_ckpt (list or int): epochs at which to save the model; if -1, save only the last epoch
        dir (str = "checkpoints"): directory where models are saved under the name "{model_name}_{date}_ep{epoch}.pth"
    """
    if epoch_save_ckpt == -1:
        epoch_save_ckpt = [num_epochs - 1]
    elif isinstance(epoch_save_ckpt, int):
        # a single epoch index is also accepted per the Union[int, list] hint
        epoch_save_ckpt = [epoch_save_ckpt]
    if not dir:
        dir = "checkpoints"
    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)

    # choose device
    device = get_device(use_cuda)
    print(f"Using device {device.type}")

    # define dataset
    model.to(device)
    writer = SummaryWriter("logs")
    metric_logger_train = MetricLogger(delimiter="  ")
    # writer_test = SummaryWriter("runs/test")
    # metric_logger_test = MetricLogger(delimiter="  ", writer=writer_test)

    for epoch in metric_logger_train.log_every(range(num_epochs),
                                               print_freq=1,
                                               epoch=0,
                                               header="Training"):
        # train for one epoch, printing every 40 iterations
        train_metric = train_one_epoch(model,
                                       optimizer,
                                       data_loader,
                                       criterion,
                                       device,
                                       epoch,
                                       print_freq=40)
        # metric_logger_train.update(**train_metric)

        # update the learning rate
        lr_scheduler.step()

        # evaluate on the test dataset
        test_metric = evaluate(model, criterion, data_loader_test, device=device)

        # print results
        print_result_table(train_metric, test_metric)
        # metric_logger_test.update(**test_metric)

        for key in train_metric.keys():
            writer.add_scalars("metrics/{}".format(key), {
                "{}_train".format(key): train_metric[key],
                "{}_test".format(key): test_metric[key],
            }, global_step=epoch)

        # save checkpoint
        if epoch in epoch_save_ckpt:
            save_checkpoint(model, optimizer, dir.as_posix(), epoch)
    writer.close()
    print("That's it!")
# LR Scheduler
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                               step_size=cfg.LR_SCHED_STEP_SIZE,
                                               gamma=cfg.LR_SCHED_GAMMA)

# Build data loaders
data_loader, data_loader_test = build_loaders(args)

# Loss
weights = None
if args.weighted_loss:
    weights = torch.zeros_like(data_loader.dataset[0][1])
    for _, label in data_loader.dataset:
        weights += label
    weights = torch.as_tensor(1.0 / (weights / torch.min(weights)),
                              device=get_device(args.use_cuda))
criterion = torch.nn.BCEWithLogitsLoss(weight=weights)

print("Start training")
train(model,
      optimizer,
      criterion,
      lr_scheduler,
      data_loader,
      data_loader_test,
      num_epochs=args.epochs,
      use_cuda=args.use_cuda,
      epoch_save_ckpt=args.checkpoints,
      dir=args.checkpoints_dir)
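# Worked example of the weighting above (illustrative label counts, not real
# data): with per-class positive counts [10, 40, 20], torch.min(weights) is
# 10, so weights = 1 / ([10, 40, 20] / 10) = [1.0, 0.25, 0.5] -- the rarest
# class receives the largest weight in BCEWithLogitsLoss.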
def test_tta(cfg, model, ds, criterion, nf):
    # epoch_loss, epoch_acc, pred_out = test_tta(cfg, model, valid_loader, criterion, nf)
    # ds, net, criterion, device, epoch=-1, n_tta=10, n_class=4
    model.eval()
    device = get_device(cfg)
    logger = gl.get_value('logger')

    if cfg.DATASETS.K_FOLD == 1:
        best_model_fn = osp.join(cfg.MISC.OUT_DIR,
                                 f"{cfg.MODEL.NAME}-best.pth")
    else:
        best_model_fn = osp.join(cfg.MISC.OUT_DIR,
                                 f"{cfg.MODEL.NAME}-Fold-{nf}-best.pth")
    model.load_state_dict(torch.load(best_model_fn))

    n_tta = cfg.MISC.N_TTA
    n_class = cfg.DATASETS.NUM_CLASS

    # in TTA, default batch size = 1
    n_case = 0.0
    y_true = list()
    y_pred = list()
    total_loss = AvgerageMeter()
    PREDS_ALL = []
    PREDS_ALL_TTA = []
    for idx in tqdm(range(len(ds))):
        # print(images.shape)
        with torch.no_grad():
            # if cfg.MISC.TTA_MODE in ['mean', 'mean_softmax']:
            #     pred_sum = torch.zeros((n_class), dtype=torch.float32)
            # else:
            #     pred_sum = torch.ones((n_class), dtype=torch.float32)
            # for n_t in range(n_tta):
            images, labels, meta_infos = parse_batch(ds[idx])
            y_true.append(labels.item())
            images = images.to(device)
            if meta_infos is not None:
                meta_infos = meta_infos.to(device)
                if meta_infos.dim() == 1:
                    meta_infos = meta_infos[None, ...]
                if images.dim() > 3 and meta_infos.size(0) == 1:
                    meta_infos = meta_infos.repeat(images.size(0), 1)
            labels = labels.to(device)
            labels = labels[None, ...]
            if images.dim() == 3:
                images = images[None, ...]

            if 'SingleView' in cfg.MODEL.NAME or 'SVBNN' in cfg.MODEL.NAME:
                outputs = model(images)
            elif model.mode == 'metasingleview':
                outputs = model(images, meta_infos)
            elif model.mode in ['sv_att', 'sv_db']:
                outputs = model(images, labels)

            if cfg.MISC.ONLY_TEST is False and cfg.DATASETS.NAMES == 'ISIC':
                loss = criterion(outputs, labels)
                total_loss.update(loss.item())

            # if cfg.MODEL.LOSS_TYPE == 'pcs':
            #     probs_0 = pcsoftmax(outputs, weight=torch.tensor(cfg.DATASETS.LABEL_W), dim=1)[0].cpu()
            # else:
            if isinstance(outputs, (list, tuple)):
                probs_0 = 0.5 * (F.softmax(outputs[0], dim=1)[0] +
                                 F.softmax(outputs[1], dim=1)[0]).cpu()
            else:
                if 'softmax' in cfg.MISC.TTA_MODE:
                    probs_0 = outputs.cpu().numpy()
                else:
                    probs_0 = F.softmax(outputs, dim=-1).cpu().numpy()

            # save outputs result
            # if cfg.MISC.ONLY_TEST is True:
            PREDS_ALL_TTA.append(outputs.cpu().numpy())

            if cfg.MISC.TTA_MODE in ['mean', 'mean_softmax']:
                pred_sum = np.mean(probs_0, axis=0)
            else:
                # geometric mean over the n_tta augmented views
                pred_sum = np.prod(probs_0, axis=0)
                pred_sum = np.power(pred_sum, 1.0 / n_tta)
            n_case += 1
            probs = np.round_(pred_sum, decimals=4)
            preds = np.argmax(pred_sum)
            y_pred.append(preds)
            if cfg.MISC.ONLY_TEST is False:
                PREDS_ALL.append([*probs, preds, int(labels.item())])
            else:
                PREDS_ALL.append([*probs, preds])

    PREDS_ALL = np.array(PREDS_ALL)
    PREDS_ALL_TTA = np.array(PREDS_ALL_TTA)
    # avg_acc = (PREDS_ALL[:, -2] == PREDS_ALL[:, -1]).sum() / n_case
    np.set_printoptions(precision=4)
    if cfg.MISC.ONLY_TEST is False:
        pred_stat = calc_stat(y_pred, y_true)
        logger.info(f"Valid K-fold: {nf}")
        if n_class <= 10:
            logger.info('confusion matrix\n')
            cm = pred_stat['cm']
            logger.info('{}\n'.format(cm))
            logger.info("Num All Class: {}".format(np.sum(cm, axis=1)))
            logger.info("Acc All Class1: {}".format(pred_stat['cls_acc1']))
            logger.info("Acc All Class2: {}".format(pred_stat['cls_acc2']))
            logger.info("Acc All Class3: {}".format(pred_stat['cls_acc3']))
            logger.info(
                f"Balance Acc 1 2 3 : {pred_stat['bal_acc1']:.4f} "
                f"{pred_stat['bal_acc2']:.4f} {pred_stat['bal_acc3']:.4f}")
        logger.info(f"Average Loss: {total_loss.avg:.4f}, " +
                    f"Average Acc: {pred_stat['avg_acc']}")
        return total_loss.avg, pred_stat['bal_acc1'], PREDS_ALL, PREDS_ALL_TTA
    else:
        return PREDS_ALL, PREDS_ALL_TTA
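# Worked example of the two TTA reductions above (illustrative numbers): for
# probs_0 = [[0.9, 0.1], [0.5, 0.5]] over n_tta = 2 views, the 'mean' branch
# gives [0.7, 0.3], while the geometric branch gives
# sqrt([0.45, 0.05]) ~= [0.67, 0.22]; both pick class 0 via argmax, but the
# geometric mean penalizes views that disagree more strongly.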
def test_tta_heatmap(cfg, model, ds, criterion, nf):
    # epoch_loss, epoch_acc, pred_out = test_tta(cfg, model, valid_loader, criterion, nf)
    # ds, net, criterion, device, epoch=-1, n_tta=10, n_class=4
    # cfg.MISC.CALC_HEATMAP is True
    (Path(cfg.MISC.OUT_DIR) / 'heatmap').mkdir(exist_ok=True)
    model.eval()
    device = get_device(cfg)
    logger = gl.get_value('logger')

    if cfg.DATASETS.K_FOLD == 1:
        best_model_fn = osp.join(cfg.MISC.OUT_DIR,
                                 f"{cfg.MODEL.NAME}-best.pth")
    else:
        best_model_fn = osp.join(cfg.MISC.OUT_DIR,
                                 f"{cfg.MODEL.NAME}-Fold-{nf}-best.pth")
    model.load_state_dict(torch.load(best_model_fn))

    n_tta = cfg.MISC.N_TTA
    n_class = cfg.DATASETS.NUM_CLASS

    # in TTA, default batch size = 1
    n_case = 0.0
    y_true = list()
    y_pred = list()
    total_loss = AvgerageMeter()
    PREDS_ALL = []
    PREDS_ALL_TTA = []
    for idx in tqdm(range(len(ds))):
        # print(images.shape)
        fn = ds.flist[idx]
        img_ori = cv2.imread(fn)
        img_ori = cv2.cvtColor(img_ori, cv2.COLOR_BGR2RGB)
        hh_ori, ww_ori, _ = img_ori.shape

        images, labels, meta_infos, aug_trans = parse_batch(ds[idx])
        y_true.append(labels.item())
        images = images.to(device)
        if meta_infos is not None:
            meta_infos = meta_infos.to(device)
            if meta_infos.dim() == 1:
                meta_infos = meta_infos[None, ...]
            if images.dim() > 3 and meta_infos.size(0) == 1:
                meta_infos = meta_infos.repeat(images.size(0), 1)
        labels = labels.to(device)
        labels = labels[None, ...]
        if images.dim() == 3:
            images = images[None, ...]

        if 'SingleView' in cfg.MODEL.NAME or 'SVBNN' in cfg.MODEL.NAME:
            outputs = model(images)
        elif model.mode == 'metasingleview':
            outputs = model(images, meta_infos)
        elif model.mode in ['sv_att', 'sv_db']:
            outputs = model(images, labels)

        if cfg.MISC.ONLY_TEST is False and cfg.DATASETS.NAMES == 'ISIC':
            loss = criterion(outputs, labels)
            total_loss.update(loss.item())

        # if cfg.MODEL.LOSS_TYPE == 'pcs':
        #     probs_0 = pcsoftmax(outputs, weight=torch.tensor(cfg.DATASETS.LABEL_W), dim=1)[0].cpu()
        # else:
        if isinstance(outputs, (list, tuple)):
            probs_0 = 0.5 * (F.softmax(outputs[0], dim=1)[0] +
                             F.softmax(outputs[1], dim=1)[0]).cpu()
        else:
            if 'softmax' in cfg.MISC.TTA_MODE:
                probs_0 = outputs
            else:
                probs_0 = F.softmax(outputs, dim=-1)

        # save outputs result
        # if cfg.MISC.ONLY_TEST is True:
        PREDS_ALL_TTA.append(outputs.detach().cpu().numpy())

        probs = probs_0.detach().cpu().numpy()
        if cfg.MISC.TTA_MODE in ['mean', 'mean_softmax']:
            pred_sum = np.mean(probs, axis=0)
        else:
            pred_sum = np.prod(probs, axis=0)
            pred_sum = np.power(pred_sum, 1.0 / n_tta)
        n_case += 1
        probs = np.round_(pred_sum, decimals=4)
        preds = np.argmax(pred_sum)
        y_pred.append(preds)
        if cfg.MISC.ONLY_TEST is False:
            PREDS_ALL.append([*probs, preds, int(labels.item())])
        else:
            PREDS_ALL.append([*probs, preds])

        # heatmap
        probs_0 = torch.mean(probs_0, dim=0)
        probs_0[preds].backward()
        gradients_IMG = model.get_activations_gradient_IMG()
        # gradients_META = model.get_activations_gradient_META()

        # pool the gradients across the channels
        pooled_gradients_IMG = torch.mean(gradients_IMG, dim=[0, 2, 3])
        # pooled_gradients_LAT = torch.mean(gradients_LAT, dim=[0, 2, 3])
        # pooled_gradients_AP = torch.mean(torch.abs(gradients_AP), dim=[0, 2, 3])
        # pooled_gradients_LAT = torch.mean(torch.abs(gradients_LAT), dim=[0, 2, 3])

        # get the activations of the last layer
        activations_IMG = model.get_activations_IMG(images).detach()
        # activations_LAT = model.get_activations_LAT(img).detach()

        # weight the channels by the corresponding gradients
        for i in range(pooled_gradients_IMG.shape[0]):
            activations_IMG[:, i, :, :] *= pooled_gradients_IMG[i]
            # activations_LAT[:, i, :, :] *= pooled_gradients_LAT[i]

        # average the channels of the activations
        heatmap_IMG = torch.mean(activations_IMG, dim=1).squeeze().cpu()
        # heatmap_LAT = torch.mean(activations_LAT, dim=1).squeeze().cpu()

        # ReLU on top of the heatmap
        # heatmap_IMG = np.maximum(heatmap_IMG, 0)
        heatmap_IMG = F.relu(heatmap_IMG)
        # heatmap_LAT = np.maximum(heatmap_LAT, 0)

        # normalize the heatmap
        heatmap_IMG /= torch.max(heatmap_IMG)
        # heatmap_LAT /= torch.max(heatmap_LAT)
        # heatmap_AP *= (heatmap_AP > 0.4).float()
        # heatmap_LAT *= (heatmap_LAT > 0.4).float()

        hms = heatmap_IMG.cpu().numpy()
        img_w_hm = np.zeros((hh_ori, ww_ori), dtype='float32')
        img_n_hm = np.zeros((hh_ori, ww_ori), dtype='float32') + 0.00001

        # HM: warp each per-view heatmap back to the original image frame and
        # average by per-pixel view coverage
        for hm, trans in zip(hms, aug_trans):
            hm_imin = cv2.resize(hm, (images.shape[3], images.shape[2]))
            img_w_hm += cv2.warpAffine(hm_imin,
                                       trans, (ww_ori, hh_ori),
                                       flags=cv2.INTER_LINEAR,
                                       borderMode=cv2.BORDER_CONSTANT)
            img_n_hm += cv2.warpAffine(np.ones_like(hm_imin),
                                       trans, (ww_ori, hh_ori),
                                       flags=cv2.INTER_LINEAR,
                                       borderMode=cv2.BORDER_CONSTANT)
        hm_out = img_w_hm / img_n_hm
        hm_out = hm_out * ((hm_out > 0.25).astype('float32'))
        hm_out0 = np.uint8(255 * hm_out)
        # hm_out = cv2.applyColorMap(hm_out, cv2.COLORMAP_JET)
        # superimposed_img_AP = hm_out * 0.4 + img_ori[:, :, ::-1]
        hm_out = cv2.applyColorMap(hm_out0, cv2.COLORMAP_JET) * np.uint8(
            hm_out0[..., None] > 0.25)
        superimposed_img_AP = hm_out * 0.4 + img_ori[:, :, ::-1]
        # alpha = 0.5
        # superimposed_img_AP = cv2.addWeighted(img_ori, alpha, hm_out, 1 - alpha, 0)
        # superimposed_img_AP = superimposed_img_AP[:, :, ::-1]

        label_str = (Path(fn).stem + ' ' + cfg.DATASETS.DICT_LABEL[preds] +
                     ' prob = ' + str(probs[preds]))
        # cv2.rectangle(superimposed_img_AP, (0, 0), (200, 40), (0, 0, 0), -1)
        cv2.putText(superimposed_img_AP, label_str, (10, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)
        fn_heatmap = Path(cfg.MISC.OUT_DIR) / 'heatmap' / (
            Path(fn).stem + '_' + cfg.DATASETS.DICT_LABEL[preds] + '.jpg')
        cv2.imwrite(str(fn_heatmap), superimposed_img_AP)

    PREDS_ALL = np.array(PREDS_ALL)
    PREDS_ALL_TTA = np.array(PREDS_ALL_TTA)
    # avg_acc = (PREDS_ALL[:, -2] == PREDS_ALL[:, -1]).sum() / n_case
    np.set_printoptions(precision=4)
    if cfg.MISC.ONLY_TEST is False:
        pred_stat = calc_stat(y_pred, y_true)
        logger.info(f"Valid K-fold: {nf}")
        if n_class <= 10:
            logger.info('confusion matrix\n')
            cm = pred_stat['cm']
            logger.info('{}\n'.format(cm))
            logger.info("Num All Class: {}".format(np.sum(cm, axis=1)))
            logger.info("Acc All Class1: {}".format(pred_stat['cls_acc1']))
            logger.info("Acc All Class2: {}".format(pred_stat['cls_acc2']))
            logger.info("Acc All Class3: {}".format(pred_stat['cls_acc3']))
            logger.info(
                f"Balance Acc 1 2 3 : {pred_stat['bal_acc1']:.4f} "
                f"{pred_stat['bal_acc2']:.4f} {pred_stat['bal_acc3']:.4f}")
        logger.info(f"Average Loss: {total_loss.avg:.4f}, " +
                    f"Average Acc: {pred_stat['avg_acc']}")
        return total_loss.avg, pred_stat['bal_acc1'], PREDS_ALL, PREDS_ALL_TTA
    else:
        return PREDS_ALL, PREDS_ALL_TTA
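# The heatmap section above is a Grad-CAM-style visualization: gradients of
# the predicted class score are pooled per channel and used to weight the
# last conv activations. A minimal standalone sketch of the same idea
# (hypothetical `score`, `grads`, and `conv_features` names, not this repo's
# API):
#
#     score.backward()                        # d(score)/d(conv_features) into grads
#     w = grads.mean(dim=(0, 2, 3))           # per-channel importance
#     cam = F.relu((conv_features * w[None, :, None, None]).sum(dim=1))
#     cam = cam / cam.max()                   # normalize to [0, 1]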