Example #1
import numpy as np
import torch
from torch.nn.functional import softmax
from sklearn.metrics import roc_auc_score


def eval_split(model, data_loader, device=None):
    with torch.no_grad():
        model.eval()
        y_true5, y_true14, y_score5, y_score14 = [], [], [], []
        for _, inp, targ, _ in data_loader:
            # data_cuda is a project helper that moves tensors to the target device;
            # targ is kept on the CPU for the numpy conversion below
            inp, _ = data_cuda(inp, targ, device=device, non_blocking=False)
            out = model(inp)
            # Softmax over two of the output classes; the second one is used as the score
            probs = softmax(out.permute(0, 2, 1)[:, :, 1:3], dim=-1)
            probs = probs.detach().cpu().numpy()
            targ = targ.numpy()
            for i in range(probs.shape[0]):
                for j in range(probs.shape[1]):
                    true_val = 1 if targ[i][j] == 1 else 0
                    score_val = probs[i][j][1]
                    y_true14.append(true_val)
                    y_score14.append(score_val)
                    # A subset of 5 label indices is also tracked separately
                    if j in (2, 5, 6, 8, 10):
                        y_true5.append(true_val)
                        y_score5.append(score_val)
    y_true5 = np.array(y_true5)
    y_score5 = np.array(y_score5)
    y_true14 = np.array(y_true14)
    y_score14 = np.array(y_score14)
    rocauc5 = roc_auc_score(y_true5, y_score5, average='macro')
    rocauc14 = roc_auc_score(y_true14, y_score14, average='macro')
    model.train()
    return rocauc5, rocauc14
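Every example on this page calls `data_cuda` to move tensors onto the compute device, but the helper itself is not shown here. Based only on the call sites above and below (a variable number of tensors, a `device` argument, and a `non_blocking` flag), a minimal sketch of such a helper could look like the following. This is a hypothetical illustration, not the project's actual implementation.

import torch


def data_cuda(*tensors, device='gpu', non_blocking=False):
    # Hypothetical sketch: move each tensor to CUDA when requested,
    # returning a single tensor for one argument and a tuple otherwise.
    use_cuda = device == 'gpu' and torch.cuda.is_available()
    moved = tuple(
        t.cuda(non_blocking=non_blocking) if use_cuda else t
        for t in tensors
    )
    return moved[0] if len(moved) == 1 else moved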
Example #2
def _nll_step(self, encoded_data, targ, device, non_blocking, ids=None):
    v = encoded_data
    targ = self.truncate_targ(targ)  # truncate the target sentence
    y = data_cuda(targ, device=device, non_blocking=non_blocking)
    words = self.decode_teacher_forcing(
        y, v)  # y: target sentence; v: visual encoding
    return self.loss_nll(y, words), []
Example #3
def _nll_step(self, encoded_data, targ, device, non_blocking, ids=None):
    vl, vg = encoded_data
    # Look up the CheXpert disease labels for each instance by its document ID
    yl = np.zeros((targ.shape[0], self.DISEASE_NUM), dtype='long')
    for i, iid in enumerate(ids):
        did = iid.split(self.DOC_IMAGE_SEPARATOR)[0]
        yl[i] = self.chexpert_labels[did]
    yl = torch.tensor(yl)
    y = data_cuda(targ, device=device, non_blocking=non_blocking)
    yl = data_cuda(yl, device=device, non_blocking=non_blocking)
    words, dis = self.decode_teacher_forcing(y, vl, vg)
    loss, loss_dis, loss_word = self.loss_nll(y, yl, words, dis)
    if loss_dis is None:
        return loss, [float(loss_word.detach().cpu()), 0.0]
    else:
        return loss, [
            float(loss_word.detach().cpu()),
            float(loss_dis.detach().cpu())
        ]
Example #4
def _nll_step(self, encoded_data, targ, device, non_blocking, ids=None):
    vl, vg = encoded_data
    y = data_cuda(targ, device=device, non_blocking=non_blocking)
    stops, words = self.decode_teacher_forcing(y, vl, vg)
    loss, loss_sent, loss_word = self.loss_nll(y, stops, words)
    return loss, [
        float(loss_word.detach().cpu()),
        float(loss_sent.detach().cpu())
    ]
Example #5
    def train_step(self, inp, targ, optimizers, ids=None, schedulers=None, meta=None, clip_grad=None, device='gpu',
                   non_blocking=False, epoch=None):
        """
        Process a single training step
        :param inp: Input images
        :param targ: Output texts
        :param optimizers: Optimizers
        :param ids: Instance IDs
        :param schedulers: Schedulers for batch update
        :param meta: Input metadata
        :param clip_grad: Maximum gradient norm for clipping
        :param device: A CUDA device
        :param non_blocking: A non-blocking option for CUDA data
        :param epoch: The current epoch number
        :return: Losses and rewards
        """
        # Define update steps (_nll_step for NLL and _rl_step for RL)
        if self.rl_opts.epoch is not None and epoch >= self.rl_opts.epoch:
            # NLL+RL optimization (Individual RL weights are processed in self_critical_reward)
            steps = [(self._nll_step, self.rl_opts.weights[0]), (self._rl_step, 1.0)]
            self.rl_train = True
        else:
            # NLL optimization
            steps = [(self._nll_step, 1.0)]
            self.rl_train = False

        total_loss, vals = None, []
        inp = data_cuda(inp, device=device, non_blocking=non_blocking)
        meta = self.meta_cuda(meta, device=device, non_blocking=non_blocking)
        # Initialize optimizers
        for name, optimizer in optimizers.items():
            if self.optimize(name, epoch, self.image_finetune_epoch):
                optimizer.zero_grad()
        # Encode data (shared among multiple decoding processes)
        encoded_data = self.encode(inp, meta)
        # Decode data (NLL and/or RL)
        for step, weight in steps:
            loss, loss_reward_vals = step(encoded_data, targ, device, non_blocking, ids=ids)
            vals.append(float(loss.detach()))
            vals += loss_reward_vals
            loss *= weight
            total_loss = loss if total_loss is None else total_loss + loss
        # Optimize
        if not torch.isnan(total_loss):
            total_loss.backward()
        if clip_grad is not None:
            clip_grad_norm_(self.parameters(), clip_grad)
        if schedulers is not None:
            for _, scheduler in schedulers.items():
                scheduler.batch_step()
        for name, optimizer in optimizers.items():
            if self.optimize(name, epoch, self.image_finetune_epoch):
                optimizer.step()
        # Return losses and rewards
        vals = [float(total_loss.detach())] + vals
        return vals
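For context, here is a hedged sketch of how a training loop might drive `train_step`, assuming a data loader that yields `(ids, inp, targ, view_position)` tuples as in the evaluation example further below and a dict of optimizers. The names `model`, `train_loader`, `optimizers`, and `num_epochs`, as well as the clipping value, are placeholders, not taken from the project.

# Hypothetical driver loop for train_step (placeholder names only)
for epoch in range(num_epochs):
    for ids, inp, targ, vp in train_loader:
        vals = model.train_step(inp, targ, optimizers,
                                ids=ids,
                                meta=(vp,),
                                clip_grad=5.0,
                                device='gpu',
                                non_blocking=train_loader.pin_memory,
                                epoch=epoch)
        total_loss = vals[0]  # the first element is the combined loss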
Example #6
File: sat.py Project: TCBpenta8/ifcc-1
def _nll_step(self, encoded_data, targ, device, non_blocking, ids=None):
    vl, vg = encoded_data
    y = data_cuda(targ, device=device, non_blocking=non_blocking)
    words, alphas = self.decode_teacher_forcing(y, vl, vg)
    loss, loss_alpha, loss_word = self.loss_nll(y, words, alphas)
    if loss_alpha is None:
        return loss, [float(loss_word.detach().cpu()), 0.0]
    else:
        return loss, [
            float(loss_word.detach().cpu()),
            float(loss_alpha.detach().cpu())
        ]
Example #7
def _nll_step(self, encoded_data, targ, device, non_blocking, ids=None):
    vl, vg = encoded_data
    y = data_cuda(targ, device=device, non_blocking=non_blocking)
    words = self.decode_teacher_forcing(y, vl, vg)
    return self.loss_nll(y, words), []
Example #8
File: eval.py Project: TCBpenta8/ifcc
    def generate_and_eval(self, data_loader, progress_name=None, batch=False):
        # Evaluate generate outputs
        self.model.eval()
        with torch.no_grad():
            if progress_name is not None:
                pbar = tqdm(total=len(data_loader.dataset.samples))
                pbar.set_description('{0}'.format(progress_name + '-gen'))
                eval_interval = int(len(data_loader.dataset.samples) / 10)
            else:
                pbar, eval_interval = None, None
            report_ids, reports, hypos, refs, tqdm_interval = [], [], {}, {}, 0
            for rids, inp, targ, vp in data_loader:
                inp = data_cuda(inp,
                                device=self.device,
                                non_blocking=data_loader.pin_memory)
                meta = (vp, )
                meta = self.model.meta_cuda(
                    meta,
                    device=self.device,
                    non_blocking=data_loader.pin_memory)
                rec_words = self.recover_words if self.verbose else None
                encoded_data = self.model.encode(inp, meta)
                if self.nucleus_p is not None:
                    words = []
                    for _ in range(self.beam_size):
                        w, _ = self.model.sample(encoded_data, self.nucleus_p)
                        words.append(w.unsqueeze(dim=1))
                    stops = self.model.dummy_stops(words[0])
                else:
                    stops, words, _ = self.model.decode_beam(
                        encoded_data,
                        self.beam_size,
                        recover_words=rec_words,
                        diversity_rate=self.beam_diversity)
                # Output all beams if diversity rate is set
                idxs = list(
                    range(self.beam_size)
                ) if self.beam_diversity > 0.0 or self.nucleus_p is not None else [
                    0
                ]
                for idx in idxs:
                    widxs = words[:, :,
                                  idx] if self.nucleus_p is None else words[idx]
                    reps, _ = self.recover_words(stops, widxs)
                    for rid, reference, candidate in zip(rids, targ, reps):
                        # Recovered Samples
                        if self.beam_diversity > 0.0 or self.nucleus_p is not None:
                            rid += '__{0}'.format(idx)
                        report_ids.append(rid)
                        reports.append(
                            candidate.replace('\n',
                                              ' ' + self.LINEBREAK + ' '))
                        hypos[rid] = [candidate.replace('\n', ' ')]
                        if data_loader.dataset.multi_instance:
                            reference = reference.split(
                                ToTokenizedTexts.INSTANCE_BREAK)
                        else:
                            reference = [reference]
                        refs[rid] = []
                        for ref in reference:
                            refs[rid].append(ref.replace('\n', ' '))
                tqdm_interval += inp.shape[0]
                if pbar is not None and tqdm_interval >= eval_interval:
                    pbar.update(tqdm_interval)
                    tqdm_interval = 0

            if pbar is not None:
                if tqdm_interval > 0:
                    pbar.update(tqdm_interval)
                pbar.close()
        self.model.train()
        # Calculate IDFs for NLI-TFIDF
        if self.nli is not None and SimpleNLI.COMPARE_TFIDF in self.nli_compare:
            tfidf_vectorizer = self.compute_tfidf_vectorizer(data_loader)
        else:
            tfidf_vectorizer = None
        # Evaluate with metrics
        if batch:
            scores, scores_detailed = self.eval_batch(
                report_ids,
                refs,
                hypos,
                tfidf_vectorizer,
                batch_size=self.EVAL_SIZE,
                progress_name=progress_name)
        else:
            scores, scores_detailed = self.eval(report_ids, refs, hypos,
                                                tfidf_vectorizer)
        return {
            self.EVAL_ID: report_ids,
            self.EVAL_SCORE: scores,
            self.EVAL_SCORE_DETAILED: scores_detailed,
            self.EVAL_REPORT: reports
        }
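As a short hedged sketch, the dictionary returned by `generate_and_eval` can be consumed as below. The evaluator construction is not shown on this page, so `evaluator` and `val_loader` are placeholder names.

# Hypothetical usage of the returned dictionary (placeholder names)
results = evaluator.generate_and_eval(val_loader, progress_name='val')
for rid, report in zip(results[evaluator.EVAL_ID],
                       results[evaluator.EVAL_REPORT]):
    print(rid, report)
print(results[evaluator.EVAL_SCORE])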
Example #9
def meta_cuda(self, meta, device='gpu', non_blocking=False):
    vp = meta[0]
    if self.view_postion:
        vp = data_cuda(vp, device=device, non_blocking=non_blocking)
    return (vp, )
Example #10
def forward(self, sent1s, sent2s):
    # Build [CLS] sent1 [SEP] sent2 [SEP] token sequences for each pair
    buffer, boundaries, max_len = [], [], 0
    for sent1, sent2 in zip(sent1s, sent2s):
        if self.force_lowercase:
            sent1 = sent1.lower()
            sent2 = sent2.lower()
        toks1 = self.tokenizer.tokenize(sent1)
        toks2 = self.tokenizer.tokenize(sent2)
        tokens = ['[CLS]'] + toks1 + ['[SEP]'] + toks2 + ['[SEP]']
        buffer.append(tokens)
        boundaries.append(len(toks1) + 2)
        if len(tokens) > max_len:
            max_len = len(tokens)
    if max_len > self.length:
        max_len = self.length
    # Pad or truncate every sequence to the common length and build tensors
    token_ids, attn_mask = [], []
    seg_ids = [] if self.bert_type != 'distilbert' else None
    for idx, tokens in enumerate(buffer):
        if len(tokens) < max_len:
            for _ in range(max_len - len(tokens)):
                tokens.append('[PAD]')
        elif len(tokens) > max_len:
            if self.verbose:
                print('Truncating pair from {0}->{1}'.format(
                    len(tokens), max_len))
            tokens = tokens[:max_len]
        attn_mask.append(
            torch.tensor(
                [1 if token != '[PAD]' else 0 for token in tokens]))
        token_ids.append(
            torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens)))
        if seg_ids is not None:
            seg_ids.append(
                torch.tensor([
                    0 if i < boundaries[idx] else 1
                    for i in range(len(tokens))
                ]))
    token_ids = torch.stack(token_ids, dim=0)
    attn_mask = torch.stack(attn_mask, dim=0)
    if seg_ids is not None:
        seg_ids = torch.stack(seg_ids, dim=0)
        token_ids, attn_mask, seg_ids = data_cuda(token_ids,
                                                  attn_mask,
                                                  seg_ids,
                                                  device=self.device)
    else:
        token_ids, attn_mask = data_cuda(token_ids,
                                         attn_mask,
                                         device=self.device)
    # DistilBERT takes no token_type_ids; classify from the first-token state
    if self.bert_type == 'distilbert':
        reps = self.bert(token_ids, attention_mask=attn_mask)
        reps = reps[0][:, 0]
        reps = self.dropout(reps)
        return self.linear(reps)
    else:
        reps, cls = self.bert(token_ids,
                              attention_mask=attn_mask,
                              token_type_ids=seg_ids)
        # Classify from either the [CLS] token state or the pooled output
        if self.cls == 'token':
            reps = reps[:, 0]
            reps = self.dropout(reps)
            return self.linear(reps)
        else:
            cls = self.dropout(cls)
            return self.linear(cls)
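Finally, a hedged sketch of calling this `forward` on premise/hypothesis pairs. The class constructor and the label order of the output logits are not shown on this page, so `nli_model` and the softmax over its outputs are assumptions for illustration only.

import torch

# Hypothetical call; nli_model is an instance of the class defining forward above
premises = ['There is no focal consolidation.']
hypotheses = ['The lungs are clear.']
with torch.no_grad():
    logits = nli_model(premises, hypotheses)  # shape: (batch, num_labels)
    probs = torch.softmax(logits, dim=-1)     # assumed label probabilities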