            out = out[0].data.cpu().numpy()
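            # Collect predictions and their ground-truth captions for the controllability evaluation below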
            for o, caps in zip(out, this_captions):
                predictions.append(np.expand_dims(o, axis=0))
                gt_captions.append(caps)

        pbar.update()

predictions = np.concatenate(predictions, axis=0)
gen = {}
gts = {}
scores_nw = []

print("Computing sequence contrallabity results.")
for i, cap in enumerate(predictions):
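    # Decode token indices into words and collapse consecutive duplicate tokens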
    pred_cap = text_field.decode(cap, join_words=False)
    pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)])

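    # Average the nw_aligner alignment score of the prediction against every ground-truth caption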
    score_nw = 0.
    for c in gt_captions[i]:
        score = nw_aligner.score(c, pred_cap)
        score_nw += score
    scores_nw.append(score_nw / len(gt_captions[i]))

    gts[i] = gt_captions[i]
    gen[i] = [pred_cap]

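# Tokenize references and hypotheses with the PTB tokenizer before computing BLEU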
gts_t = PTBTokenizer.tokenize(gts)
gen_t = PTBTokenizer.tokenize(gen)

val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t)
Example No. 2
                running_loss_gate += loss_gate.item()
                pbar.set_postfix(loss_cap=running_loss / (it+1), loss_gate=running_loss_gate / (it+1))
                pbar.update()

        scheduler.step()
    else:
        # Baseline captions
        model.eval()
        baselines = []
        with tqdm(desc='Epoch %d - baseline' % e, unit='it', total=len(dataloader_train)) as pbar:
            with torch.no_grad():
                for it, (detections, ctrl_det_seqs, ctrl_det_gts, ctrl_det_seqs_test, _, caps_gt) in enumerate(dataloader_train):
                    detections, ctrl_det_seqs_test = detections.to(device), ctrl_det_seqs_test.to(device)
                    outs_baseline, _ = model.test(detections, ctrl_det_seqs_test)

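                    # Decode baseline captions, to serve as the reward baseline in the self-critical step below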
                    caps_baseline = text_field.decode(outs_baseline.cpu().numpy(), join_words=False)

                    bas = []
                    for bas_i in caps_baseline:
                        bas_i = ' '.join([k for k, g in itertools.groupby(bas_i)])
                        bas.append(bas_i)
                    baselines.append(bas)
                    pbar.update()

        # Training with self-critical
        model.train()
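        # Running accumulators for per-epoch logging of losses and rewards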
        running_loss = .0
        running_loss_gate = .0
        running_loss_nw = .0
        running_reward = .0
        running_reward_nw = .0