def inference(epoch, vocab, frames, lang):
    model = stochastic_cslr.load_model(1, epoch=epoch, lang=lang)
    #model = stochastic_cslr.load_model()
    model.to("cpu")
    model.eval()
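    # Forward pass; the model outputs log-probabilities (hence the .exp() below).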
    lpis = model([torch.tensor(frames).to("cpu")])
    prob = []

    prob += [lpi.exp().detach().cpu().numpy() for lpi in lpis]
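    # Beam-search decode the frame probabilities into sequences of vocabulary indices.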
    hyp = model.decode(prob=prob, beam_width=10, prune=0.01, nj=4)
    print([" ".join([vocab[i] for i in hi]) for hi in hyp])
    hyp = [sup(h) for h in hyp]
    hyp = [" ".join([vocab[i] for i in hi]) for hi in hyp]

    return hyp
Example #2
    p_drop=0.5,
    random_drop=False,
    random_crop=False,
    crop_size=[224, 224],
    base_size=[256, 256],
)

data_loader = DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=False,  # result should be strictly ordered for evaluation.
    num_workers=args.nj,
    collate_fn=dataset.collate_fn,
)

model = stochastic_cslr.load_model(args.model == "sfl", epoch=args.epoch)
model.to(args.device)
model.eval()

prob = []
ref = []
j = 0

with torch.no_grad():
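    # Accumulate reference labels and frame-wise probabilities for the whole (ordered) split.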
    for batch in tqdm(data_loader):
        ref += batch["label"]
        video = list(map(lambda v: v.to(args.device), batch["video"]))
        prob += [lpi.exp().cpu().numpy() for lpi in model(video)]

hyp = model.decode(prob=prob,
                   beam_width=args.beam_width,
                   prune=args.prune,
                   nj=args.nj)
Example #3
    p_drop=0.5,
    random_drop=False,
    random_crop=False,
    crop_size=[224, 224],
    base_size=[256, 256],
)

data_loader = DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=False,  # result should be strictly ordered for evaluation.
    num_workers=args.nj,
    collate_fn=dataset.collate_fn,
)

model = stochastic_cslr.load_model(args.model == "sfl")
model.to(args.device)
model.eval()

result_dir = Path("results", args.model, args.split)
prob_path = result_dir / "prob.npz"

if args.use_lm:
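    # Build a language model from the dataset's corpus when args.use_lm is set.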
    lm = dataset.corpus.create_lm()
else:
    lm = None

if prob_path.exists():
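    # Reuse frame probabilities cached by a previous run.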
    prob = np.load(prob_path, allow_pickle=True)["prob"]
else:
    prob = []
video_path = "video.avi"
frames = get_frames(video_path=video_path)
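# Subsample the frames using p_drop / random_drop, then load and transform the selected images.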
indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
frames = [frames[i] for i in indices]
frames = map(Image.open, frames)
frames = map(transform, frames)
frames = np.stack(list(frames))

#indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
#frames = [Image.fromarray(frames[i].asnumpy(), 'RGB') for i in indices]
#frames = map(transform, frames)
#frames = np.stack(list(frames))
model = stochastic_cslr.load_model(args.model == "sfl",
                                   epoch=args.epoch,
                                   lang="German")
model.to(args.device)
model.eval()

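# Forward pass on the sampled frames; the outputs are log-probabilities, exponentiated below before decoding.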
lpis = model([torch.tensor(frames).to(args.device)])
prob = []

prob += [lpi.exp().detach().cpu().numpy() for lpi in lpis]
hyp = model.decode(prob=prob,
                   beam_width=args.beam_width,
                   prune=args.prune,
                   nj=args.nj)
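# decode() returns sequences of vocabulary indices; they can be mapped back to gloss
# strings as in inference() above, e.g. (a sketch, assuming a vocab list is available):
#   hyp = [" ".join(vocab[i] for i in hi) for hi in hyp]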


def sup(preds):