Example #1
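These snippets all rely on the same imports and on a project-specific `accuracies` scoring helper; a minimal sketch of what each expects to be in scope (the helper's location is an assumption, so it is only noted in a comment):

import os
import pickle

import progressbar
import torch

# `accuracies(pred_file, datapath, split, is_test, args)` is a project-specific
# helper that scores the pickled (ids, predictions) file; import it from wherever
# the surrounding repository defines it (not shown in these examples).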
def test(split, model, criterion, loader, epoch, args):
    'validate/test the model'
    model.eval()
    loss = 0.
    _ids = []
    predictions = []

    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(loader))
        for idx, data_batch in enumerate(loader):
            _ids.extend(data_batch['_id'])
            subj_batch_var = data_batch['subject']['embedding'].cuda()
            obj_batch_var = data_batch['object']['embedding'].cuda()
            predicate = data_batch['predicate'].cuda()
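            # each supported architecture (drnet, vtranse, vipcnn/pprfcn) takes a different set of inputs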
            if args.model == 'drnet':
                img = data_batch['bbox_img'].cuda()
                mask_batch_var = data_batch['bbox_mask'].cuda()
                output = model(subj_batch_var, obj_batch_var, img,
                               mask_batch_var, predicate)
            elif args.model == 'vtranse':
                img = data_batch['full_img'].cuda()
                ts_batch_var = data_batch['subject']['t'].cuda()
                to_batch_var = data_batch['object']['t'].cuda()
                bboxs_batch_var = data_batch['subject']['bbox'].cuda()
                bboxo_batch_var = data_batch['object']['bbox'].cuda()
                output = model(subj_batch_var, obj_batch_var, img,
                               ts_batch_var, to_batch_var, bboxs_batch_var,
                               bboxo_batch_var, predicate)
            elif args.model == 'vipcnn' or args.model == 'pprfcn':
                img = data_batch['full_img'].cuda()
                bbox_s = data_batch['subject']['bbox'].cuda()
                bbox_o = data_batch['object']['bbox'].cuda()
                output = model(img, bbox_s, bbox_o, predicate)

            predictions.append(output)

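            # when ground-truth labels are available, accumulate the batch loss weighted by batch size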
            if 'label' in data_batch:
                label_batch_var = torch.squeeze(data_batch['label']).cuda()
                loss_batch_var = criterion(output, label_batch_var)
                loss_batch = loss_batch_var.item()
                loss += (len(data_batch['label']) * loss_batch)

            bar.update(idx)

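        # save the (id, prediction) pairs to disk and score them with the project's accuracies() helper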
        if epoch is None:
            epoch = 'test'
        predictions = [v.item() for v in torch.cat(predictions)]
        if split == 'valid':
            pred_file = os.path.join(args.log_dir,
                                     'predictions/pred_%02d.pickle' % epoch)
        else:
            pred_file = os.path.join(args.log_dir,
                                     'predictions/pred_test.pickle')
        with open(pred_file, 'wb') as f:
            pickle.dump((_ids, predictions), f)
        accs = accuracies(pred_file, args.datapath, split, split == 'test',
                          args)
        return loss, accs
Example #2
def test(split, model, criterion, loader, epoch, args):
    'validate/test the model'
    model.eval()
    loss = 0.
    _ids = []
    predictions = []

    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(loader))
        for idx, data_batch in enumerate(loader):
            _ids.extend(data_batch['_id'])
            subj_batch_var = data_batch['subject']['bbox']
            obj_batch_var = data_batch['object']['bbox']
            predi_batch_var = data_batch['predicate']
            if torch.cuda.is_available():
                subj_batch_var = subj_batch_var.cuda()
                obj_batch_var = obj_batch_var.cuda()
                predi_batch_var = predi_batch_var.cuda()

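            # this variant scores the relationship from the subject/object boxes and the predicate alone (no image input)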
            output_var = model(subj_batch_var, obj_batch_var, predi_batch_var)
            predictions.append(output_var.detach())

            if 'label' in data_batch:
                label_batch_var = torch.squeeze(data_batch['label']).view(
                    -1, 1)
                if torch.cuda.is_available():
                    label_batch_var = label_batch_var.cuda()
                loss_batch_var = criterion(output_var, label_batch_var)
                loss_batch = loss_batch_var.item()
                loss += (len(data_batch['label']) * loss_batch)

            bar.update(idx)

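    # if labels were available, convert the summed loss into a per-example average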
    if loss != 0:
        loss /= len(_ids)
    predictions = [v.item() for v in torch.cat(predictions)]
    if split == 'valid':
        pred_file = os.path.join(args.log_dir,
                                 'predictions/pred_%02d.pickle' % epoch)
    else:
        pred_file = os.path.join(args.log_dir, 'predictions/pred_test.pickle')
    with open(pred_file, 'wb') as f:
        pickle.dump((_ids, predictions), f)
    accs = accuracies(pred_file, args.datapath, split, False, args)
    return loss, accs
Example #3
def test(split, model, criterion, loader, epoch, args):
    "validate/test the model"
    model.eval()
    loss = 0.0
    _ids = []
    predictions = []

    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(loader))
        for idx, data_batch in enumerate(loader):
            _ids.extend(data_batch["_id"])
            subj_batch_var = data_batch["subject"]["bbox"]
            obj_batch_var = data_batch["object"]["bbox"]
            predi_batch_var = data_batch["predicate"]
            if torch.cuda.is_available():
                subj_batch_var = subj_batch_var.cuda()
                obj_batch_var = obj_batch_var.cuda()
                predi_batch_var = predi_batch_var.cuda()

            output_var = model(subj_batch_var, obj_batch_var, predi_batch_var)
            predictions.append(output_var.detach())

            if "label" in data_batch:
                label_batch_var = torch.squeeze(data_batch["label"]).view(-1, 1)
                if torch.cuda.is_available():
                    label_batch_var = label_batch_var.cuda()
                loss_batch_var = criterion(output_var, label_batch_var)
                loss_batch = loss_batch_var.item()
                loss += len(data_batch["label"]) * loss_batch

            bar.update(idx)

    if loss != 0:
        loss /= len(_ids)
    predictions = [v.item() for v in torch.cat(predictions)]
    if split == "valid":
        pred_file = os.path.join(args.log_dir, "predictions/pred_%02d.pickle" % epoch)
    else:
        pred_file = os.path.join(args.log_dir, "predictions/pred_test.pickle")
    with open(pred_file, "wb") as f:
        pickle.dump((_ids, predictions), f)
    accs = accuracies(pred_file, args.datapath, split, False, args)
    return loss, accs
Example #4
def test(split, model, criterion, loader, epoch, args):
    "validate/test the model"
    model.eval()
    loss = 0.0
    _ids = []
    predictions = []

    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(loader))
        for idx, data_batch in enumerate(loader):
            _ids.extend(data_batch["_id"])
            subj_batch_var = data_batch["subject"]["embedding"].cuda()
            obj_batch_var = data_batch["object"]["embedding"].cuda()
            predicate = data_batch["predicate"].cuda()
            if args.model == "drnet":
                img = data_batch["bbox_img"].cuda()
                mask_batch_var = data_batch["bbox_mask"].cuda()
                output = model(subj_batch_var, obj_batch_var, img,
                               mask_batch_var, predicate)
            elif args.model == "vtranse":
                img = data_batch["full_img"].cuda()
                ts_batch_var = data_batch["subject"]["t"].cuda()
                to_batch_var = data_batch["object"]["t"].cuda()
                bboxs_batch_var = data_batch["subject"]["bbox"].cuda()
                bboxo_batch_var = data_batch["object"]["bbox"].cuda()
                output = model(
                    subj_batch_var,
                    obj_batch_var,
                    img,
                    ts_batch_var,
                    to_batch_var,
                    bboxs_batch_var,
                    bboxo_batch_var,
                    predicate,
                )
            elif args.model == "vipcnn" or args.model == "pprfcn":
                img = data_batch["full_img"].cuda()
                bbox_s = data_batch["subject"]["bbox"].cuda()
                bbox_o = data_batch["object"]["bbox"].cuda()
                output = model(img, bbox_s, bbox_o, predicate)

            predictions.append(output)

            if "label" in data_batch:
                label_batch_var = torch.squeeze(data_batch["label"]).cuda()
                loss_batch_var = criterion(output, label_batch_var)
                loss_batch = loss_batch_var.item()
                loss += len(data_batch["label"]) * loss_batch

            bar.update(idx)

        if epoch is None:
            epoch = "test"
        predictions = [v.item() for v in torch.cat(predictions)]
        if split == "valid":
            pred_file = os.path.join(args.log_dir,
                                     "predictions/pred_%02d.pickle" % epoch)
        else:
            pred_file = os.path.join(args.log_dir,
                                     "predictions/pred_test.pickle")
        with open(pred_file, "wb") as f:
            pickle.dump((_ids, predictions), f)
        accs = accuracies(pred_file, args.datapath, split, split == "test",
                          args)
        return loss, accs