コード例 #1
0
ファイル: eval_rels.py プロジェクト: zxydi1992/neural-motifs
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from nmotif.config import BOX_SCALE, IM_SCALE
import dill as pkl
import os

# Parse run configuration and select the relationship-model implementation.
conf = ModelConfig()
if conf.model == 'motifnet':
    from nmotif.lib.rel_model import RelModel
elif conf.model == 'stanford':
    from nmotif.lib.rel_model_stanford import RelModelStanford as RelModel
else:
    # Fail loudly with the offending value instead of a bare ValueError().
    raise ValueError(
        "invalid model {!r}: expected 'motifnet' or 'stanford'".format(conf.model))

# Build the Visual Genome splits; non-overlapping boxes are filtered only
# in sgdet mode.
train, val, test = VG.splits(
    num_val_im=conf.val_size,
    filter_duplicate_rels=True,
    use_proposals=conf.use_proposals,
    filter_non_overlap=(conf.mode == 'sgdet'),
)
# Evaluate on the held-out test split when requested.
if conf.test:
    val = test
# Relationship-mode dataloaders for the chosen splits.
train_loader, val_loader = VGDataLoader.splits(
    train, val,
    mode='rel',
    batch_size=conf.batch_size,
    num_workers=conf.num_workers,
    num_gpus=conf.num_gpus,
)

detector = RelModel(
    classes=train.ind_to_classes,
    rel_classes=train.ind_to_predicates,
    num_gpus=conf.num_gpus,
    mode=conf.mode,
コード例 #2
0
cudnn.benchmark = True
conf = ModelConfig()

# Pick the dataset family, then build the matching dataloader pair.
if conf.coco:
    train, val = CocoDetection.splits()
    # Cap validation size; the train self-assignment is preserved from the
    # original in case `ids` is a property with side effects — TODO confirm.
    val.ids = val.ids[:conf.val_size]
    train.ids = train.ids
    loader_cls = CocoDataLoader
else:
    train, val, _ = VG.splits(
        num_val_im=conf.val_size,
        filter_non_overlap=False,
        filter_empty_rels=False,
        use_proposals=conf.use_proposals,
    )
    loader_cls = VGDataLoader
train_loader, val_loader = loader_cls.splits(
    train, val,
    batch_size=conf.batch_size,
    num_workers=conf.num_workers,
    num_gpus=conf.num_gpus,
)

# Object detector: trains its own RPN unless external proposals are used.
detector = ObjectDetector(
    classes=train.ind_to_classes,
    num_gpus=conf.num_gpus,
    mode='proposals' if conf.use_proposals else 'rpntrain',
    use_resnet=conf.use_resnet,
)
detector.cuda()
コード例 #3
0
    # Intersection box: clip each predicted box against the single ground-truth
    # box. ixmin/iymin come from lines above this visible chunk.
    ixmax = np.minimum(gt_box[2], pred_boxes[:, 2])
    iymax = np.minimum(gt_box[3], pred_boxes[:, 3])
    # Width/height use the "+1" inclusive-pixel-coordinate convention;
    # clamp at 0 so disjoint boxes contribute zero intersection area.
    iw = np.maximum(ixmax - ixmin + 1., 0.)
    ih = np.maximum(iymax - iymin + 1., 0.)
    inters = iw * ih

    # union
    # area(gt) + area(pred) - intersection, computed elementwise over all
    # predicted boxes.
    uni = ((gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.) +
           (pred_boxes[:, 2] - pred_boxes[:, 0] + 1.) *
           (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.) - inters)

    # IoU of the ground-truth box against every predicted box.
    overlaps = inters / uni
    return overlaps


# Load the three Visual Genome splits with default options.
train, val, test = VG.splits()

# Per-image sgdet recall accumulators at the standard R@20/50/100 cutoffs —
# one dict for this evaluator, one for the reference implementation.
result_dict_mine = {'sgdet_recall': {k: [] for k in (20, 50, 100)}}
result_dict_theirs = {'sgdet_recall': {k: [] for k in (20, 50, 100)}}

# Evaluate every validation image; the loop body continues past this chunk.
for img_i in trange(len(val)):
    # Ground-truth entry in the evaluator's expected dict format; .copy()
    # guards the dataset arrays against downstream mutation.
    gt_entry = {
        'gt_classes': val.gt_classes[img_i].copy(),
        'gt_relations': val.relationships[img_i].copy(),
        'gt_boxes': val.gt_boxes[img_i].copy(),
    }

    # NOTE(review): despite the original "shuffled" comment, this is the
    # identity index over the GT boxes; the commented-out alternative
    # sampled a random subset of 20.
    gt_indices = np.arange(
        gt_entry['gt_boxes'].shape[0]
    )  #np.random.choice(gt_entry['gt_boxes'].shape[0], 20)
コード例 #4
0
import numpy as np
import torch

from nmotif.config import ModelConfig
from nmotif.lib.pytorch_misc import optimistic_restore
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from nmotif.config import BOX_SCALE, IM_SCALE
from lib.fpn.box_utils import bbox_overlaps
from collections import defaultdict
from PIL import Image, ImageDraw, ImageFont
import os
from functools import reduce

# Run configuration and Visual Genome splits.
conf = ModelConfig()
train, val, test = VG.splits(num_val_im=conf.val_size)
# Evaluate against the test split when requested.
if conf.test:
    val = test

# Relationship-mode dataloaders over the chosen splits.
train_loader, val_loader = VGDataLoader.splits(
    train, val,
    mode='rel',
    batch_size=conf.batch_size,
    num_workers=conf.num_workers,
    num_gpus=conf.num_gpus,
)

detector = RelModel(
    classes=train.ind_to_classes,
    rel_classes=train.ind_to_predicates,
    num_gpus=conf.num_gpus,
    mode=conf.mode,
コード例 #5
0
from nmotif.lib.get_dataset_counts import get_counts, box_filter

from nmotif.config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, DATA_PATH, BOX_SCALE, IM_SCALE, PROPOSAL_FN
import torch.backends.cudnn as cudnn
from nmotif.lib.pytorch_misc import optimistic_restore, nonintersecting_2d_inds
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from copy import deepcopy
import dill as pkl

cudnn.benchmark = True
conf = ModelConfig()

# Whether object pairs must overlap to count as a relationship candidate.
MUST_OVERLAP = False
train, val, test = VG.splits(
    num_val_im=conf.val_size,
    filter_non_overlap=MUST_OVERLAP,
    filter_duplicate_rels=True,
    use_proposals=conf.use_proposals,
)
if conf.test:
    print("test data!")
    val = test
train_loader, val_loader = VGDataLoader.splits(
    train, val,
    mode='rel',
    batch_size=conf.batch_size,
    num_workers=conf.num_workers,
    num_gpus=conf.num_gpus,
)

# Foreground/background relationship co-occurrence counts from training data.
fg_matrix, bg_matrix = get_counts(train_data=train, must_overlap=MUST_OVERLAP)

# Object detector in inference mode with duplicate suppression.
detector = ObjectDetector(
    classes=train.ind_to_classes,
    num_gpus=conf.num_gpus,
    mode='proposals' if conf.use_proposals else 'rpntrain',
    use_resnet=conf.use_resnet,
    nms_filter_duplicates=True,
    thresh=0.01,
)
detector.eval()
コード例 #6
0
ファイル: motifs.py プロジェクト: zxydi1992/neural-motifs
"""
SCRIPT TO MAKE MEMES. this was from an old version of the code, so it might require some fixes to get working.

"""
from nmotif.dataloaders.visual_genome import VG
# import matplotlib
# # matplotlib.use('Agg')
from tqdm import tqdm
import seaborn as sns
import numpy as np
from nmotif.lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from collections import defaultdict
train, val, test = VG.splits(filter_non_overlap=False, num_val_im=2000)

count_threshold = 50
pmi_threshold = 10


def _read_type_column(path):
    """Read a tab-separated types file and return, for each line, the prefix
    (text before the first '_') of the second column."""
    # `with` closes the handle — the original opened both files and never
    # closed them. Iterating the handle also avoids readlines()'s full list.
    with open(path) as fh:
        return [line.strip().split("\t")[1].split("_")[0] for line in fh]


# Per-object and per-relation type tables (same parsing for both files).
o_type = _read_type_column("object_types.txt")
r_type = _read_type_column("relation_types.txt")