def get_counts(train_data=VG(mode='train', filter_duplicate_rels=False, num_val_im=5000),
               must_overlap=True):
    """
    Get counts of all of the relations. Used for modeling directly P(rel | o1, o2).
    :param train_data: Visual Genome training split
    :param must_overlap: if True, only count background pairs whose boxes overlap
    :return: (fg_matrix, bg_matrix) count arrays
    """
    fg_matrix = np.zeros((
        train_data.num_classes,
        train_data.num_classes,
        train_data.num_predicates,
    ), dtype=np.int64)

    bg_matrix = np.zeros((
        train_data.num_classes,
        train_data.num_classes,
    ), dtype=np.int64)

    for ex_ind in range(len(train_data)):
        gt_classes = train_data.gt_classes[ex_ind].copy()
        gt_relations = train_data.relationships[ex_ind].copy()
        gt_boxes = train_data.gt_boxes[ex_ind].copy()

        # For the foreground, we'll just look at everything.
        o1o2 = gt_classes[gt_relations[:, :2]]
        for (o1, o2), gtr in zip(o1o2, gt_relations[:, 2]):
            fg_matrix[o1, o2, gtr] += 1

        # For the background, get all of the object pairs whose boxes overlap.
        o1o2_total = gt_classes[np.array(
            box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
        for (o1, o2) in o1o2_total:
            bg_matrix[o1, o2] += 1

    return fg_matrix, bg_matrix
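
# Usage sketch (not part of the original file): convert the raw counts returned by
# get_counts() into a smoothed estimate of P(rel | o1, o2), which is the stated purpose
# of the function. The epsilon value is an arbitrary choice made here for illustration.
if __name__ == '__main__':
    fg, bg = get_counts(must_overlap=True)
    eps = 1e-3
    smoothed = fg.astype(np.float64) + eps                   # avoid zero counts
    p_rel = smoothed / smoothed.sum(axis=2, keepdims=True)   # normalize over predicates
    # p_rel[o1, o2] is a distribution over predicates for the object-class pair (o1, o2).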
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from nmotif.config import BOX_SCALE, IM_SCALE
import dill as pkl
import os

conf = ModelConfig()
if conf.model == 'motifnet':
    from nmotif.lib.rel_model import RelModel
elif conf.model == 'stanford':
    from nmotif.lib.rel_model_stanford import RelModelStanford as RelModel
else:
    raise ValueError("unknown model: {}".format(conf.model))

train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
                             use_proposals=conf.use_proposals,
                             filter_non_overlap=conf.mode == 'sgdet')
if conf.test:
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)

detector = RelModel(
    classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
    num_gpus=conf.num_gpus, mode=conf.mode,
cudnn.benchmark = True
conf = ModelConfig()

if conf.coco:
    train, val = CocoDetection.splits()
    val.ids = val.ids[:conf.val_size]
    train.ids = train.ids
    train_loader, val_loader = CocoDataLoader.splits(
        train, val, batch_size=conf.batch_size,
        num_workers=conf.num_workers, num_gpus=conf.num_gpus)
else:
    train, val, _ = VG.splits(num_val_im=conf.val_size, filter_non_overlap=False,
                              filter_empty_rels=False, use_proposals=conf.use_proposals)
    train_loader, val_loader = VGDataLoader.splits(
        train, val, batch_size=conf.batch_size,
        num_workers=conf.num_workers, num_gpus=conf.num_gpus)

detector = ObjectDetector(
    classes=train.ind_to_classes, num_gpus=conf.num_gpus,
    mode='rpntrain' if not conf.use_proposals else 'proposals',
    use_resnet=conf.use_resnet)
detector.cuda()
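
# Sketch (not part of the original excerpt): a typical optimizer setup for training the
# detector built above. The learning rate, momentum, weight decay, scheduler settings,
# and the use of plain SGD are illustrative assumptions, not the repository's settings.
import torch.optim as optim

optimizer = optim.SGD(
    [p for p in detector.parameters() if p.requires_grad],  # skip frozen parameters
    lr=1e-3, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='max', factor=0.1, patience=3)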
def iou(gt_box, pred_boxes):
    # The function header and the ixmin/iymin lines were cut off in this excerpt; the
    # function name is assumed, and the intersection minima are reconstructed from the
    # standard VOC-style IoU formula used below.
    ixmin = np.maximum(gt_box[0], pred_boxes[:, 0])
    iymin = np.maximum(gt_box[1], pred_boxes[:, 1])
    ixmax = np.minimum(gt_box[2], pred_boxes[:, 2])
    iymax = np.minimum(gt_box[3], pred_boxes[:, 3])
    iw = np.maximum(ixmax - ixmin + 1., 0.)
    ih = np.maximum(iymax - iymin + 1., 0.)
    inters = iw * ih

    # union
    uni = ((gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.) +
           (pred_boxes[:, 2] - pred_boxes[:, 0] + 1.) *
           (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.) - inters)

    overlaps = inters / uni
    return overlaps


train, val, test = VG.splits()

result_dict_mine = {'sgdet_recall': {20: [], 50: [], 100: []}}
result_dict_theirs = {'sgdet_recall': {20: [], 50: [], 100: []}}

for img_i in trange(len(val)):
    gt_entry = {
        'gt_classes': val.gt_classes[img_i].copy(),
        'gt_relations': val.relationships[img_i].copy(),
        'gt_boxes': val.gt_boxes[img_i].copy(),
    }

    # Use the GT boxes in their original order (the shuffled/subsampled variant is commented out).
    gt_indices = np.arange(gt_entry['gt_boxes'].shape[0])
    # gt_indices = np.random.choice(gt_entry['gt_boxes'].shape[0], 20)
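
    # Sketch (not from the original script): how the reconstructed IoU helper above is
    # typically used when matching predicted boxes against a ground-truth box. The 0.5
    # threshold is the conventional detection criterion, assumed here for illustration:
    #
    #   matched = iou(gt_entry['gt_boxes'][0], some_pred_boxes) >= 0.5
    #
    # where `some_pred_boxes` is a hypothetical (N, 4) array of predicted boxes.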
import numpy as np
import torch
from nmotif.config import ModelConfig, BOX_SCALE, IM_SCALE
from nmotif.lib.pytorch_misc import optimistic_restore
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from nmotif.lib.fpn.box_utils import bbox_overlaps
from tqdm import tqdm
from collections import defaultdict
from PIL import Image, ImageDraw, ImageFont
import os
from functools import reduce

conf = ModelConfig()
train, val, test = VG.splits(num_val_im=conf.val_size)
if conf.test:
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)

detector = RelModel(
    classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
    num_gpus=conf.num_gpus, mode=conf.mode,
from nmotif.lib.get_dataset_counts import get_counts, box_filter
from nmotif.config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, DATA_PATH, BOX_SCALE, IM_SCALE, PROPOSAL_FN
import torch.backends.cudnn as cudnn
from nmotif.lib.pytorch_misc import optimistic_restore, nonintersecting_2d_inds
from nmotif.lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from copy import deepcopy
import dill as pkl

cudnn.benchmark = True
conf = ModelConfig()

MUST_OVERLAP = False
train, val, test = VG.splits(num_val_im=conf.val_size, filter_non_overlap=MUST_OVERLAP,
                             filter_duplicate_rels=True, use_proposals=conf.use_proposals)
if conf.test:
    print("test data!")
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)

fg_matrix, bg_matrix = get_counts(train_data=train, must_overlap=MUST_OVERLAP)

detector = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
                          mode='rpntrain' if not conf.use_proposals else 'proposals',
                          use_resnet=conf.use_resnet, nms_filter_duplicates=True,
                          thresh=0.01)
detector.eval()
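
# Sketch (not in the original excerpt): before evaluation, the frozen detector would
# normally have trained weights loaded into it. `optimistic_restore` is already imported
# above; the checkpoint attribute (`conf.ckpt`) and the 'state_dict' key are assumptions
# made here for illustration.
import torch

if getattr(conf, 'ckpt', None):
    ckpt = torch.load(conf.ckpt, map_location='cpu')
    optimistic_restore(detector, ckpt.get('state_dict', ckpt))
    detector.cuda()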
""" SCRIPT TO MAKE MEMES. this was from an old version of the code, so it might require some fixes to get working. """ from nmotif.dataloaders.visual_genome import VG # import matplotlib # # matplotlib.use('Agg') from tqdm import tqdm import seaborn as sns import numpy as np from nmotif.lib.fpn.box_intersections_cpu.bbox import bbox_overlaps from collections import defaultdict train, val, test = VG.splits(filter_non_overlap=False, num_val_im=2000) count_threshold = 50 pmi_threshold = 10 o_type = [] f = open("object_types.txt") for line in f.readlines(): tabs = line.strip().split("\t") t = tabs[1].split("_")[0] o_type.append(t) r_type = [] f = open("relation_types.txt") for line in f.readlines(): tabs = line.strip().split("\t") t = tabs[1].split("_")[0] r_type.append(t)