def detect():
    """Detect video-level visual relations and save them as a submission file.

    Pipeline: load baseline settings -> predict short-term relations ->
    bucket them per video -> greedily associate them into video-level
    relations -> dump the results (wrapped in a version header) to JSON.

    NOTE(review): `dataset` is referenced below but every construction of it
    is commented out — as written this raises NameError when called; confirm
    which dataset (ImagenetVidVRD or VidOR) should be instantiated.
    """
    # dataset = ImagenetVidVRD('./vidvrd-dataset', './vidvrd-dataset/videos', ['train', 'test'])
    # dataset = VidOR('./vidor-dataset/annotation', './vidor-dataset/video', ['training', 'validation'])
    with open(os.path.join(get_model_path(), 'baseline_setting.json'), 'r') as setting_file:
        param = json.load(setting_file)
    short_term_relations = model.predict(dataset, param)

    # Bucket the short-term relations by video id (first element of the index).
    relations_by_video = defaultdict(list)
    for index, st_rel in short_term_relations.items():
        relations_by_video[index[0]].append((index, st_rel))

    # Stitch per-clip predictions into video-level relations, one video at a time.
    print('greedy relational association ...')
    video_relations = {
        vid: association.greedy_relational_association(
            dataset, relations_by_video[vid], max_traj_num_in_clip=100)
        for vid in tqdm(relations_by_video.keys())
    }

    # Persist the detection result in the expected versioned format.
    result_path = os.path.join(get_model_path(), 'baseline_relation_prediction.json')
    with open(result_path, 'w') as result_file:
        json.dump({'version': 'VERSION 1.0', 'results': video_relations}, result_file)
def detect():
    """Detect video-level visual relations with the baseline Dataset and save them.

    Pipeline: load baseline settings -> predict short-term relations ->
    group them per video -> greedily associate them into video-level
    relations -> dump the raw results dict to JSON.

    NOTE(review): this is a second definition of `detect`; if it lives in the
    same module as the sibling version it shadows it — confirm which one is
    the intended entry point.
    """
    dataset = Dataset()
    with open(os.path.join(get_model_path(), 'baseline_setting.json'), 'r') as fin:
        param = json.load(fin)
    short_term_relations = model.predict(dataset, param)

    # group short term relations by video (index[0] is the video id)
    video_st_relations = defaultdict(list)
    for index, st_rel in short_term_relations.items():
        vid = index[0]
        video_st_relations[vid].append((index, st_rel))

    # video-level visual relation detection by relational association
    # (fixed garbled comment: "vid_features" was a botched find/replace of "visual")
    print('greedy relational association ...')
    video_relations = dict()
    # Iterate the dict directly; the original `.keys()` call was redundant.
    for vid in tqdm(video_st_relations):
        video_relations[vid] = association.greedy_relational_association(
            dataset, video_st_relations[vid], max_traj_num_in_clip=100)

    # save detection result
    with open(os.path.join(get_model_path(), 'baseline_video_relations.json'), 'w') as fout:
        json.dump(video_relations, fout)