def setup(self, bottom, top):
    if self.phase == 0:  # train phase
        import train_config
        config = train_config.Config()
    else:  # val or test phase
        import test_config
        config = test_config.Config()

    self.N = config.N
    self.context_dim = config.context_dim
    self.spatial_dim = config.spatial_dim
    self.HW = config.spatial_pool_map * config.spatial_pool_map
    self.T = config.T
    self.key_word_thresh = config.key_word_thresh
    self.hard_word_att_idx = []

    # query-aware context features for every image location
    top[0].reshape(self.N, self.context_dim + self.spatial_dim, self.HW)
def setup(self, bottom, top):
    if self.phase == 0:  # train phase
        import train_config
        config = train_config.Config()
    else:  # val or test phase
        import test_config
        config = test_config.Config()

    self.N = config.N
    self.context_dim = config.context_dim
    self.spatial_dim = config.spatial_dim
    self.HW = config.spatial_pool_map * config.spatial_pool_map
    self.T = config.T
    self.key_word_thresh = config.key_word_thresh
    self.hard_word_att_idx = []

    # query-aware appearance pool for every word
    top[0].reshape(self.N, self.context_dim, self.T)
    # query-aware spatial position pool for every word
    top[1].reshape(self.N, self.spatial_dim, self.T)
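# NOTE: illustrative sketch only (not part of the original repo excerpt). The real
# train_config.Config / test_config.Config classes are not shown above; this stub just
# lists the attributes that the two setup() methods read, with placeholder values.
class Config(object):
    def __init__(self):
        self.N = 50                 # batch size: number of proposals per forward pass
        self.context_dim = 512      # dimension of the visual context / appearance features
        self.spatial_dim = 8        # dimension of the bounding-box spatial features
        self.spatial_pool_map = 5   # pooled feature map is spatial_pool_map x spatial_pool_map
        self.T = 20                 # maximum query length in words
        self.key_word_thresh = 0.2  # attention threshold used to pick out key words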
        scores_net.forward()
        scores_val = scores_net.blobs['scores'].data.copy()
        scores_val = scores_val[:num_proposal, ...].reshape(-1)

        # Sort the scores for the proposals
        if config.use_nms:
            top_ids = eval_tools.nms(proposal.astype(np.float32), scores_val,
                                     config.nms_thresh)
        else:
            top_ids = np.argsort(scores_val)[::-1]

        # Evaluate on bounding boxes
        for n_eval_num in range(len(eval_bbox_num_list)):
            eval_bbox_num = eval_bbox_num_list[n_eval_num]
            bbox_correct[n_eval_num] += \
                np.any(proposal_IoUs[top_ids[:eval_bbox_num]] >= config.correct_iou_thresh)
        bbox_total += 1

    print('Final results on the whole test set')
    result_str = ''
    for n_eval_num in range(len(eval_bbox_num_list)):
        result_str += 'recall@%s = %f\n' % \
            (str(eval_bbox_num_list[n_eval_num]),
             bbox_correct[n_eval_num] / bbox_total)
    print(result_str)


if __name__ == '__main__':
    config = test_config.Config()
    inference(config)
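# Self-contained sketch of the recall@k criterion applied above, using only numpy.
# proposal_IoUs holds each proposal's IoU with the ground-truth box, top_ids is the
# score-sorted (or NMS-filtered) ranking, and an image counts as correct at k when any
# of its top-k proposals reaches the IoU threshold. Names and values here are illustrative.
import numpy as np

def recall_at_k(proposal_IoUs, top_ids, k, iou_thresh=0.5):
    """True if any of the k highest-ranked proposals overlaps the ground truth enough."""
    return bool(np.any(proposal_IoUs[top_ids[:k]] >= iou_thresh))

# Example: IoUs of three score-sorted proposals are 0.3, 0.6, 0.1
# -> recall@1 misses (0.3 < 0.5) but recall@2 hits (0.6 >= 0.5).
ious = np.array([0.3, 0.6, 0.1])
order = np.argsort(np.array([0.9, 0.8, 0.2]))[::-1]  # rank proposals by descending score
print(recall_at_k(ious, order, k=1), recall_at_k(ious, order, k=2))  # False True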
import pickle

import test_config

from fastNLP.core.losses import LossBase
from fastNLP.core.metrics import MetricBase
from fastNLP.core.optimizer import Optimizer
from fastNLP.core.batch import Batch
from fastNLP.core.sampler import RandomSampler
from fastNLP import Trainer
from fastNLP import Tester
from copy import deepcopy
from fastNLP import CrossEntropyLoss
from fastNLP import AccuracyMetric
from fastNLP.core import Adam
from fastNLP.core import SGD
from fastNLP.core.callback import EarlyStopCallback

opt = test_config.Config()


def test():
    model_path = opt.model_path
    test_data = pickle.load(open(opt.test_data_path, 'rb'))
    vocab = pickle.load(open(opt.vocab, 'rb'))
    word2idx = vocab.word2idx
    idx2word = vocab.idx2word

    input_size = len(word2idx)
    vocab_size = opt.class_num
    class_num = opt.class_num
    embedding_dim = opt.embedding_dim
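    # --- Illustrative sketch of how test() could continue; the original continuation is
    # --- not part of this excerpt. Loading the checkpoint with torch.load() and the
    # --- pred/target field names given to AccuracyMetric are assumptions.
    import torch
    model = torch.load(model_path)  # assumed: checkpoint saved with torch.save(model, path)
    tester = Tester(data=test_data,
                    model=model,
                    metrics=AccuracyMetric(pred='pred', target='target'))
    tester.test()  # runs the model over test_data and prints the accuracy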