def eval_dataset_cls(cfg_path, device=None):
    """Evaluate a classification model over an entire validation dataset.

    Equivalent to the runner's ``load_from`` + ``val`` phases, but usable
    standalone, without constructing a runner.

    Args:
        cfg_path: path to the config file consumed by ``get_config()``.
        device: optional ``torch.device``; defaults to ``cfg.load_device``.
    """
    # Build the objects needed for validation.
    cfg = get_config(cfg_path)
    dataset = get_dataset(cfg.valset, cfg.transform_val)
    dataloader = get_dataloader(dataset, cfg.valloader)
    model = get_model(cfg)
    if device is None:
        device = torch.device(cfg.load_device)
    # Load the checkpoint first, then move the model to the target device.
    # TODO(review): confirm this order matches the runner's behavior.
    load_checkpoint(model, cfg.load_from, device)
    model = model.to(device)

    # Run validation.
    buffer = {'acc': []}
    n_correct = 0
    model.eval()
    for c_iter, data_batch in enumerate(dataloader):
        with torch.no_grad():  # forward only; no autograd bookkeeping
            img = to_device(data_batch['img'], device)
            label = to_device(data_batch['gt_labels'], device)
            y_pred = model(img)
            label = torch.cat(label, dim=0)
            acc1 = accuracy(y_pred, label, topk=1)
            buffer['acc'].append(acc1)
        # Accumulate correct-prediction count: batch accuracy * batch size.
        n_correct += buffer['acc'][-1] * len(data_batch['gt_labels'])

    vis_loss_acc(buffer, title='eval dataset')
    # BUG FIX: print() does not %-interpolate its arguments the way logging
    # does — the original printed the raw format string followed by the value.
    print('ACC on dataset: %.3f' % (n_correct / len(dataset)))
def batch_detector(model, data, device, return_loss=True, **kwargs):
    """Run one detection batch, returning either detections or reduced losses.

    Args:
        model: detector; called with imgs/img_metas plus ground truth kwargs.
        data: batch dict with 'img', 'gt_bboxes', 'gt_labels', 'img_meta' and
            optionally 'gt_landmarks'.
        device: target device for the tensors.
        return_loss: if False run inference; if True compute training losses.
        **kwargs: kept for interface compatibility with the classifier path
            (which passes a loss_fn).

    Returns:
        If ``return_loss`` is False: per-class detections, (n_class,)(k,5).
        Otherwise: dict with the total 'loss' plus each per-type batch sum.
    """
    # Move batch tensors to the device (format conversion happens upstream
    # in to_tensor; device placement is to_device's job).
    imgs = to_device(data['img'], device)
    gt_bboxes = to_device(data['gt_bboxes'], device)
    gt_labels = to_device(data['gt_labels'], device)
    if data.get('gt_landmarks', None) is not None:
        gt_landmarks = to_device(data['gt_landmarks'], device)
    else:
        gt_landmarks = None
    img_metas = data['img_meta']

    # Inference path: return raw detections.
    if not return_loss:
        return model(imgs, img_metas, return_loss=False)

    # Training path: compute losses, then reduce.
    losses = model(imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels,
                   gt_landmarks=gt_landmarks, return_loss=True)
    # Reduce: first sum each loss type over the batch, then sum the types
    # into one scalar total. (Was zip(keys(), values()) with a redundant
    # generator; dict.items() + sum() is the idiomatic equivalent.)
    loss_sum = {name: sum(values) for name, values in losses.items()}
    loss = sum(loss_sum.values())
    outputs = {'loss': loss, **loss_sum}
    return outputs
def batch_segmentator(model, data, device, return_loss=True, **kwargs):
    """Forward one segmentation batch through the model.

    Args:
        model: segmentation model taking imgs, segs and a return_loss flag.
        data: batch dict with 'img' and 'seg' entries.
        device: target device for the tensors.
        return_loss: True for the training/loss path, False for inference.
        **kwargs: accepted for interface parity with the other batch runners.

    Returns:
        The model's loss (return_loss=True) or its predictions otherwise.
    """
    batch_imgs = to_device(data['img'], device)
    batch_segs = to_device(data['seg'], device)
    if return_loss:
        return model(batch_imgs, return_loss=True, segs=batch_segs)
    return model(batch_imgs, return_loss=False, segs=batch_segs)
def batch_classifier(model, data, device, return_loss=True, **kwargs):
    """Forward one classification batch through the model.

    Args:
        model: classifier taking imgs, an optional labels kwarg and a
            return_loss flag.
        data: batch dict with 'img' and 'gt_labels' entries.
        device: target device for the tensors.
        return_loss: True for the training/loss path, False for inference.
        **kwargs: accepted for interface parity with the other batch runners.

    Returns:
        Loss/accuracy dict when return_loss is True; raw outputs otherwise.
    """
    # Move the batch onto the device.
    inputs = to_device(data['img'], device)
    targets = to_device(data['gt_labels'], device)
    if not return_loss:
        return model(imgs=inputs, return_loss=False)  # tbd
    # PyTorch cross-entropy already folds in softmax/one-hot up front and
    # the mean reduction at the end, so the model returns loss and acc.
    return model(imgs=inputs, return_loss=True, labels=targets)
def test_fusion(args, model_config, test_config):
    """Run the fusion pipeline over the test set and save the fused scenes.

    NOTE(review): this definition is shadowed by a later ``test_fusion`` in
    this module and is therefore dead code — one of the two should be removed.

    Args:
        args: dict with at least an 'experiment' directory path.
        model_config: pipeline/model configuration; its device is set here.
        test_config: test configuration (TEST.name, DATA.dataset, ...).
    """
    # Output directories for this test run.
    test_dir = os.path.join(args['experiment'], 'tests', test_config.TEST.name)
    output_dir = os.path.join(test_dir, 'output')
    # FIX: create output_dir (and its parents) even when test_dir already
    # exists — the old exists(test_dir) guard skipped output_dir creation.
    os.makedirs(output_dir, exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_config.MODEL.device = device

    # Test dataset and loader.
    data_config = setup.get_data_config(test_config, mode='test')
    dataset = setup.get_data(test_config.DATA.dataset, data_config)
    loader = torch.utils.data.DataLoader(dataset)

    # Test database holding the estimated/target volumes.
    database = setup.get_database(dataset, test_config, mode='test')

    # Fusion pipeline with trained weights.
    pipeline = Pipeline(model_config).to(device)
    model_path = os.path.join(args['experiment'], 'model/best.pth.tar')
    loading.load_pipeline(model_path, pipeline)

    # TODO(review): hard-coded, user-specific routing checkpoint path —
    # make this configurable (e.g. via test_config) instead.
    routing_checkpoint = os.path.join(
        '/home/yan/Work/opensrc/learning/RoutedFusion/experiments/routing/finetuned_living2/model/best.pth.tar'
    )
    loading.load_model(routing_checkpoint, pipeline._routing_network)

    pipeline.eval()
    for i, batch in tqdm(enumerate(loader), total=len(dataset)):
        # Put all batch data on the device, then fuse it into the database.
        batch = transform.to_device(batch, device)
        pipeline.fuse(batch, database, device)

    # Suppress voxels with too few observations before saving.
    database.filter(value=3.)
    for scene_id in database.scenes_est.keys():
        database.save(output_dir, scene_id)
def test_fusion(args, model_config, test_config):
    """Run the fusion pipeline over the test set, evaluate, and save scenes.

    Args:
        args: dict with at least an 'experiment' directory path.
        model_config: pipeline/model configuration; its device is set here.
        test_config: test configuration (TEST.name, DATA.dataset, ...).
    """
    # Output directories for this test run.
    test_dir = os.path.join(args['experiment'], 'tests', test_config.TEST.name)
    output_dir = os.path.join(test_dir, 'output')
    # FIX: makedirs with exist_ok creates both directories in one call and
    # also handles the case where test_dir exists but output_dir does not —
    # the old exists(test_dir) guard skipped output_dir creation entirely.
    os.makedirs(output_dir, exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_config.MODEL.device = device

    # Test dataset and loader.
    data_config = setup.get_data_config(test_config, mode='test')
    dataset = setup.get_data(test_config.DATA.dataset, data_config)
    loader = torch.utils.data.DataLoader(dataset)

    # Test database holding the estimated/target volumes.
    database = setup.get_database(dataset, test_config, mode='test')

    # Fusion pipeline with trained weights.
    pipeline = Pipeline(model_config)
    pipeline = pipeline.to(device)
    model_path = os.path.join(args['experiment'], 'model/best.pth.tar')
    loading.load_pipeline(model_path, pipeline)

    pipeline.eval()
    for i, batch in tqdm(enumerate(loader), total=len(dataset)):
        # Put all batch data on the device, then fuse it into the database.
        batch = transform.to_device(batch, device)
        pipeline.fuse(batch, database, device)

    # Suppress voxels with too few observations, then report metrics.
    database.filter(value=3.)
    test_eval = database.evaluate()
    print(test_eval)
    for scene_id in database.scenes_est.keys():
        database.save(output_dir, scene_id)