# -*- coding: utf-8 -*-
"""
Created on Sat Jun 15 17:32:38 2019

@author: win10
"""
import torch

from densefuse_net import DenseFuseNet
from utils import test

# Evaluation script: load pretrained DenseFuse weights and run the
# fusion test routine over the infrared/visible (IV) image set.
device = 'cuda'

model = DenseFuseNet().to(device)
# Checkpoint is a dict; trained parameters live under the 'weight' key.
model.load_state_dict(torch.load('./train_result/model_weight.pkl')['weight'])

test_path = './images/IV_images/'
test(test_path, model, mode='add')
import os
import time

import torch
import torchvision.transforms as transforms
from PIL import Image

from channel_fusion import channel_f as channel_fusion
from densefuse_net import DenseFuseNet
from utils import mkdir, Strategy

# Helpers for converting between PIL images and torch tensors.
_tensor = transforms.ToTensor()
_pil_gray = transforms.ToPILImage()

# NOTE(review): set before any CUDA work so only GPU 2 is visible —
# torch initializes CUDA lazily, so setting it here still takes effect.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
device = 'cuda'

model = DenseFuseNet().to(device)
checkpoint = torch.load('./train_result/H_best.pkl')
# checkpoint = torch.load('./train_result/model_weight_new.pkl')
model.load_state_dict(checkpoint['weight'])

# Ensure all output directories exist before writing results.
for _out_dir in ("outputs/fea/", "outputs/fea/vi/", "outputs/fea/ir/", "result"):
    mkdir(_out_dir)

# Input directories for the infrared / visible test pairs.
test_ir = './Test_ir/'
test_vi = './Test_vi/'


def load_img(img_path, img_type='gray'):
    """Load an image from disk as a (1, C, H, W) tensor on *device*.

    Parameters
    ----------
    img_path : str
        Path to the image file.
    img_type : str, optional
        When 'gray' (the default), the image is converted to
        single-channel luminance ('L') before tensor conversion.

    Returns
    -------
    torch.Tensor
        Image scaled to [0, 1] with a leading batch dimension,
        moved to the module-level ``device``.
    """
    image = Image.open(img_path)
    if img_type == 'gray':
        image = image.convert('L')
    return _tensor(image).unsqueeze(0).to(device)