Example #1
def inference(args):

    # Build the generator described in the YAML config and restore its weights.
    with open(args.config) as cfg:
        config = yaml.load(cfg, Loader=yaml.FullLoader)
    model = get_generator(config['model'])
    model.load_state_dict(torch.load(args.weights)['model'])
    model = model.cuda()

    os.makedirs(os.path.dirname(args.output), exist_ok=True)
    # Normalize to [-1, 1] and pad the frame to a size the network can process.
    img_transforms = transforms.Compose(
        [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    size_transform = Compose([PadIfNeeded(736, 1280)])
    crop = CenterCrop(720, 1280)

    img = cv2.imread(args.input)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_s = size_transform(image=img)['image']
    img_tensor = torch.from_numpy(
        np.transpose(img_s / 255, (2, 0, 1)).astype('float32'))
    img_tensor = img_transforms(img_tensor)

    with torch.no_grad():
        img_tensor = img_tensor.unsqueeze(0).cuda()
        result_image = model(img_tensor)
    # Map the output from [-1, 1] back to [0, 255], crop the padding away and save as BGR.
    result_image = result_image[0].cpu().float().numpy()
    result_image = (np.transpose(result_image, (1, 2, 0)) + 1) / 2.0 * 255.0
    result_image = crop(image=result_image)['image']
    result_image = result_image.astype('uint8')
    cv2.imwrite(args.output, cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR))
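For context, a minimal command-line wrapper that could drive this function might look like the sketch below. The flag names mirror the attributes used above (config, weights, input, output); the default file names are assumptions, not part of the original snippet.

# Hypothetical CLI entry point for inference(); flag names follow the attributes used above.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Single-image deblurring inference')
    parser.add_argument('--config', default='config/config.yaml')
    parser.add_argument('--weights', default='fpn_inception.h5')
    parser.add_argument('--input', required=True, help='path to the blurry image')
    parser.add_argument('--output', required=True, help='path for the restored image')
    inference(parser.parse_args())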
Example #2
    def __init__(self, weights, model_name):
        super().__init__()

        parameters = {'g_name': model_name, 'norm_layer': 'instance'}
        self.impl = get_generator(parameters)
        checkpoint = torch.load(weights, map_location='cpu')['model']
        self.impl.load_state_dict(checkpoint)
        # Keep the model in train mode (see the comments in Examples #3-#5),
        # then strip batch norm layers entirely.
        self.impl.train(True)
        remove_all_batch_norm(self.impl)
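remove_all_batch_norm is a project helper whose body is not shown in this snippet. A minimal sketch of what such a helper could do, assuming it simply swaps every BatchNorm layer for an identity op, is given below; the name with the _sketch suffix is hypothetical and not the project's actual implementation.

import torch.nn as nn

def remove_all_batch_norm_sketch(module: nn.Module) -> None:
    # Hypothetical illustration: recursively replace BatchNorm layers with Identity.
    for name, child in module.named_children():
        if isinstance(child, nn.modules.batchnorm._BatchNorm):
            setattr(module, name, nn.Identity())
        else:
            remove_all_batch_norm_sketch(child)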
Example #3
 def __init__(self, weights_path: str, model_name: str = ''):
     with open('config/config.yaml') as cfg:
         config = yaml.load(cfg, Loader=yaml.FullLoader)
     model = get_generator(model_name or config['model'])
     model.load_state_dict(torch.load(weights_path)['model'])
     self.model = model.cuda()
     self.model.train(True)
     # GAN inference should run in train mode so the norm layers use actual batch
     # statistics; this is intentional, not a bug.
     self.normalize_fn = get_normalize()
Example #4
 def __init__(self, weights_path, model_name=''):
     with open('config/config.yaml') as cfg:
         config = yaml.load(cfg, Loader=yaml.FullLoader)
     model = get_generator(model_name or config['model'])
     model.load_state_dict(
         torch.load(weights_path,
                    map_location=lambda storage, loc: storage)['model'])
     if torch.cuda.is_available():
         self.model = model.cuda()
     else:
         self.model = model
     self.model.train(True)
     # GAN inference should run in train mode so the norm layers use actual batch
     # statistics; this is intentional, not a bug.
     self.normalize_fn = get_normalize()
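Assuming this __init__ belongs to a predictor class (called Predictor below; the class name is an assumption, since the snippet only shows the constructor), constructing it and running a dummy batch through the wrapped generator might look like this:

# Hypothetical usage; 'Predictor' and the weights filename are assumptions.
import torch

predictor = Predictor(weights_path='fpn_inception.h5', model_name='fpn_inception')
x = torch.rand(1, 3, 256, 256)  # dummy input batch, NCHW
if torch.cuda.is_available():
    x = x.cuda()
with torch.no_grad():
    y = predictor.model(x)  # per Example #1, the output is expected in [-1, 1]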
Example #5
 def __init__(self,
              weights_path: str = "fpn_inception.h5",
              model_name: str = 'fpn_inception'):
     with open('config/config.yaml') as cfg:
         config = yaml.load(cfg, Loader=yaml.FullLoader)
     self.use_gpu = torch.cuda.is_available()
     model = get_generator(model_name, config)
     model.load_state_dict(
         torch.load(weights_path, map_location="cpu")['model'])
     if self.use_gpu:
         self.model = model.cuda()
     else:
         self.model = model
     self.model.train(True)
     # GAN inference should run in train mode so the norm layers use actual batch
     # statistics; this is intentional, not a bug.
     self.normalize_fn = get_normalize()
Example #6
 def build(self):
     print("build is called")
     networks.get_generator()
Example #7
import yaml
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from pretrainedmodels import inceptionresnetv2
from torchsummary import summary
from torch.jit import ScriptModule, script_method, trace
import functools

# get_generator is the project's model factory (assumed to live in models.networks).
from models.networks import get_generator


with open('config/config.yaml') as cfg:
    config = yaml.load(cfg, Loader=yaml.FullLoader)
model = get_generator(config['model'])
weights_path = 'fpn_inception.h5'
model.load_state_dict(torch.load(weights_path, map_location='cpu')['model'])

model.eval()
model = model.cpu()
# summary(model, input_size=(3, 256, 256))

# model = model.cuda()
# Keep train mode so the norm layers use actual batch statistics (see Examples #3-#5).
model.train(True)
# model = model.cpu()
example = torch.rand(1, 3, 128, 128)  # .to('cuda')
# .module unwraps the DataParallel container around the generator before tracing.
traced_script_module = torch.jit.trace(model.module, example)
traced_script_module.save("model128.pt")
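To sanity-check the exported artifact, the traced module can be loaded back with torch.jit.load and run on a dummy input of the same size used for tracing; the file name matches the save call above, and the expected output shape is an assumption based on the generator returning an image of the same spatial size.

# Reload the TorchScript module and run it on a dummy 128x128 input.
loaded = torch.jit.load("model128.pt")
with torch.no_grad():
    out = loaded(torch.rand(1, 3, 128, 128))
print(out.shape)  # an image batch with the same spatial size is expected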