Example #1
def srpgan_eval(images, generator_file, upscale_factor, use_cuda, num_workers=os.cpu_count()):
    """
    Turns a list of images into super-resolution versions and returns them
    Args:
        images (list): List of Pillow images
        generator_file (file): The saved generator model file
        upscale_factor (int): Either 2, 4 or 8
        use_cuda (bool): Whether or not to use the GPU
        num_workers (int): Number of worker processes to use

    Returns:
        list: A list of SR images
    """
    netG = Generator(upscale_factor)
    learner = Learner(ClassifierCore(netG, None, None), use_cuda=use_cuda)
    ModelSaverCallback.restore_model_from_file(netG, generator_file, load_with_cpu=not use_cuda)
    eval_ds = EvalDataset(images)
    # One batch at a time as the pictures may differ in size
    eval_dl = DataLoader(eval_ds, 1, shuffle=False, num_workers=num_workers)

    images_pred = []
    predictions = learner.predict(eval_dl, flatten_predictions=False)
    tfs = transforms.Compose([
        transforms.ToPILImage(),
    ])
    for pred in predictions:
        pred = pred.view(pred.size()[1:])  # Drop the batch dimension (batch size is 1)
        images_pred.append(tfs(pred.cpu()))

    return images_pred
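
# A minimal usage sketch for srpgan_eval (not part of the original example);
# the image paths and checkpoint name below are hypothetical.
from PIL import Image

lr_images = [Image.open(p).convert("RGB") for p in ("lr_a.png", "lr_b.png")]
sr_images = srpgan_eval(lr_images, "Generator.pth", upscale_factor=4, use_cuda=False)
for i, img in enumerate(sr_images):
    img.save("sr_{}.png".format(i))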
Example #2
def train(args):
    num_workers = os.cpu_count()
    train_loader, valid_loader = get_loaders(args, num_workers)

    model_saver = ModelSaverCallback(saved_model_dir.absolute(), args.adv_epochs, every_n_epoch=10)

    netG = Generator(args.upscale_factor)
    netG.apply(weights_init)
    netD = Discriminator((3, args.crop_size, args.crop_size))
    netD.apply(weights_init)
    optimizer_g = optim.Adam(netG.parameters(), lr=1e-4)
    optimizer_d = optim.Adam(netD.parameters(), lr=1e-4)

    # Restore the models if they exist
    if args.restore_models == 1:
        model_saver.restore_models([netG, netD], saved_model_dir.absolute())
    else:
        if args.gen_epochs > 0:
            print("---------------------- Generator training ----------------------")
            callbacks = [ReduceLROnPlateau(optimizer_g, loss_step="train")]
            loss = nn.MSELoss()
            learner = Learner(ClassifierCore(netG, optimizer_g, loss))
            learner.train(args.gen_epochs, None, train_loader, None, callbacks)

    print("----------------- Adversarial (SRPGAN) training -----------------")
    callbacks = [model_saver, ReduceLROnPlateau(optimizer_g, loss_step="valid"),
                 TensorboardVisualizerCallback(tensorboard_dir.absolute())]

    g_loss = GeneratorLoss()
    d_loss = DiscriminatorLoss()
    learner = Learner(SRPGanCore(netG, netD, optimizer_g, optimizer_d, g_loss, d_loss))
    learner.train(args.adv_epochs, [SSIM(), PSNR()], train_loader, valid_loader, callbacks)
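
# A sketch of the argument namespace train() expects, inferred from the fields
# read above; the flag names match the code, but the defaults are illustrative
# and get_loaders may read additional fields not shown in this snippet.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--upscale_factor", type=int, default=4)
parser.add_argument("--crop_size", type=int, default=96)
parser.add_argument("--gen_epochs", type=int, default=100)
parser.add_argument("--adv_epochs", type=int, default=2000)
parser.add_argument("--restore_models", type=int, default=0)
train(parser.parse_args())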
Example #3
    vectorizer=vectorizer,
)

# loss = torch.nn.MSELoss()
loss = torch.nn.CosineEmbeddingLoss(margin=0.2)


model = SiamesCos(
    word_emb_sizes=args.emb_size,
    conv_sizes=[int(args.netsize)],
    out_size=[args.netsize, args.netsize // 2],
    dropout=args.dropout,
)

if args.restore_model:
    ModelSaverCallback.restore_model_from_file(model, args.restore_model, load_with_cpu=(not args.cuda))

optimizer = optim.Adam(filter(lambda x: x.requires_grad, model.parameters()), lr=args.lr,
                       weight_decay=args.weight_decay)

run_name = args.run + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")

tb_dir = os.path.join(TB_DIR, run_name)
if not os.path.exists(tb_dir):
    os.mkdir(tb_dir)

metrics = [
    CosRocAucMetric(threshold=0)
]
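
# A plausible continuation (not from the original snippet): wiring the pieces
# above into a torchlite Learner, following the pattern of the other examples
# and assuming their torchlite imports; train_loader, valid_loader and
# args.epochs are hypothetical.
learner = Learner(ClassifierCore(model, optimizer, loss), use_cuda=args.cuda)
learner.train(args.epochs, metrics, train_loader, valid_loader,
              [TensorboardVisualizerCallback(tb_dir)])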

Example #4
if args.emb_path:
    vectorizer = ModelVectorizer(model_path=args.emb_path)
else:
    vectorizer = OnDiskVectorizer(mtx_path=args.mtx_path,
                                  meta_path=args.meta_path)

model = Matcher(
    word_emb_sizes=net_params['word_emb_sizes'],
    conv_sizes=net_params['conv_sizes'],
    out_size=net_params['out_size'],
    dropout=0.0,
)

ModelSaverCallback.restore_model_from_file(model,
                                           args.model,
                                           load_with_cpu=(not args.cuda))

model.eval()

with open(args.input_file) as fd:
    for line in fd:
        line = line.strip()
        sent_a = line
        sent_a_token = tokenizer(sent_a)
        if len(sent_a_token) <= 2:
            sent_a_token += ['xxx']  # pad very short sentences with a dummy token

        # The tokens are passed twice, presumably to form a two-element batch
        sent_a_vect = vectorizer.convert([sent_a_token, sent_a_token])
        vect = model.cdssm_a.process_sentences(sent_a_vect)
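        # A possible way to consume the sentence vector (not from the original
        # snippet); the exact shape of `vect` depends on process_sentences.
        print(vect.detach().cpu().numpy())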
Example #5
from torchlite.torch.models.srpgan import Generator
from torchlite.torch.learner import Learner
from torchlite.torch.learner.cores import ClassifierCore
from torchlite.torch.train_callbacks import ModelSaverCallback
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

cur_path = os.path.dirname(os.path.abspath(__file__))
# `efiles` is a helper module imported elsewhere in the original file;
# its import is not shown in this snippet.
saved_model_dir = efiles.create_dir_if_not_exists(
    os.path.join(cur_path, "models"))

netG = Generator(4)
learner = Learner(ClassifierCore(netG, None, None), use_cuda=True)
ModelSaverCallback.restore_model_from_file(netG,
                                           saved_model_dir / "Generator.pth",
                                           load_with_cpu=False)  # use_cuda=True, so load on GPU
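
# A minimal sketch (not from the original file) of running the restored
# generator directly on one image; "input.png" is a hypothetical path.
from PIL import Image
import torch

netG.cuda().eval()  # make sure the generator is on the GPU, in eval mode
img = Image.open("input.png").convert("RGB")
x = transforms.ToTensor()(img).unsqueeze(0).cuda()  # NCHW batch of one
with torch.no_grad():
    sr_tensor = netG(x)
transforms.ToPILImage()(sr_tensor.squeeze(0).cpu().clamp(0, 1)).save("output.png")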


def evaluate(imorig):
    """
    Method used for inference only
    """
    img_shape = imorig.shape
    h = 200                        # tile height in pixels
    num_h = img_shape[0] // h      # number of full tiles along the height
    ori_h = h * num_h              # height covered by the full tiles
    crop_h = img_shape[0] - ori_h  # leftover rows not covered by full tiles
    w = 200                        # tile width in pixels
    num_w = img_shape[1] // w      # number of full tiles along the width
    ori_w = w * num_w