Example no. 1
0
def python_sample(number):
    """Generate `number` random image samples with the torch models.

    Draws latent codes, maps them through the transformer and decoder,
    assembles a roughly square grid, shows it, and saves it to
    output/sample-9.png.  Returns None.
    """
    # NOTE(review): the random codes are deliberately drawn BEFORE the
    # torch models are created — the original author flagged that the
    # ordering matters ("it is really strange"); keep this order.
    zcodes = torch.randn(number, 1, 1, 512)

    model_setenv()
    device = model_device()

    decoder = get_decoder().to(device)
    decoder.eval()

    transformer = get_transformer().to(device)
    transformer.eval()

    print("Generating samples ...")
    start_time = time.time()

    images = []
    for i in range(number):
        zcode = zcodes[i:i + 1, :, :, :].to(device)
        with torch.no_grad():
            wcode = transformer(zcode)
            image = decoder(wcode)
        # Move each sample to the CPU immediately so GPU memory is not
        # held for the whole batch (consistent with onnx_sample).
        images.append(image.cpu())

    spend_time = time.time() - start_time
    print("Spend time: {:.2f} seconds".format(spend_time))

    # Arrange the samples in a roughly square grid.
    nrow = int(math.sqrt(number) + 0.5)
    image = grid_image(torch.cat(images, dim=0), nrow=nrow)
    image.show()
    image.save("output/sample-9.png")
Example no. 2
0
def onnx_sample(number):
    """Generate `number` random image samples with the ONNX models.

    Draws latent codes, runs them through the exported transformer and
    decoder ONNX models, assembles a grid image, shows it, and saves it
    to output/sample-onnx-9.png.  Returns None.
    """
    # NOTE(review): the random codes are deliberately drawn BEFORE the
    # models are loaded — the original author flagged that the ordering
    # matters; keep this order.
    zcodes = torch.randn(number, 1, 1, 512)

    model_setenv()
    device = model_device()

    decoder = onnx_model_load("output/image_gandecoder.onnx")
    transformer = onnx_model_load("output/image_gantransformer.onnx")

    print("Generating onnx samples ...")
    start_time = time.time()

    images = []
    for i in range(number):
        zcode = zcodes[i:i + 1, :, :, :].to(device)
        wcode = onnx_model_forward(transformer, zcode)
        image = onnx_model_forward(decoder, wcode)
        # Keep results on the CPU so GPU memory is not held per sample.
        images.append(image.cpu())

    spend_time = time.time() - start_time
    print("Spend time: {:.2f} seconds".format(spend_time))

    # Arrange the samples in a roughly square grid.
    nrow = int(math.sqrt(number) + 0.5)
    image = grid_image(torch.cat(images, dim=0), nrow=nrow)
    image.show()
    image.save("output/sample-onnx-9.png")
Example no. 3
0
def rate(img_path):
    """Score every image matching the glob pattern `img_path` with NIMA.

    For each matched file, prints the predicted mean opinion score and
    its standard deviation followed by the filename.  Results are
    printed, not returned (returns None).
    """
    model_setenv()
    device = model_device()

    model = get_model()
    model_name = 'models/ImageNima.pth'
    model_load(model, model_name)
    model = model.to(device)
    model.eval()

    image_filenames = sorted(glob.glob(img_path))

    transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    # Votes 1..10: the model emits a distribution over ten score bins.
    weighted_votes = torch.arange(10, dtype=torch.float) + 1
    weighted_votes = weighted_votes.to(device)

    for filename in image_filenames:
        img = Image.open(filename).convert('RGB')
        img = transform(img).to(device)

        with torch.no_grad():
            scores = model(img.view(1, 3, 224, 224))
            # Mean score = expectation of the vote distribution.
            mean = torch.matmul(scores, weighted_votes)
            # Standard deviation of the same distribution.
            std = torch.sqrt((scores * torch.pow(
                (weighted_votes - mean.view(-1, 1)), 2)).sum(dim=1))

        print("{:.4f} {:.4f}--- {}".format(mean.item(), std.item(), filename))
Example no. 4
0
#
import argparse
import os
import pdb  # For debug

import torch

from data import get_data
from model import get_model, model_device, valid_epoch

if __name__ == "__main__":
    # Evaluate a trained model checkpoint on the test split.
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--checkpoint',
                            type=str,
                            default="output/{{ . }}.pth",
                            help="checkpoint file")
    arg_parser.add_argument('--bs', type=int, default=2, help="batch size")
    cmd_args = arg_parser.parse_args()

    # Build the model from the checkpoint and move it to the device.
    net = get_model(cmd_args.checkpoint)
    run_device = model_device()
    net = net.to(run_device)

    print("Start testing ...")
    # NOTE(review): 'trainning' is the keyword expected by get_data.
    test_loader = get_data(trainning=False, bs=cmd_args.bs)
    valid_epoch(test_loader, net, run_device, tag='test')