Example no. 1
def main(step, dataset, data_dir):
    global args
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)    

    # Create model and optimizer
    model = GenerativeQueryNetworkVal(x_dim=3, v_dim=6, r_dim=256, h_dim=128, z_dim=64, L=12)
    model.set_multiple_gpus()
    if step > 0:
        model_dir = data_dir + '/model/model_' + str(step) + '.pkl'
        model.load_state_dict(torch.load(model_dir))
    if args.cuda:
        model.cuda()
    cudnn.benchmark = True
    
    # Model optimisations
    #model = nn.DataParallel(model) if args.data_parallel else model
    #model = model.half() if args.fp16 else model

   

    # Load the dataset
    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
    tot = 0
    # Only the first scene is processed: the function returns on the second pass.
    for t in range(len(scene_name)):
        if t == 1:
            return

        for g in range(view_num):
            data_root = data_dir + '/torch' + '/val/temporal/' + scene_name[t] + '/view' + str(g) + '/'
            val_loader = torch.utils.data.DataLoader(ShepardMetzler(root_dir=data_root),  #, target_transform=transform_viewpoint
                                                     batch_size=args.val_batch_size,
                                                     shuffle=False, **kwargs)
            val_dir = data_dir + '/val/'
            if not os.path.exists(val_dir):
                os.mkdir(val_dir)
            val_dir = val_dir + 'temporal/'
            if not os.path.exists(val_dir):
                os.mkdir(val_dir)
            val_dir = val_dir + scene_name[t] + '/'
            if not os.path.exists(val_dir):
                os.mkdir(val_dir)
            val_dir = val_dir + 'view' + str(g) + '/'
            if not os.path.exists(val_dir):
                os.mkdir(val_dir)
            for m in range(1, 2):  # a single observation index, m = 1
                print('------------scene' + str(t) + '---view' + str(g) + '---observation' + str(m) + '--------------')
                valTemporal(model, val_loader, val_dir, m)
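The repeated exists/mkdir chain above can be collapsed into a single call: os.makedirs creates every missing intermediate directory and, with exist_ok=True (Python 3.2+), is a no-op when the path already exists. A minimal sketch, reusing the example's data_dir, scene_name[t] and g:

import os

# One call replaces the four exists/mkdir pairs; os.path.join also avoids
# the duplicated '/' separators produced by manual concatenation.
val_dir = os.path.join(data_dir, 'val', 'temporal', scene_name[t], 'view' + str(g))
os.makedirs(val_dir, exist_ok=True)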
Example no. 2
def main(step, dataset, data_dir):
    global args
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Create model and optimizer
    model = GenerativeQueryNetworkVal(x_dim=3,
                                      v_dim=6,
                                      r_dim=256,
                                      h_dim=128,
                                      z_dim=64,
                                      L=12)
    model.set_multiple_gpus()
    if step > 0:
        model_dir = data_dir + '/model/model_' + str(step) + '.pkl'
        model.load_state_dict(torch.load(model_dir))
    if args.cuda:
        model.cuda()
    cudnn.benchmark = True

    # Model optimisations
    #model = nn.DataParallel(model) if args.data_parallel else model
    #model = model.half() if args.fp16 else model

    # Load the dataset
    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}

    val_loader = torch.utils.data.DataLoader(
        ShepardMetzler(
            root_dir=data_dir + '/torch' +
            '/val/spatial/'),  #, target_transform=transform_viewpoint
        batch_size=args.val_batch_size,
        shuffle=False,
        **kwargs)

    val_dir = data_dir + '/val/'
    if not os.path.exists(val_dir):
        os.mkdir(val_dir)

    val_dir = val_dir + 'spatial/'
    if not os.path.exists(val_dir):
        os.mkdir(val_dir)
    for g in range(1, 5):
        valSpatial(model, val_loader, val_dir, g)
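Examples 1 and 2 repeat the same seeding and CUDA boilerplate. One way to factor it out is a small helper like the following sketch (the setup_device name is hypothetical, not part of the original code):

import torch

def setup_device(seed, no_cuda=False):
    # Seed the CPU RNG, and the CUDA RNG when a GPU is available and allowed.
    use_cuda = not no_cuda and torch.cuda.is_available()
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)
    return use_cuda

# usage: args.cuda = setup_device(args.seed, args.no_cuda)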
Example no. 3

def main(step, data_bias, data_dir, directory):

    global args, model, netContent

    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    netG = GeneratorMM(args.upscale_factor)
    netD = DiscriminatorMM()
    netG.set_multiple_gpus()
    netD.set_multiple_gpus()
    if step > 0:
        model_dir = data_bias + '/retrain/model/modelG_' + str(step) + '.pkl'
        netG.load_state_dict(torch.load(model_dir))

        model_dir = data_bias + '/retrain/model/modelD_' + str(step) + '.pkl'
        netD.load_state_dict(torch.load(model_dir))
    if args.cuda:
        netG = netG.cuda()
        netD = netD.cuda()

    cudnn.benchmark = True

    # Load the dataset
    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}

    eval_loader = torch.utils.data.DataLoader(
        ShepardMetzler(root_dir=data_dir),
        batch_size=args.eval_batch_size,
        shuffle=False,
        **kwargs)

    eval_dir = directory

    evaluation(netG, netD, eval_loader, eval_dir)
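The torch.load calls above assume the checkpoint and the process agree on a device. A more portable variant passes map_location so a GPU-trained .pkl also loads in a CPU-only run; a sketch with a hypothetical wrapper (load_checkpoint is not part of the original code):

import torch

def load_checkpoint(net, path, use_cuda):
    # Remap storages to the target device while loading, then move the model.
    device = 'cuda' if use_cuda else 'cpu'
    net.load_state_dict(torch.load(path, map_location=device))
    return net.to(device)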
Example no. 4

    # Note: argparse's type=bool treats any non-empty string as True, so a
    # command-line value of "False" still parses as True; the flag effectively
    # only works through its default.
    parser.add_argument('--data_parallel', type=bool, help='whether to parallelise based on data (default: False)', default=False)
    args = parser.parse_args()

    # Create model and optimizer
    #model = GenerativeQueryNetwork(x_dim=3, v_dim=7, r_dim=256, h_dim=128, z_dim=64, L=8).to(device)
    model = GenerativeQueryNetwork(x_dim=3, v_dim=7, r_dim=256, h_dim=64, z_dim=32, L=3).to(device)
    model = nn.DataParallel(model) if args.data_parallel else model

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)

    # Rate annealing schemes
    sigma_scheme = Annealer(2.0, 0.7, 80000)
    mu_scheme = Annealer(5e-6, 5e-6, 1.6e5)

    # Load the dataset
    train_dataset = ShepardMetzler(root_dir=args.data_dir, fraction=args.fraction)
    valid_dataset = ShepardMetzler(root_dir=args.data_dir, fraction=args.fraction, train=False)

    kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)

    def step(engine, batch):
        model.train()

        x, v = batch
        x, v = x.to(device), v.to(device)
        x, v, x_q, v_q = partition(x, v)

        # Reconstruction, representation and divergence
        x_mu, _, kl = model(x, v, x_q, v_q)
Example no. 5

    parser.add_argument('--workers',  # reconstructed; matches the identical call in Example no. 6
                        type=int,
                        help='number of data loading workers',
                        default=2)
    parser.add_argument('--fp16',
                        type=bool,
                        help='whether to use FP16 (default: False)',
                        default=False)
    parser.add_argument(
        '--data_parallel',
        type=bool,
        help='whether to parallelise based on data (default: False)',
        default=False)

    args = parser.parse_args()

    dataset = ShepardMetzler(root_dir=args.data_dir,
                             target_transform=transform_viewpoint)

    # Pixel variance
    sigma_f, sigma_i = 0.7, 2.0

    # Learning rate
    mu_f, mu_i = 5e-5, 5e-4
    mu, sigma = mu_f, sigma_f

    # Create model and optimizer
    model = GenerativeQueryNetwork(x_dim=3,
                                   v_dim=7,
                                   r_dim=256,
                                   h_dim=128,
                                   z_dim=64,
                                   L=12).to(device)
Example no. 6

    parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
    parser.add_argument('--data_parallel', type=bool, help='whether to parallelise based on data (default: False)', default=False)
    args = parser.parse_args()

    # Create model and optimizer
    model = GenerativeQueryNetwork(x_dim=3, v_dim=7, r_dim=256, h_dim=128, z_dim=64, L=12).to(device)
    model = nn.DataParallel(model) if args.data_parallel else model

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)

    # Rate annealing schemes
    sigma_scheme = Annealer(2.0, 0.7, 2e5)
    mu_scheme = Annealer(5e-4, 5e-5, 1.6e6)

    # Load the dataset
    train_dataset = ShepardMetzler(root_dir=args.data_dir)
    valid_dataset = ShepardMetzler(root_dir=args.data_dir, train=False)

    kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)

    def step(engine, batch):
        model.train()

        x, v = batch
        x, v = x.to(device), v.to(device)
        x, v, x_q, v_q = partition(x, v)

        # Reconstruction, representation and divergence
        x_mu, _, kl = model(x, v, x_q, v_q)
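The step(engine, batch) signature in Examples 4 and 6 matches PyTorch Ignite's process-function protocol. A sketch of the wiring these excerpts imply (the Engine usage and the epoch count are assumptions, not shown in the snippets):

from ignite.engine import Engine

trainer = Engine(step)  # step(engine, batch) runs one training iteration
trainer.run(train_loader, max_epochs=200)  # max_epochs is illustrative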
Example no. 7
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn

# Load dataset
from shepardmetzler import ShepardMetzler
from torch.utils.data import DataLoader
from gqn import GenerativeQueryNetwork, partition

dataset = ShepardMetzler(
    "./shepard_metzler_5_parts")  ## <= Choose your data location
loader = DataLoader(dataset, batch_size=1, shuffle=True)

# Load model parameters onto CPU
state_dict = torch.load("./20_epoch_run/checkpoint_model_20.pth",
                        map_location="cpu")  ## <= Choose your model location

# Initialise new model with the settings of the trained one
model_settings = dict(x_dim=3, v_dim=7, r_dim=256, h_dim=128, z_dim=64, L=8)
model = GenerativeQueryNetwork(**model_settings)

# Load trained parameters, un-dataparallel if needed
if any("module" in k for k in state_dict.keys()):
    model = nn.DataParallel(model)
    model.load_state_dict(state_dict)
    model = model.module
else:
    model.load_state_dict(state_dict)

model  # display the model summary (notebook-style cell output)
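A minimal usage sketch for the loaded model: partition splits a batch's views into context and query, and the forward call returns the reconstruction, the representation, and the KL term, matching the unpacking used in Examples 4 and 6:

model.eval()
with torch.no_grad():
    x, v = next(iter(loader))
    x_c, v_c, x_q, v_q = partition(x, v)     # context views, query view
    x_mu, _, kl = model(x_c, v_c, x_q, v_q)  # x_mu is the reconstructed query image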
Example no. 8

def main(step, dataset, data_dir):
    global args
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Pixel variance
    global sigma_f, sigma_i
    sigma_f, sigma_i = 0.7, 2.0

    # Learning rate
    global mu_f, mu_i
    mu_f, mu_i = 5e-5, 5e-4
    global mu, sigma
    mu, sigma = mu_i, sigma_i
    s = 0

    if step > 0:
        file_dir = data_dir + '/rate/model_' + str(step) + '_hyper.txt'
        temp = np.loadtxt(file_dir)
        mu = temp[0]
        sigma = temp[1]
        s = int(temp[2])

    # Create model and optimizer
    model = GenerativeQueryNetwork(x_dim=3,
                                   v_dim=5,
                                   r_dim=256,
                                   h_dim=128,
                                   z_dim=64,
                                   L=12)
    n_parameters = sum([p.data.nelement() for p in model.parameters()])
    print('  + Number of params GQN: {}'.format(n_parameters))

    # Model optimisations
    model.set_multiple_gpus()
    if step > 0:
        model_dir = data_dir + '/model/model_' + str(step) + '.pkl'
        model.load_state_dict(torch.load(model_dir))
    if args.cuda:
        model.cuda()

    cudnn.benchmark = True

    optimizer = torch.optim.Adam(model.parameters(), lr=mu)

    # Load the dataset
    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}

    train_loader_spatial = torch.utils.data.DataLoader(
        ShepardMetzler(root_dir=data_dir + '/torch' +
                       '/train/'),  #, target_transform=transform_viewpoint
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    lRecord = []
    reconstruction_loss_train = []
    kl_divergence_train = []
    temp_loss_train = []
    full_loss_train = []

    for epoch in range(step + 1, args.epochs + step + 1):
        hyper = []
        print('------------------Spatial-------------------------')
        train_loader = train_loader_spatial
        lRecord.append('------------------Spatial-------------------------')

        reconstruction_loss, kl_divergence, temp_loss, full_loss = train(
            model, train_loader, optimizer, epoch, lRecord)

        s = s + 1
        reconstruction_loss_train.append(reconstruction_loss)
        kl_divergence_train.append(kl_divergence)
        temp_loss_train.append(temp_loss)
        full_loss_train.append(full_loss)

        # Anneal learning rate
        mu = max(mu_f + (mu_i - mu_f) * (1 - s / (1.6 * 10**6)), mu_f)
        for group in optimizer.param_groups:
            group["lr"] = mu * math.sqrt(1 - 0.999**s) / (1 - 0.9**s)

        # Anneal pixel variance
        sigma = max(sigma_f + (sigma_i - sigma_f) * (1 - s / (2 * 10**5)),
                    sigma_f)

        hyper.append(mu)
        hyper.append(sigma)
        hyper.append(s)

        if epoch % args.log_interval_record == 0:
            SaveRecord(data_dir, epoch, model, reconstruction_loss_train,
                       kl_divergence_train, temp_loss_train, full_loss_train,
                       lRecord, hyper)
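The two max(...) updates in the loop apply the same linear schedule, just with different endpoints and horizons; written once as a function, the equivalence to the Annealer(initial, final, total) objects used in the other examples becomes clear (a sketch, not code from the repository):

def anneal(init, final, total, s):
    # Linear interpolation from init at s = 0 to final at s = total,
    # clamped at final thereafter; identical to the mu/sigma updates above.
    return max(final + (init - final) * (1 - s / total), final)

# e.g. mu    = anneal(mu_i, mu_f, 1.6e6, s)
#      sigma = anneal(sigma_i, sigma_f, 2e5, s)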
Example no. 9
modelPath = "D:\\Projekte\\MachineLearning\\generative-query-network-pytorch\\model\\"
modelFullPath = modelPath + modelName

model_trained = GenerativeQueryNetwork(x_dim=3,
                                       v_dim=7,
                                       r_dim=256,
                                       h_dim=128,
                                       z_dim=64,
                                       L=8)
pretrained_dict = torch.load(modelFullPath, map_location='cpu')  #.to(device)
model_trained.load_state_dict(pretrained_dict)
model_trained = model_trained.to(device)

# datapath
datapath = "D:\\Projekte\\MachineLearning\\DataSets\\shepard_metzler_7_parts"
valid_dataset = ShepardMetzler(root_dir=datapath, fraction=1.0, train=False)
valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)

valid_imgs, valid_viewpoints = next(iter(valid_loader))
part_valid_imgs, part_valid_viewpoints, part_context_imgs, part_context_viewpoints = partition(
    valid_imgs, valid_viewpoints)

batch_size, num_views, channels, height, width = part_valid_imgs.shape

model_trained.eval()

with torch.no_grad():

    for valid_imgs, viewpoints, context_img, context_viewpoint in zip(
            part_valid_imgs, part_valid_viewpoints, part_context_imgs,
            part_context_viewpoints):
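        # The loop body is not included in this excerpt. A plausible
        # completion, mirroring the forward call of the other examples
        # (sketch: the role of each partition output and the unsqueeze(0)
        # calls, which restore the batch dimension dropped by zip, are
        # assumptions):
        x_mu, _, _ = model_trained(valid_imgs.unsqueeze(0),
                                   viewpoints.unsqueeze(0),
                                   context_img.unsqueeze(0),
                                   context_viewpoint.unsqueeze(0))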
Example no. 10
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn

# Load dataset
from shepardmetzler import ShepardMetzler
from torch.utils.data import DataLoader
from gqn import GenerativeQueryNetwork, partition

dataset = ShepardMetzler("./our_data_vol2") ## <= Choose your data location
loader = DataLoader(dataset, batch_size=1, shuffle=True)

# Load model parameters onto CPU
state_dict = torch.load("./checkpoint_model_253.pth", map_location="cpu") ## <= Choose your model location

# Initialise new model with the settings of the trained one
model_settings = dict(x_dim=3, v_dim=7, r_dim=512, h_dim=128, z_dim=64, L=8)
model = GenerativeQueryNetwork(**model_settings)

# Load trained parameters, un-dataparallel if needed
if any("module" in k for k in state_dict.keys()):
    model = nn.DataParallel(model)
    model.load_state_dict(state_dict)
    model = model.module
else:
    model.load_state_dict(state_dict)

model  # display the model summary (notebook-style cell output)

# We load a batch of a single image containing a single object seen from 15 different viewpoints.
Example no. 11
def main(step, dataset, data_dir, data_dir_bias, model_name):

    global args, model, netContent, lr

    args = parser.parse_args()
    lr = args.lr
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    netG = GeneratorMM(args.upscale_factor)
    n_parameters = sum([p.data.nelement() for p in netG.parameters()])
    print('  + Number of params: {}'.format(n_parameters))

    netD = DiscriminatorMM()
    n_parameters = sum([p.data.nelement() for p in netD.parameters()])
    print('  + Number of params: {}'.format(n_parameters))

    generator_criterion = GeneratorLoss()
    netG.set_multiple_gpus()
    netD.set_multiple_gpus()
    if step > 0:
        model_dir = data_dir + '/model/modelG_' + str(step) + '.pkl'
        netG.load_state_dict(torch.load(model_dir))

        model_dir = data_dir + '/model/modelD_' + str(step) + '.pkl'
        netD.load_state_dict(torch.load(model_dir))
    if args.cuda:
        netG = netG.cuda()
        netD = netD.cuda()
        generator_criterion = generator_criterion.cuda()
    cudnn.benchmark = True

    optimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(0.9, 0.999))
    optimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(0.9, 0.999))

    # Load the dataset
    kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}

    train_loader = torch.utils.data.DataLoader(
        ShepardMetzler(root_dir=data_dir_bias + '/torch_super/' + model_name +
                       '/train/bias_0/'),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        ShepardMetzler(root_dir=data_dir_bias + '/torch_super/' + model_name +
                       '/test/bias_0/'),
        batch_size=args.test_batch_size,
        shuffle=False,
        **kwargs)

    lRecord = []
    generator_loss_train = []
    discriminator_loss_train = []
    a_loss_train = []
    p_loss_train = []
    i_loss_train = []
    t_loss_train = []

    generator_loss_test = []
    discriminator_loss_test = []
    a_loss_test = []
    p_loss_test = []
    i_loss_test = []
    t_loss_test = []
    start = 0
    for epoch in range(step + 1, args.epochs + step + 1):
        generator_loss, a_loss, p_loss, i_loss, t_loss, discriminator_loss = train(
            train_loader, optimizerG, optimizerD, netG, netD,
            generator_criterion, epoch, lRecord)
        generator_loss_train.append(generator_loss)
        a_loss_train.append(a_loss)
        p_loss_train.append(p_loss)
        i_loss_train.append(i_loss)
        t_loss_train.append(t_loss)
        discriminator_loss_train.append(discriminator_loss)

        lr = adjust_learning_rate(optimizerG, epoch - 1)
        for param_group in optimizerG.param_groups:
            param_group["lr"] = lr

        lr = adjust_learning_rate(optimizerD, epoch - 1)
        for param_group in optimizerD.param_groups:
            param_group["lr"] = lr

        if epoch % args.log_interval_test == 0:
            test_dir = data_dir + '/test/' + 'model' + str(
                epoch) + '_scene' + str(start + 1) + '/'
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)

            generator_loss, a_loss, p_loss, i_loss, t_loss, discriminator_loss = test(
                netG, netD, start, test_loader, epoch, generator_criterion,
                lRecord, test_dir)
            start = (start + 1) % len(test_loader)
            generator_loss_test.append(generator_loss)
            a_loss_test.append(a_loss)
            p_loss_test.append(p_loss)
            i_loss_test.append(i_loss)
            t_loss_test.append(t_loss)
            discriminator_loss_test.append(discriminator_loss)

        if epoch % args.log_interval_record == 0:
            SaveRecord(data_dir, epoch, netG, netD, generator_loss_train,
                       a_loss_train, p_loss_train, i_loss_train, t_loss_train,
                       discriminator_loss_train, generator_loss_test,
                       a_loss_test, p_loss_test, i_loss_test, t_loss_test,
                       discriminator_loss_test, lRecord)
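adjust_learning_rate is called above but not shown: it takes the optimizer and a zero-based epoch and returns the new rate, which the loop then writes into the param groups itself. A step-decay sketch with that shape (the decay factor and step size are assumptions):

def adjust_learning_rate(optimizer, epoch, step_size=100, gamma=0.5):
    # The caller applies the returned rate to optimizer.param_groups itself,
    # so the optimizer argument is kept only for signature compatibility.
    return args.lr * (gamma ** (epoch // step_size))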
Example no. 12
    # First two kwargs reconstructed from the identical constructor in Example no. 4.
    model = GenerativeQueryNetwork(x_dim=3,
                                   v_dim=7,
                                   r_dim=256,
                                   h_dim=128,
                                   z_dim=64,
                                   L=8).to(device)
    model = nn.DataParallel(model) if args.data_parallel else model

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)

    # Rate annealing schemes
    sigma_scheme = Annealer(2.0, 0.7, 80000)
    mu_scheme = Annealer(5e-6, 5e-6, 1.6e5)
    print('Creating train dataset')
    # Load the dataset
    train_dataset = ShepardMetzler(root_dir=args.data_dir, fraction=args.fraction,
                                   dataset_folder_length=args.dataset_folder_length_train)
    print('Creating test dataset')
    valid_dataset = ShepardMetzler(root_dir=args.data_dir, fraction=args.fraction,
                                   dataset_folder_length=args.dataset_folder_length_test, train=False)

    kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
    print('train set:', len(train_dataset))
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kwargs)
    print('test set:', len(valid_dataset))
    valid_loader = DataLoader(valid_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kwargs)
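Example 12 stops after the loaders. For completeness, a sketch of how the two Annealer schemes above are typically consumed each training step in this codebase (the generator-style next() protocol is an assumption inferred from the Annealer(initial, final, total) constructor):

sigma = next(sigma_scheme)  # pixel variance for the likelihood term, annealed per step
mu = next(mu_scheme)        # annealed learning rate
for group in optimizer.param_groups:
    group["lr"] = mu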