Example #1
def initrun(dataloader, netD, netG, args):
    # Dispatch to the requested training, generation, or
    # representation-learning routine based on args.mode
    if args.mode in ['trainsingle', 'trainrandom']:
        mes = train_single_DRGAN(dataloader, netD, netG, args)
    elif args.mode == 'trainmulti':
        mes = train_multiple_DRGAN(dataloader, netD, netG, args)
    elif args.mode in ['gensingle', 'genmulti']:
        mes = generate_image(dataloader, netG, args)
    elif args.mode in ['idensingle', 'idenmulti']:
        mes = representation_learning(dataloader, netG, args)
    else:
        raise ValueError('Unknown mode: {}'.format(args.mode))

    return mes
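
For context, here is a minimal, self-contained sketch of the same mode-dispatch pattern written as a dictionary of handlers instead of an if/elif chain. The handler names and the argparse setup are placeholders for illustration, not part of the original project:

import argparse

# Placeholder handlers standing in for the real train/generate/identify routines
def handle_train(args):
    return 'trained in mode {}'.format(args.mode)

def handle_generate(args):
    return 'generated images in mode {}'.format(args.mode)

def handle_identify(args):
    return 'extracted identity features in mode {}'.format(args.mode)

# Map each mode string to its handler, mirroring the branches in initrun()
DISPATCH = {
    'trainsingle': handle_train,
    'trainrandom': handle_train,
    'trainmulti': handle_train,
    'gensingle': handle_generate,
    'genmulti': handle_generate,
    'idensingle': handle_identify,
    'idenmulti': handle_identify,
}

parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=sorted(DISPATCH), default='trainsingle')
args = parser.parse_args([])  # empty list: fall back to the default mode

print(DISPATCH[args.mode](args))

A dictionary keeps the valid mode strings in one place, so the argument parser and the dispatcher cannot drift out of sync.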
Example #2
            else:
                D = multi_model.Discriminator(Nd, Np, channel_num)
                G = multi_model.Generator(Np, Nz, channel_num,
                                          args.images_perID)
    else:
        # Resume from a previously saved snapshot
        print('\nLoading model from [%s]...' % args.snapshot)
        try:
            D = torch.load('{}_D.pt'.format(args.snapshot))
            G = torch.load('{}_G.pt'.format(args.snapshot))
        except FileNotFoundError:
            print("Sorry, this snapshot doesn't exist.")
            exit()

    if not args.generate:
        if not args.multi_DRGAN:
            train_single_DRGAN(images, id_labels, pose_labels, Nd, Np, Nz, D,
                               G, args)
        else:
            # multi-image DR-GAN needs each batch to split evenly per identity
            if args.batch_size % args.images_perID == 0:
                train_multiple_DRGAN(images, id_labels, pose_labels, Nd, Np,
                                     Nz, D, G, args)
            else:
                print("Please give a valid combination of batch_size and "
                      "images_perID (batch_size must be a multiple of "
                      "images_perID).")
                exit()
    else:
        # pose_code = [] # specify arbitrary pose code for every image
        pose_code = np.random.uniform(-1, 1, (images.shape[0], Np))
        features = Generate_Image(images, pose_code, Nz, G, args)
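
The commented-out `pose_code = []` line above suggests you can supply your own pose codes instead of sampling them. Below is a minimal sketch, assuming the generator accepts the same continuous codes in [-1, 1] used above; `num_images` and `Np` are placeholders for `images.shape[0]` and the pose-code dimensionality:

import numpy as np

num_images = 8   # placeholder for images.shape[0]
Np = 13          # placeholder for the pose-code dimensionality

# Hypothetical: reuse one fixed pose code for every image instead of drawing
# a new random code per image, so all outputs share the same target pose.
fixed_pose = np.random.uniform(-1, 1, (1, Np))
pose_code = np.tile(fixed_pose, (num_images, 1))

print(pose_code.shape)   # (8, 13)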