Example #1
    def load_network_stageII(self):
        from model import G_NET, D_NET

        netG = G_NET()
        netG.apply(weights_init)
        print(netG)
        if cfg.NET_G != '':
            state_dict = \
                torch.load(cfg.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_G)

        netD = D_NET()
        netD.apply(weights_init)
        if cfg.NET_D != '':
            state_dict = \
                torch.load(cfg.NET_D,
                           map_location=lambda storage, loc: storage)
            netD.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_D)
        print(netD)

        if cfg.CUDA:
            netG.cuda()
            netD.cuda()
        return netG, netD
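
The `map_location=lambda storage, loc: storage` idiom used above maps every tensor in the checkpoint onto the CPU, so the file loads even on a machine without the GPU it was saved from. Since PyTorch 0.4 the same thing can be written directly; a minimal sketch (the path here is hypothetical):

import torch

# equivalent to map_location=lambda storage, loc: storage
state_dict = torch.load('netG.pth', map_location='cpu')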
Example #2
def models(modelname, cfg, word_len):
    #print(word_len)
    text_encoder = cache.get(modelname + '_text_encoder', None)
    if text_encoder is None:
        #print("text_encoder not cached")
        text_encoder = RNN_ENCODER(word_len, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = torch.load(cfg.TRAIN.NET_E,
                                map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        if cfg.CUDA:
            text_encoder.cuda()
        text_encoder.eval()
        cache[modelname + '_text_encoder'] = text_encoder

    netG = cache.get(modelname + '_netG', None)
    if netG is None:
        #print("netG not cached")
        netG = G_NET()
        state_dict = torch.load(cfg.TRAIN.NET_G,
                                map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        if cfg.CUDA:
            netG.cuda()
        netG.eval()
        cache[modelname + '_netG'] = netG

    return text_encoder, netG
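
A hypothetical driver for the cached loader above, assuming `cache` is a plain dict (anything with `.get()` and item assignment works) and an illustrative vocabulary size; none of these values are fixed by the source:

cache = {}

text_encoder, netG = models('coco', cfg, word_len=5450)    # first call builds and caches
text_encoder2, netG2 = models('coco', cfg, word_len=5450)  # second call is a cache hit
assert netG is netG2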
Example #3
def models(word_len):
    print('Loading Model', word_len)
    text_encoder = cache.get('text_encoder')
    print('Text encoder', text_encoder)
    if text_encoder is None:
        print("text_encoder not cached")
        text_encoder = RNN_ENCODER(word_len, nhidden=256)
        state_dict = torch.load('../DAMSMencoders/coco/text_encoder100.pth', map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        print('loaded text encoder')
        if cfg.CUDA:
            text_encoder.cuda()
            print('text encoder cuda')
        text_encoder.eval()
        print('text encoder eval')
        #cache.set('text_encoder', text_encoder, timeout=60 * 60 * 24)

    print('Got Text Encoder, moving to netG')
    netG = cache.get('netG')
    if netG is None:
        print("netG not cached")
        netG = G_NET()
        state_dict = torch.load('../models/coco_AttnGAN2.pth', map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        if cfg.CUDA:
            netG.cuda()
        netG.eval()
        #cache.set('netG', netG, timeout=60 * 60 * 24)
    print('Got NetG')
    return text_encoder, netG
Example #4
def test(cfg_file, embedding_t7_path):

    cfg_from_file(cfg_file)

    cfg.GPU_ID = 0

    print('Using config:')
    pprint.pprint(cfg)

    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=[0])
    print(netG)
    #state_dict = torch.load('../gan/models/birds_3stages/netG_26000.pth')
    print(cfg.TRAIN.NET_G)
    state_path = '/home/ubuntu/GANTextToImage/gan/' + cfg.TRAIN.NET_G
    state_dict = torch.load(state_path, map_location=lambda storage, loc: storage)
    netG.load_state_dict(state_dict)
    # print('Load ', '../gan/models/flowers_1stage/netG_6000.pth')

    # the path to save generated images
    s_tmp = cfg.TRAIN.NET_G
    istart = s_tmp.rfind('_') + 1
    iend = s_tmp.rfind('.')
    iteration = int(s_tmp[istart:iend])
    s_tmp = s_tmp[:s_tmp.rfind('/')]
    save_dir = '%s/iteration%d' % (s_tmp, iteration)

    nz = cfg.GAN.Z_DIM
    noise = Variable(torch.FloatTensor(1, nz))
    if cfg.CUDA:
        netG.cuda()
        noise = noise.cuda()

    # switch to evaluate mode
    netG.eval()

    t_embedding = load_lua(embedding_t7_path)
    t_embedding = t_embedding.unsqueeze(0)
    print(t_embedding.size())   

    if cfg.CUDA:
        t_embedding = Variable(t_embedding).cuda()
    else:
        t_embedding = Variable(t_embedding)
    # print(t_embeddings[:, 0, :], t_embeddings.size(1))

    embedding_dim = t_embedding.size(1)
    noise.data.resize_(1, nz)
    noise.data.normal_(0, 1)

    fake_img_list = []
    # for i in range(embedding_dim):
    fake_imgs, _, _ = netG(noise, t_embedding[:, 0, :])
    if cfg.TEST.B_EXAMPLE:
        # fake_img_list.append(fake_imgs[0].data.cpu())
        # fake_img_list.append(fake_imgs[1].data.cpu())
        fake_img_list.append(fake_imgs[2].data.cpu())
    else:
        save_singleimages(fake_imgs[-1], '/home/ubuntu/GANTextToImage/static', 0, 256)
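
`load_lua` comes from `torch.utils.serialization`, which was removed after PyTorch 0.4. On newer installs a `.t7` embedding file can be read with the standalone `torchfile` package instead; a sketch under the assumption that the file holds a single float tensor:

import torch
import torchfile  # pip install torchfile

arr = torchfile.load(embedding_t7_path)  # tensors come back as numpy arrays
t_embedding = torch.from_numpy(arr).float().unsqueeze(0)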
Example #5
def models(word_len):
    #print(word_len)
    text_encoder = cache.get('text_encoder')
    if text_encoder is None:
        #print("text_encoder not cached")
        text_encoder = RNN_ENCODER(word_len, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        if cfg.CUDA:
            text_encoder.cuda()
        text_encoder.eval()
        cache.set('text_encoder', text_encoder, timeout=60 * 60 * 24)

    netG = cache.get('netG')
    if netG is None:
        #print("netG not cached")
        netG = G_NET()
        state_dict = torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        if cfg.CUDA:
            netG.cuda()
        netG.eval()
        cache.set('netG', netG, timeout=60 * 60 * 24)

    return text_encoder, netG
Example #6
def load_network(gpus):
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    if cfg.TREE.BRANCH_NUM > 0:
        netsD.append(D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        netsD.append(D_NET128())
    if cfg.TREE.BRANCH_NUM > 2:
        netsD.append(D_NET256())
    if cfg.TREE.BRANCH_NUM > 3:
        netsD.append(D_NET512())
    if cfg.TREE.BRANCH_NUM > 4:
        netsD.append(D_NET1024())
    # TODO: if cfg.TREE.BRANCH_NUM > 5:

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        # print(netsD[i])
    print('# of netsD', len(netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        try:
            istart = cfg.TRAIN.NET_G.rfind('_') + 1
            iend = cfg.TRAIN.NET_G.rfind('.')
            count = cfg.TRAIN.NET_G[istart:iend]
            count = int(count)
        except ValueError:
            last_run_dir = cfg.DATA_DIR + '/' + cfg.LAST_RUN_DIR + '/Model'
            with open(last_run_dir + '/count.txt', 'r') as f:
                count = int(f.read())

        count += 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    inception_model = INCEPTION_V3()

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
        inception_model = inception_model.cuda()
    inception_model.eval()

    return netG, netsD, len(netsD), inception_model, count
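
The `count` bookkeeping parses the iteration number out of the checkpoint filename so a resumed run continues numbering where the last one stopped. A worked example with a hypothetical path:

path = 'models/netG_26000.pth'      # hypothetical checkpoint name
istart = path.rfind('_') + 1        # index just past the last '_'
iend = path.rfind('.')              # index of the extension dot
count = int(path[istart:iend]) + 1  # 26000 -> resume counting at 26001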
Example #7
def load_network(gpus, dictionary=None):
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    if cfg.TREE.BRANCH_NUM > 0:
        netsD.append(D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        netsD.append(D_NET128())
    if cfg.TREE.BRANCH_NUM > 2:
        netsD.append(D_NET256())
    if cfg.TREE.BRANCH_NUM > 3:
        netsD.append(D_NET512())
    if cfg.TREE.BRANCH_NUM > 4:
        netsD.append(D_NET1024())
    # TODO: if cfg.TREE.BRANCH_NUM > 5:

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        # print(netsD[i])
    print('# of netsD', len(netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    inception_model = INCEPTION_V3()

    embedding_model = load_embedding_model(dictionary)
    for param in embedding_model.parameters():
        param.requires_grad = False

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
        inception_model = inception_model.cuda()
        embedding_model.cuda()

    inception_model.eval()

    return netG, netsD, len(netsD), inception_model, embedding_model, count
Example #8
def generate(captions, copies):
    with open('data/captions.pickle', 'rb') as f:
        x = pickle.load(f)
    # print(x)
    ixtoword = x[2]
    wordtoix = x[3]
    del x
    word_len = len(wordtoix)

    text_encoder = RNN_ENCODER(word_len, nhidden=cfg.TEXT.EMBEDDING_DIM)
    state_dict = torch.load(cfg.TRAIN.NET_E,
                            map_location=lambda storage, loc: storage)
    text_encoder.load_state_dict(state_dict)
    text_encoder.eval()

    netG = G_NET()
    state_dict = torch.load(cfg.TRAIN.NET_G,
                            map_location=lambda storage, loc: storage)
    netG.load_state_dict(state_dict)
    # netG.eval()
    # seed = 100
    # random.seed(seed)
    # np.random.seed(seed)
    # torch.manual_seed(seed)
    # load word vector
    captions, cap_lens = vectorize_caption(wordtoix, captions, copies)
    n_words = len(wordtoix)

    # only one to generate
    batch_size = captions.shape[0]
    nz = cfg.GAN.Z_DIM
    captions = Variable(torch.from_numpy(captions), volatile=True)
    cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
    noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)

    #######################################################
    # (1) Extract text embeddings
    #######################################################
    hidden = text_encoder.init_hidden(batch_size)
    words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
    mask = (captions == 0)

    #######################################################
    # (2) Generate fake images
    #######################################################
    noise.data.normal_(0, 1)
    fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)

    # G attention
    cap_lens_np = cap_lens.cpu().data.numpy()

    #    # storing to blob storage
    # container_name = "images"
    # full_path = "https://attgan.blob.core.windows.net/images/%s"
    # prefix = datetime.now().strftime('%Y/%B/%d/%H_%M_%S_%f')
    imgs = []
    # only look at first one
    #j = 0
    return imglist(fake_imgs, batch_size)
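
`volatile=True` was removed in PyTorch 0.4; on current versions the inference section above would sit inside `torch.no_grad()` instead. A minimal sketch of the same steps, not the project's actual code:

with torch.no_grad():
    captions_t = torch.from_numpy(captions)
    cap_lens_t = torch.from_numpy(cap_lens)
    noise = torch.randn(batch_size, nz)  # replaces FloatTensor(...).normal_(0, 1)
    hidden = text_encoder.init_hidden(batch_size)
    words_embs, sent_emb = text_encoder(captions_t, cap_lens_t, hidden)
    mask = (captions_t == 0)
    fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)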
Example #9
    def evaluate_finegan(self):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            model_dict = netG.state_dict()

            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)['birds128']

            state_dict = {k: v for k, v in state_dict.items() if k in model_dict}

            model_dict.update(state_dict)
            netG.load_state_dict(model_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # Print Generator layers
            print(netG)

            nz = cfg.GAN.Z_DIM
            noise = torch.FloatTensor(self.batch_size, nz)
            noise.data.normal_(0, 1)

            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            netG.eval()

            background_class = cfg.TEST_BACKGROUND_CLASS
            parent_class = cfg.TEST_PARENT_CLASS
            child_class = cfg.TEST_CHILD_CLASS
            bg_code = torch.zeros([self.batch_size, cfg.FINE_GRAINED_CATEGORIES])
            p_code = torch.zeros([self.batch_size, cfg.SUPER_CATEGORIES])
            c_code = torch.zeros([self.batch_size, cfg.FINE_GRAINED_CATEGORIES])

            for j in range(self.batch_size):
                bg_code[j][background_class] = 1
                p_code[j][parent_class] = 1
                c_code[j][child_class] = 1

            fake_imgs, fg_imgs, mk_imgs, fgmk_imgs = netG(noise, c_code, p_code, bg_code) # Forward pass through the generator

            self.save_image(fake_imgs[0][0], self.save_dir, 'background')
            self.save_image(fake_imgs[1][0], self.save_dir, 'parent_final')
            self.save_image(fake_imgs[2][0], self.save_dir, 'child_final')
            self.save_image(fg_imgs[0][0], self.save_dir, 'parent_foreground')
            self.save_image(fg_imgs[1][0], self.save_dir, 'child_foreground')
            self.save_image(mk_imgs[0][0], self.save_dir, 'parent_mask')
            self.save_image(mk_imgs[1][0], self.save_dir, 'child_mask')
            self.save_image(fgmk_imgs[0][0], self.save_dir, 'parent_foreground_masked')
            self.save_image(fgmk_imgs[1][0], self.save_dir, 'child_foreground_masked')
Example #10
def load_network(gpus):
    netEn_img = MLP_ENCODER_IMG()
    netEn_img.apply(weights_init)
    netEn_img = torch.nn.DataParallel(netEn_img, device_ids=gpus)
    print(netEn_img)

    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    if cfg.TREE.BRANCH_NUM > 0:
        netsD.append(D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        netsD.append(D_NET128())
    if cfg.TREE.BRANCH_NUM > 2:
        netsD.append(D_NET256())

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
    print('# of netsD', len(netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    if cfg.TRAIN.NET_MLP_IMG != '':
        state_dict = torch.load(cfg.TRAIN.NET_MLP_IMG)
        netEn_img.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_MLP_IMG)

    inception_model = INCEPTION_V3()

    if cfg.CUDA:
        netG.cuda()
        netEn_img = netEn_img.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
        inception_model = inception_model.cuda()
    inception_model.eval()

    return netG, netsD, netEn_img, inception_model, len(netsD), count
Example #11
    def build_generator(self, net_G):
        # Load trained generator model
        netG = G_NET()
        state_dict = torch.load(net_G,
                                map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        netG.eval()

        return netG
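
Note that this variant neither applies `weights_init` (harmless to skip, since `load_state_dict` overwrites every parameter) nor wraps the model in `DataParallel`, so it expects a checkpoint saved from an unwrapped `G_NET`. A hypothetical call:

netG = self.build_generator('output/netG_55000.pth')  # hypothetical checkpoint path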
Example #12
    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            s_tmp = cfg.TRAIN.NET_G
            istart = s_tmp.rfind('_') + 1
            iend = s_tmp.rfind('.')
            iteration = int(s_tmp[istart:iend])
            s_tmp = s_tmp[:s_tmp.rfind('/')]
            save_dir = '%s/iteration%d/%s' % (s_tmp, iteration, split_dir)
            if cfg.TEST.B_EXAMPLE:
                folder = '%s/super' % (save_dir)
            else:
                folder = '%s/single' % (save_dir)
            print('Make a new folder: ', folder)
            mkdir_p(folder)

            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(self.batch_size, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            num_batches = int(cfg.TEST.SAMPLE_NUM / self.batch_size)
            cnt = 0
            for step in range(num_batches):
                noise.data.normal_(0, 1)
                # modified block: this netG also returns per-layer outputs
                fake_imgs, layers_output, _, _ = netG(noise)
                if len(layers_output) != len(lamdas):
                    print("please check lamdas length")
                if cfg.TEST.B_EXAMPLE:
                    self.save_superimages(fake_imgs[-1], folder, cnt, 256)
                else:
                    self.save_singleimages(fake_imgs[-1], folder, cnt, 256)
                    # self.save_singleimages(fake_imgs[-2], folder, 128)
                    # self.save_singleimages(fake_imgs[-3], folder, 64)
                cnt += self.batch_size
Example #13
def load_network(gpus):
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    if cfg.TREE.BRANCH_NUM > 0:
        netsD.append(D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        netsD.append(D_NET128())
    if cfg.TREE.BRANCH_NUM > 2:
        netsD.append(D_NET256())
    if cfg.TREE.BRANCH_NUM > 3:
        netsD.append(D_NET512())
    if cfg.TREE.BRANCH_NUM > 4:
        netsD.append(D_NET1024())
    # TODO: if cfg.TREE.BRANCH_NUM > 5:

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        # print(netsD[i])
    print('# of netsD', len(netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    inception_model = INCEPTION_V3()

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
        inception_model = inception_model.cuda()
    inception_model.eval()

    return netG, netsD, len(netsD), inception_model, count
Example #14
def load_network(gpus):
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    # 128 * 128
    if cfg.TREE.BRANCH_NUM > 1:
        # 3 discriminators for background, parent, and child stage
        for i in range(3):
            netsD.append(D_NET128(i))

    # 256 * 256
    if cfg.TREE.BRANCH_NUM > 2:
        # 3 discriminators for background, parent, and child stage
        for i in range(3):
            netsD.append(D_NET256(i))

    # for i in range(3): # 3 discriminators for background, parent and child stage
    #     netsD.append(D_NET128(i))

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        print(netsD[i])

    count = 0

    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s_%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()

    return netG, netsD, len(netsD), count
Example #15
File: trainer.py  Project: zxs789/Obj-GAN
    def build_models(self):
        netG = G_NET(len(self.cats_index_dict))
        netINSD = INS_D_NET(len(self.cats_index_dict))
        netGLBD = GLB_D_NET(len(self.cats_index_dict))

        netG.apply(weights_init)
        netINSD.apply(weights_init)
        netGLBD.apply(weights_init)

        if cfg.CUDA:
            netG.cuda()
            netINSD.cuda()
            netGLBD.cuda()

            if len(cfg.GPU_IDS) > 1:
                netG = nn.DataParallel(netG)
                netG.to(self.device)
                netINSD = nn.DataParallel(netINSD)
                netINSD.to(self.device)
                netGLBD = nn.DataParallel(netGLBD)
                netGLBD.to(self.device)

        # ########################################################### #
        epoch = 0
        if cfg.TRAIN.NET_G != '':
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', cfg.TRAIN.NET_G)
            filename = path_leaf(cfg.TRAIN.NET_G)
            istart = filename.rfind('_') + 1
            iend = filename.rfind('.')
            epoch = filename[istart:iend]
            epoch = int(epoch) + 1

            Gname = cfg.TRAIN.NET_G
            s_tmp = Gname[:Gname.rfind('/')]
            Dname = '%s/netINSD.pth' % (s_tmp)
            print('Load INSD from: ', Dname)
            state_dict = \
                torch.load(Dname, map_location=lambda storage, loc: storage)
            netINSD.load_state_dict(state_dict)

            s_tmp = Gname[:Gname.rfind('/')]
            Dname = '%s/netGLBD.pth' % (s_tmp)
            print('Load GLBD from: ', Dname)
            state_dict = \
                torch.load(Dname, map_location=lambda storage, loc: storage)
            netGLBD.load_state_dict(state_dict)

        return [netG, netINSD, netGLBD, epoch]
Example #16
    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            s_tmp = cfg.TRAIN.NET_G
            istart = s_tmp.rfind('_') + 1
            iend = s_tmp.rfind('.')
            iteration = int(s_tmp[istart:iend])
            s_tmp = s_tmp[:s_tmp.rfind('/')]
            save_dir = '%s/iteration%d/%s' % (s_tmp, iteration, split_dir)
            if cfg.TEST.B_EXAMPLE:
                folder = '%s/super' % (save_dir)
            else:
                folder = '%s/single' % (save_dir)
            print('Make a new folder: ', folder)
            mkdir_p(folder)

            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(self.batch_size, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            num_batches = int(cfg.TEST.SAMPLE_NUM / self.batch_size)
            cnt = 0
            for step in range(num_batches):
                noise.data.normal_(0, 1)
                fake_imgs, _, _ = netG(noise)
                if cfg.TEST.B_EXAMPLE:
                    self.save_superimages(fake_imgs[-1], folder, cnt, 256)
                else:
                    self.save_singleimages(fake_imgs[-1], folder, cnt, 256)
                    # self.save_singleimages(fake_imgs[-2], folder, 128)
                    # self.save_singleimages(fake_imgs[-3], folder, 64)
                cnt += self.batch_size
Example #17
def load_checkpoint(modelpath):
    s_gpus = cfg.GPU_ID.split(',')
    gpus = [int(ix) for ix in s_gpus]
    torch.cuda.set_device(gpus[0])
    state_dict = torch.load(modelpath, map_location=lambda storage, loc: storage)
    #print(checkpoint.keys())
    #model = checkpoint['model']
    #model.load_state_dict(checkpoint['state_dict'])
    #for parameter in model.parameters():
    #    parameter.requires_grad = False
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    netG.load_state_dict(state_dict)
    netG.eval()
    return netG
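
Because `netG` is wrapped in `torch.nn.DataParallel` before `load_state_dict`, the checkpoint keys must carry the `module.` prefix that the wrapper adds; Example #20 below goes the opposite direction and strips the prefix for an unwrapped model. A defensive sketch that handles either kind of checkpoint (assuming all keys share one convention):

state_dict = torch.load(modelpath, map_location='cpu')
if not next(iter(state_dict)).startswith('module.'):
    # checkpoint was saved from an unwrapped model; add the prefix DataParallel expects
    state_dict = {'module.' + k: v for k, v in state_dict.items()}
netG.load_state_dict(state_dict)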
Example #18
def load_network(gpus):
    netG = G_NET(start_depth)
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    netsD.append(D_NET_BG(start_depth))
    netsD.append(D_NET_PC(1, start_depth))
    netsD.append(D_NET_PC(2, start_depth))
    netsD.append(D_NET_BG_PG(start_depth))

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        print(netsD[i])

    count = 0

    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('netG_') + 5
        iend = cfg.TRAIN.NET_G.rfind('_depth')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count)
        istart = cfg.TRAIN.NET_G.rfind('depth')
        iend = cfg.TRAIN.NET_G.rfind('.')
        _depth = cfg.TRAIN.NET_G[istart:iend]

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s%d_%s.pth' % (cfg.TRAIN.NET_D, i, _depth))
            state_dict = torch.load('%s%d_%s.pth' %
                                    (cfg.TRAIN.NET_D, i, _depth))
            netsD[i].load_state_dict(state_dict)

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()

    return netG, netsD, len(netsD), count
Example #19
def models(word_len):
    text_encoder = cache.get('text_encoder')
    if text_encoder is None:
        text_encoder = RNN_ENCODER(word_len, nhidden=256)
        state_dict = torch.load('../DAMSMencoders/coco/text_encoder100.pth', map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        if cfg.CUDA:
            text_encoder.cuda()
        text_encoder.eval()
        #cache.set('text_encoder', text_encoder, timeout=60 * 60 * 24)

    netG = cache.get('netG')
    if netG is None:
        netG = G_NET()
        state_dict = torch.load('../models/coco_AttnGAN2.pth', map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        if cfg.CUDA:
            netG.cuda()
        netG.eval()
        #cache.set('netG', netG, timeout=60 * 60 * 24)
    return text_encoder, netG
Example #20
def load_checkpoint(modelpath):
    # s_gpus = cfg.GPU_ID.split(',')
    # gpus = [int(ix) for ix in s_gpus]
    # torch.cuda.set_device(gpus[0])
    # state_dict = torch.load(modelpath, map_location=lambda storage, loc: storage)
    # netG = G_NET()
    # netG.apply(weights_init)
    # netG = torch.nn.DataParallel(netG, device_ids=gpus)
    # netG.load_state_dict(state_dict)
    # netG.eval()
    state_dict = torch.load(modelpath, map_location='cpu')
    # strip the 'module.' prefix (7 characters) that nn.DataParallel adds to keys
    new_state_dict = {}
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v
    netG = G_NET()

    netG.load_state_dict(new_state_dict)
    netG.eval()

    return netG
Example #21
torch.manual_seed(manualSeed)
torch.cuda.manual_seed_all(manualSeed)

gpus = [0]
num_gpus = 1
torch.cuda.set_device(gpus[0])
cudnn.benchmark = True
batch_size = 2

netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=gpus)
state_dict = \
    torch.load(cfg.TRAIN.NET_G,
               map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)

nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(batch_size, nz))

netG.cuda()
netG.eval()
noise = noise.cuda()

t_embeddings = load_lua(txt_dir)
t_embeddings = t_embeddings.view(-1,1024)

t_embeddings = torch.cat((t_embeddings,t_embeddings), 0)
t_embeddings = Variable(t_embeddings).cuda()

embedding_dim = t_embeddings.size(1)
Example #22
    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            if split_dir == 'test':
                split_dir = 'valid'
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            model_path = '/content/drive/My Drive/Colab Notebooks/StackGAN-v2-master/models/netG_210000.pth'
            state_dict = \
                torch.load(model_path,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', model_path)

            # the path to save generated images
            s_tmp = cfg.TRAIN.NET_G
            istart = s_tmp.rfind('_') + 1
            iend = s_tmp.rfind('.')
            iteration = int(s_tmp[istart:iend])
            s_tmp = s_tmp[:s_tmp.rfind('/')]
            save_dir = '%s/iteration%d' % (s_tmp, iteration)

            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(self.batch_size, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            for step, data in enumerate(self.data_loader, 0):
                imgs, t_embeddings, filenames = data
                if cfg.CUDA:
                    t_embeddings = Variable(t_embeddings).cuda()
                else:
                    t_embeddings = Variable(t_embeddings)
                # print(t_embeddings[:, 0, :], t_embeddings.size(1))

                embedding_dim = t_embeddings.size(1)
                batch_size = imgs[0].size(0)
                noise.data.resize_(batch_size, nz)
                noise.data.normal_(0, 1)

                fake_img_list = []
                for i in range(embedding_dim):
                    fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
                    if cfg.TEST.B_EXAMPLE:
                        # fake_img_list.append(fake_imgs[0].data.cpu())
                        # fake_img_list.append(fake_imgs[1].data.cpu())
                        fake_img_list.append(fake_imgs[2].data.cpu())
                    else:
                        self.save_singleimages(fake_imgs[-1], filenames,
                                               save_dir, split_dir, i, 256)
                        # self.save_singleimages(fake_imgs[-2], filenames,
                        #                        save_dir, split_dir, i, 128)
                        # self.save_singleimages(fake_imgs[-3], filenames,
                        #                        save_dir, split_dir, i, 64)
                    # break
                if cfg.TEST.B_EXAMPLE:
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 64)
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 128)
                    self.save_superimages(fake_img_list, filenames,
                                          save_dir, split_dir, 256)
Example #23
    def evaluate(self, split_dir):
        inception_model = INCEPTION_V3()
        # fid_model = FID_INCEPTION()
        if cfg.CUDA:
            inception_model.cuda()
        #     fid_model.cuda()
        inception_model.eval()
        # fid_model.eval()

        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            if split_dir == 'test':
                split_dir = 'valid'
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            # print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            # s_tmp = cfg.TRAIN.NET_G
            # istart = s_tmp.rfind('_') + 1
            # iend = s_tmp.rfind('.')
            # iteration = int(s_tmp[istart:iend])
            # s_tmp = s_tmp[:s_tmp.rfind('/')]
            # save_dir = '%s/iteration%d' % (s_tmp, iteration)
            # save_dir = 'C:\\Users\\alper\\PycharmProjects\\MSGAN\\StackGAN++-Mode-Seeking\\results'
            save_dir = "D:\\results"

            nz = cfg.GAN.Z_DIM
            n_samples = 50
            # noise = Variable(torch.FloatTensor(self.batch_size, nz))
            noise = Variable(torch.FloatTensor(n_samples, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            for step, data in enumerate(tqdm(self.data_loader)):
                # if step == 8:
                #     break
                imgs, t_embeddings, filenames = data
                if cfg.CUDA:
                    t_embeddings = Variable(t_embeddings).cuda()
                else:
                    t_embeddings = Variable(t_embeddings)
                # print(t_embeddings[:, 0, :], t_embeddings.size(1))

                embedding_dim = t_embeddings.size(1)
                # batch_size = imgs[0].size(0)
                # noise.data.resize_(batch_size, nz)
                noise.data.normal_(0, 1)

                fake_img_list = []
                inception_score_list = []
                fid_list = []
                score_list = []
                predictions = []
                fids = []
                for i in range(embedding_dim):
                    inception_score_list.append([])
                    fid_list.append([])
                    score_list.append([])

                    emb_imgs = []
                    for j in range(n_samples):
                        noise_j = noise[j].unsqueeze(0)
                        t_embeddings_i = t_embeddings[:, i, :]
                        fake_imgs, _, _ = netG(noise_j, t_embeddings_i)
                        # filenames_number ='_sample_%2.2d'%(j)
                        # filenames_new = []
                        # filenames_new.append(filenames[-1]+filenames_number)
                        # filenames_new = tuple(filenames_new)

                        # for selecting reasonable images
                        pred = inception_model(fake_imgs[-1].detach())
                        pred = pred.data.cpu().numpy()
                        predictions.append(pred)
                        # indices in the Inception prediction vector treated as bird classes
                        bird_indices = [
                            7, 8, 9, 10, 11, 13, 15, 16, 17, 18, 19, 21, 23,
                            81, 84, 85, 86, 88, 90, 91, 93, 94, 95, 96, 97, 99,
                            129, 130, 133, 134, 135, 138, 141, 142, 143, 144,
                            146, 517
                        ]
                        score = np.max(pred[0, bird_indices])
                        score_list[i].append((j, score))
                        emb_imgs.append(fake_imgs[2].data.cpu())
                        if cfg.TEST.B_EXAMPLE:
                            # fake_img_list.append(fake_imgs[0].data.cpu())
                            # fake_img_list.append(fake_imgs[1].data.cpu())
                            fake_img_list.append(fake_imgs[2].data.cpu())
                        else:
                            self.save_singleimages(fake_imgs[-1], filenames, j,
                                                   save_dir, split_dir, i, 256)
                        # self.save_singleimages(fake_imgs[-2], filenames,
                        #                        save_dir, split_dir, i, 128)
                        # self.save_singleimages(fake_imgs[-3], filenames,
                        #                        save_dir, split_dir, i, 64)
                    # break
                    score_list[i] = sorted(score_list[i],
                                           key=lambda x: x[1],
                                           reverse=True)[:5]
                    # for FID score
                    # ffi = [i[0].numpy() for i in emb_imgs]
                    fake_filtered_images = [
                        fake_img_list[i][0].numpy()
                        for i in range(len(fake_img_list))
                    ]
                    img_dir = os.path.join(cfg.DATA_DIR, "CUB_200_2011",
                                           "images",
                                           filenames[0].split("/")[0])
                    img_files = [
                        os.path.join(img_dir, i) for i in os.listdir(img_dir)
                    ]

                    # act_real = get_activations(img_files, fid_model)
                    # mu_real, sigma_real = get_fid_stats(act_real)
                    # print("mu_real: {}, sigma_real: {}".format(mu_real, sigma_real))

                    np_imgs = np.array(fake_filtered_images)
                    # print(np_imgs.shape)

                    # # print(type(np_imgs[0]))
                    # act_fake = get_activations(np_imgs, fid_model, img=True)
                    # mu_fake, sigma_fake = get_fid_stats(act_fake)
                    # fid_score = frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)
                    # fids.append(fid_score)
                    # print("mu_fake: {}, sigma_fake: {}".format(mu_fake, sigma_fake))
                # print(inception_score_list)

                # # calculate inception score
                # predictions = np.concatenate(predictions, 0)
                # mean, std = compute_inception_score(predictions, 10)
                # mean_nlpp, std_nlpp = \
                #     negative_log_posterior_probability(predictions, 10)
                # inception_score_list.append((mean, std, mean_nlpp, std_nlpp))

                # # for FID score
                # fake_filtered_images = [fake_img_list[i*n_samples + k[0]][0].numpy() for i, j in enumerate(score_list) for k in j]
                # # fake_filtered_images = [fake_img_list[i][0].numpy() for i in range(len(fake_img_list))]
                # img_dir = os.path.join(cfg.DATA_DIR, "CUB_200_2011", "images", filenames[0].split("/")[0])
                # img_files = [os.path.join(img_dir, i) for i in os.listdir(img_dir)]
                #
                # act_real = get_activations(img_files, fid_model)
                # mu_real, sigma_real = get_fid_stats(act_real)
                # # print("mu_real: {}, sigma_real: {}".format(mu_real, sigma_real))
                #
                # np_imgs = np.array(fake_filtered_images)
                # # print(np_imgs.shape)
                #
                # # print(type(np_imgs[0]))
                # act_fake = get_activations(np_imgs, fid_model, img=True)
                # mu_fake, sigma_fake = get_fid_stats(act_fake)
                # # print("mu_fake: {}, sigma_fake: {}".format(mu_fake, sigma_fake))
                #
                # # fid_score = frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)
                # fid_score = np.mean(fids)
                # fid_list.append(fid_score)
                # stats = 'step: {}, FID: {}, inception_score: {}, nlpp: {}\n'.format(step, fid_score, (mean, std), (mean_nlpp, std_nlpp))
                # with open("results\\stats.txt", "a+") as f:
                #     f.write(stats)
                # print(stats)

                if cfg.TEST.B_EXAMPLE:
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 64)
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 128)
                    if cfg.TEST.FILTER:
                        images_to_save = [
                            fake_img_list[i * n_samples + k[0]]
                            for i, j in enumerate(score_list) for k in j
                        ]
                    else:
                        images_to_save = fake_img_list
                    self.save_superimages(images_to_save, filenames, save_dir,
                                          split_dir, 256)
Example #24
def gen_example(n_words, wordtoix, ixtoword, model_dir):
    '''generate images from example sentences'''
    # filepath = 'example_captions.txt'
    filepath = 'caption.txt'
    data_dic = {}
    with open(filepath, "r") as f:
        filenames = f.read().split('\n')

        captions = []
        cap_lens = []

        for sent in filenames:
            if len(sent) == 0:
                continue
            sent = sent.replace("\ufffd\ufffd", " ")
            tokenizer = RegexpTokenizer(r'\w+')
            tokens = tokenizer.tokenize(sent.lower())
            if len(tokens) == 0:
                print('sentence token == 0 !')
                continue

            rev = []
            for t in tokens:
                t = t.encode('ascii', 'ignore').decode('ascii')
                if len(t) > 0 and t in wordtoix:
                    rev.append(wordtoix[t])
            captions.append(rev)
            cap_lens.append(len(rev))

        max_len = np.max(cap_lens)
        sorted_indices = np.argsort(cap_lens)[::-1]
        cap_lens = np.asarray(cap_lens)
        cap_lens = cap_lens[sorted_indices]
        cap_array = np.zeros((len(captions), max_len), dtype='int64')

        for i in range(len(captions)):
            idx = sorted_indices[i]
            cap = captions[idx]
            c_len = len(cap)
            cap_array[i, :c_len] = cap
        # key = name[(name.rfind('/') + 1):]
        key = 0
        data_dic[key] = [cap_array, cap_lens, sorted_indices]

    # algo.gen_example(data_dic)
    text_encoder = RNN_ENCODER(n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
    state_dict = torch.load(cfg.TRAIN.NET_E,
                            map_location=lambda storage, loc: storage)
    text_encoder.load_state_dict(state_dict)
    print('Load text encoder from:', cfg.TRAIN.NET_E)
    text_encoder.eval()

    netG = G_NET()
    netG.apply(weights_init)
    # netG.cuda()
    netG.eval()
    state_dict = torch.load(model_dir,
                            map_location=lambda storage, loc: storage)
    netG.load_state_dict(state_dict)
    print('Load G from: ', model_dir)

    save_dir = 'results'
    mkdir_p(save_dir)
    for key in data_dic:
        captions, cap_lens, sorted_indices = data_dic[key]

        batch_size = captions.shape[0]
        nz = cfg.GAN.Z_DIM

        with torch.no_grad():
            captions = Variable(torch.from_numpy(captions))
            cap_lens = Variable(torch.from_numpy(cap_lens))

            # captions = captions.cuda()
            # cap_lens = cap_lens.cuda()

        for i in range(image_per_caption):  # 16
            with torch.no_grad():
                noise = Variable(torch.FloatTensor(batch_size, nz))
                # noise = noise.cuda()

            # (1) Extract text embeddings
            hidden = text_encoder.init_hidden(batch_size)
            words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
            mask = (captions == 0)
            # (2) Generate fake images
            noise.data.normal_(0, 1)
            fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs,
                                                   mask)

            cap_lens_np = cap_lens.data.numpy()

            for j in range(batch_size):
                save_name = '%s/%d_%d' % (save_dir, i, sorted_indices[j])
                for k in range(len(fake_imgs)):
                    im = fake_imgs[k][j].data.cpu().numpy()
                    # map generator output from [-1, 1] to [0, 255]
                    im = (im + 1.0) * 127.5
                    im = im.astype(np.uint8)
                    # print('im', im.shape)
                    im = np.transpose(im, (1, 2, 0))
                    # print('im', im.shape)
                    im = Image.fromarray(im)
                    fullpath = '%s_g%d.png' % (save_name, k)
                    im.save(fullpath)

                for k in range(len(attention_maps)):
                    if len(fake_imgs) > 1:
                        im = fake_imgs[k + 1]
                    else:
                        im = fake_imgs[0]
                    attn_maps = attention_maps[k]
                    att_sze = attn_maps.size(2)
                    img_set, sentences = \
                        build_super_images2(im[j].unsqueeze(0),
                                            captions[j].unsqueeze(0),
                                            [cap_lens_np[j]], ixtoword,
                                            [attn_maps[j]], att_sze)
                    if img_set is not None:
                        im = Image.fromarray(img_set)
                        fullpath = '%s_a%d_attention.png' % (save_name, k)
                        im.save(fullpath)
Example #25
    def gen_samples(self, idx):

        text_encoder = RNN_ENCODER(self.n_words,
                                   nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = torch.load(cfg.TRAIN.NET_E,
                                map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        print('Load text encoder from: {}'.format(cfg.TRAIN.NET_E))
        text_encoder = text_encoder.cuda()
        text_encoder.eval()

        netG = G_NET()
        state_dict = torch.load(cfg.TRAIN.NET_G,
                                map_location=lambda storage, loc: storage)
        netG.load_state_dict(state_dict)
        print('Load G from: {}'.format(cfg.TRAIN.NET_G))
        netG.cuda()
        netG.eval()

        s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]
        save_dir = '%s/samples' % (s_tmp)
        mkdir_p(save_dir)

        batch_size = self.batch_size
        nz = cfg.GAN.Z_DIM
        with torch.no_grad():
            noise = Variable(torch.FloatTensor(batch_size, nz))
        noise = noise.cuda()

        step = 0
        data_iter = iter(self.data_loader)

        while step < self.num_batches:
            data = next(data_iter)
            imgs, captions, cap_lens, class_ids, sorted_cap_indices = self.prepare_data(
                data)
            hidden = text_encoder.init_hidden(batch_size)
            words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
            mask = (captions == 0)
            num_words = words_embs.size(2)
            if mask.size(1) > num_words:
                mask = mask[:, :num_words]
            for i in range(10):
                noise.data.normal_(0, 1)
                fake_imgs, attention_maps, _, _ = netG(noise, sent_emb,
                                                       words_embs, mask)
                cap_lens_np = cap_lens.cpu().data.numpy()
                for j in range(batch_size):
                    right_idx = step * batch_size + sorted_cap_indices[j]
                    save_name = '%s/%d_s_%d' % (save_dir, i, right_idx)
                    original_idx = idx[right_idx]
                    shutil.copyfile(
                        '/.local/AttnGAN/data/FashionSynthesis/test/original/test128_{}.png'
                        .format(original_idx + 1),
                        save_dir + '/test128_{0}_{1}.png'.format(
                            original_idx + 1, right_idx))
                    for k in range(len(fake_imgs)):
                        im = fake_imgs[k][j].data.cpu().numpy()
                        im = (im + 1.0) * 127.5
                        im = im.astype(np.uint8)
                        im = np.transpose(im, (1, 2, 0))
                        im = Image.fromarray(im)
                        fullpath = '%s_g%d.png' % (save_name, k)
                        im.save(fullpath)
                    for k in range(len(attention_maps)):
                        if len(fake_imgs) > 1:
                            im = fake_imgs[k + 1].detach().cpu()
                        else:
                            im = fake_imgs[0].detach().cpu()
                        attn_maps = attention_maps[k]
                        att_sze = attn_maps.size(2)
                        img_set, sentences = \
                            build_super_images2(im[j].unsqueeze(0),
                                                captions[j].unsqueeze(0),
                                                [cap_lens_np[j]], self.ixtoword,
                                                [attn_maps[j]], att_sze)
                        if img_set is not None:
                            im = Image.fromarray(img_set)
                            fullpath = '%s_a%d.png' % (save_name, k)
                            im.save(fullpath)
            step += 1
Example #26
    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for the model is not found!')
        else:
            # Build and load the generator
            if split_dir == 'test':
                split_dir = 'valid'
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            #state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)

            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            # s_tmp = cfg.TRAIN.NET_G
            # istart = s_tmp.rfind('_') + 1
            # iend = s_tmp.rfind('.')
            # iteration = int(s_tmp[istart:iend])
            # s_tmp = s_tmp[:s_tmp.rfind('/')]
            # save_dir = '%s/iteration%d' % (s_tmp, iteration)
            save_dir = 'J:\\qimao\\Text-to-image\\results\\Plugin-v1-210K-random50'

            nz = cfg.GAN.Z_DIM
            n_samples = 50
            # noise = Variable(torch.FloatTensor(self.batch_size, nz))
            noise = Variable(torch.FloatTensor(n_samples, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            for step, data in enumerate(self.data_loader, 0):
                imgs, t_embeddings, filenames = data
                if cfg.CUDA:
                    t_embeddings = Variable(t_embeddings).cuda()
                else:
                    t_embeddings = Variable(t_embeddings)
                # print(t_embeddings[:, 0, :], t_embeddings.size(1))

                embedding_dim = t_embeddings.size(1)
                # batch_size = imgs[0].size(0)
                # noise.data.resize_(batch_size, nz)
                noise.data.normal_(0, 1)

                fake_img_list = []
                for i in range(embedding_dim):
                    for j in range(n_samples):
                        noise_j = noise[j].unsqueeze(0)
                        t_embeddings_i = t_embeddings[:, i, :]

                        # modified block: this netG also returns per-layer outputs
                        fake_imgs, layers_output, _, _ = netG(
                            noise_j, t_embeddings_i)
                        if len(layers_output) != len(lamdas):
                            print("please check lamdas length")
                        # filenames_number ='_sample_%2.2d'%(j)
                        # filenames_new = []
                        # filenames_new.append(filenames[-1]+filenames_number)
                        # filenames_new = tuple(filenames_new)

                        if cfg.TEST.B_EXAMPLE:
                            # fake_img_list.append(fake_imgs[0].data.cpu())
                            # fake_img_list.append(fake_imgs[1].data.cpu())
                            fake_img_list.append(fake_imgs[2].data.cpu())
                        else:
                            self.save_singleimages(fake_imgs[-1], filenames, j,
                                                   save_dir, split_dir, i, 256)
                        # self.save_singleimages(fake_imgs[-2], filenames,
                        #                        save_dir, split_dir, i, 128)
                        # self.save_singleimages(fake_imgs[-3], filenames,
                        #                        save_dir, split_dir, i, 64)
                    # break
                if cfg.TEST.B_EXAMPLE:
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 64)
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 128)
                    self.save_superimages(fake_img_list, filenames, save_dir,
                                          split_dir, 256)
Example #27
    def build_models(self):
        # ###################encoders######################################## #
        if cfg.TRAIN.NET_E == '':
            print('Error: no pretrained text-image encoders')
            return

        image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
        img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder',
                                                   'image_encoder')
        state_dict = \
            torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
        image_encoder.load_state_dict(state_dict)
        for p in image_encoder.parameters():
            p.requires_grad = False
        print('Load image encoder from:', img_encoder_path)
        image_encoder.eval()

        text_encoder = \
            RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = \
            torch.load(cfg.TRAIN.NET_E,
                       map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        for p in text_encoder.parameters():
            p.requires_grad = False
        print('Load text encoder from:', cfg.TRAIN.NET_E)
        text_encoder.eval()

        # #######################generator and discriminators############## #
        netG = G_NET(len(self.cats_index_dict))
        netsPatD, netsShpD = [], []
        if cfg.TREE.BRANCH_NUM > 0:
            netsPatD.append(PAT_D_NET64())
            netsShpD.append(SHP_D_NET64(len(self.cats_index_dict)))
        if cfg.TREE.BRANCH_NUM > 1:
            netsPatD.append(PAT_D_NET128())
            netsShpD.append(SHP_D_NET128(len(self.cats_index_dict)))
        if cfg.TREE.BRANCH_NUM > 2:
            netsPatD.append(PAT_D_NET256())
            netsShpD.append(SHP_D_NET256(len(self.cats_index_dict)))

        netObjSSD = OBJ_SS_D_NET(len(self.cats_index_dict))
        netObjLSD = OBJ_LS_D_NET(len(self.cats_index_dict))

        netG.apply(weights_init)
        netObjSSD.apply(weights_init)
        netObjLSD.apply(weights_init)
        for i in range(len(netsPatD)):
            netsPatD[i].apply(weights_init)
            netsShpD[i].apply(weights_init)
        print('# of netsPatD', len(netsPatD))
        # ########################################################### #
        if cfg.CUDA:
            text_encoder = text_encoder.cuda()
            image_encoder = image_encoder.cuda()
            netG.cuda()
            netObjSSD.cuda()
            netObjLSD.cuda()
            for i in range(len(netsPatD)):
                netsPatD[i].cuda()
                netsShpD[i].cuda()

            if len(cfg.GPU_IDS) > 1:
                text_encoder = nn.DataParallel(text_encoder)
                text_encoder.to(self.device)
                image_encoder = nn.DataParallel(image_encoder)
                image_encoder.to(self.device)
                netG = nn.DataParallel(netG)
                netG.to(self.device)
                netObjSSD = nn.DataParallel(netObjSSD)
                netObjSSD.to(self.device)
                netObjLSD = nn.DataParallel(netObjLSD)
                netObjLSD.to(self.device)
                for i in range(len(netsPatD)):
                    netsPatD[i] = nn.DataParallel(netsPatD[i])
                    netsPatD[i].to(self.device)

                    netsShpD[i] = nn.DataParallel(netsShpD[i])
                    netsShpD[i].to(self.device)
        #
        epoch = 0
        if cfg.TRAIN.NET_G != '':
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', cfg.TRAIN.NET_G)
            istart = cfg.TRAIN.NET_G.rfind('_') + 1
            iend = cfg.TRAIN.NET_G.rfind('.')
            epoch = cfg.TRAIN.NET_G[istart:iend]
            epoch = int(epoch) + 1

            Gname = cfg.TRAIN.NET_G
            for i in range(len(netsPatD)):
                s_tmp = Gname[:Gname.rfind('/')]

                Dname = '%s/netPatD%d.pth' % (s_tmp, i)
                print('Load PatD from: ', Dname)
                state_dict = \
                    torch.load(Dname, map_location=lambda storage, loc: storage)
                netsPatD[i].load_state_dict(state_dict)

                Dname = '%s/netShpD%d.pth' % (s_tmp, i)
                print('Load ShpD from: ', Dname)
                state_dict = \
                    torch.load(Dname, map_location=lambda storage, loc: storage)
                netsShpD[i].load_state_dict(state_dict)

            s_tmp = Gname[:Gname.rfind('/')]
            Dname = '%s/netObjSSD.pth' % (s_tmp)
            print('Load ObjSSD from: ', Dname)
            state_dict = \
                torch.load(Dname, map_location=lambda storage, loc: storage)
            netObjSSD.load_state_dict(state_dict)

            s_tmp = Gname[:Gname.rfind('/')]
            Dname = '%s/netObjLSD.pth' % (s_tmp)
            print('Load ObjLSD from: ', Dname)
            state_dict = \
                torch.load(Dname, map_location=lambda storage, loc: storage)
            netObjLSD.load_state_dict(state_dict)

        return [
            text_encoder, image_encoder, netG, netsPatD, netsShpD, netObjSSD,
            netObjLSD, epoch
        ]
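The epoch bookkeeping above relies on a filename convention: checkpoints end in "_<epoch>.pth", so the epoch is the text between the last underscore and the extension. A minimal, self-contained sketch of that parsing (the helper name and example path are illustrative, not from the snippet):

def epoch_from_checkpoint(path):
    # text between the last '_' and the extension is the epoch number
    istart = path.rfind('_') + 1
    iend = path.rfind('.')
    return int(path[istart:iend]) + 1  # resume from the following epoch

assert epoch_from_checkpoint('models/netG_epoch_600.pth') == 601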
Example #28
File: trainer.py  Project: zxs789/Obj-GAN
    def sampling(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            netG = G_NET(len(self.cats_index_dict))
            netG.apply(weights_init)
            netG.eval()

            if cfg.CUDA:
                netG.cuda()

            if len(cfg.GPU_IDS) > 1:
                netG = nn.DataParallel(netG)
                netG.to(self.device)

            batch_size = self.batch_size
            nz = cfg.GAN.Z_DIM
            noise = Variable(
                torch.FloatTensor(batch_size, cfg.ROI.BOXES_NUM,
                                  len(self.cats_index_dict) * 4))
            if cfg.CUDA:
                noise = noise.cuda()

            model_dir = cfg.TRAIN.NET_G
            state_dict = \
                torch.load(model_dir, map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG.load_state_dict(state_dict)
            print('Load G from: ', model_dir)

            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s/%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)

            cnt = 0

            for _ in range(1):  # (cfg.TEXT.CAPTIONS_PER_IMAGE):
                for step, data in enumerate(self.data_loader, 0):
                    cnt += batch_size
                    if step % 100 == 0:
                        print('step: ', step)
                    # if step > 50:
                    #     break
                    imgs, pooled_hmaps, hmaps, bbox_maps_fwd, bbox_maps_bwd, bbox_fmaps, \
                        rois, fm_rois, num_rois, class_ids, keys = prepare_data(data)
                    num_rois = num_rois.data.cpu().numpy()

                    cats_list = []
                    for batch_index in range(self.batch_size):
                        cats = []
                        for roi_index in range(num_rois[batch_index]):
                            rela_cat_id = int(rois[batch_index, roi_index, 4])
                            abs_cat_id = self.cats_dict[rela_cat_id][0]
                            cat = self.ixtoword[abs_cat_id].encode(
                                'ascii', 'ignore').decode('ascii')
                            cats.append(cat)
                        cats_list.append(cats)

                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    max_num_roi = max(num_rois)
                    noise.data.normal_(0, 1)
                    fake_hmaps = netG(noise[:, :max_num_roi], bbox_maps_fwd,
                                      bbox_maps_bwd, bbox_fmaps)
                    fake_hmaps = fake_hmaps.repeat(1, 1, 3, 1, 1)
                    for j in range(batch_size):
                        s_tmp = '%s/single/%s' % (save_dir, keys[j])
                        folder = s_tmp[:s_tmp.rfind('/')]
                        if not os.path.isdir(folder):
                            print('Make a new folder: ', folder)
                            mkdir_p(folder)
                        k = 0
                        # for k in range(len(fake_imgs)):
                        im = fake_hmaps[j][k].data.cpu().numpy()

                        minV = im.min()
                        maxV = im.max()
                        im = (im - minV) / (maxV - minV)
                        im *= 255
                        im = im.astype(np.uint8)
                        im = np.transpose(im, (1, 2, 0))
                        im = Image.fromarray(im)

                        cat = cats_list[j][k]
                        fullpath = '{0}_{1}.png'.format(s_tmp, cat)
                        im.save(fullpath)
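The image dump above min-max normalizes each heat map before saving. A self-contained sketch of that conversion, assuming a NumPy CHW float array (the epsilon guard against a constant map is an addition, not in the snippet):

import numpy as np

def to_uint8_image(im):
    # rescale to [0, 1], then to [0, 255], then CHW -> HWC for PIL
    im = (im - im.min()) / (im.max() - im.min() + 1e-8)
    im = (im * 255).astype(np.uint8)
    return np.transpose(im, (1, 2, 0))

print(to_uint8_image(np.random.randn(3, 8, 8)).shape)  # (8, 8, 3)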
Example #29
    def sample(self, split_dir, num_samples=25, draw_bbox=False):
        from PIL import Image, ImageDraw, ImageFont
        try:
            import cPickle as pickle  # Python 2
        except ImportError:
            import pickle  # Python 3
        import torchvision
        import torchvision.utils as vutils

        if cfg.TRAIN.NET_G == '':
            print('Error: the path for model NET_G is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            text_encoder = RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()

            batch_size = cfg.TRAIN.BATCH_SIZE
            nz = cfg.GAN.Z_DIM

            model_dir = cfg.TRAIN.NET_G
            state_dict = torch.load(model_dir, map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG = G_NET()
            print('Load G from: ', model_dir)
            netG.apply(weights_init)

            netG.load_state_dict(state_dict["netG"])
            netG.cuda()
            netG.eval()

            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s_%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)
            #######################################
            noise = Variable(torch.FloatTensor(9, nz))

            imsize = 256

            for step, data in enumerate(self.data_loader, 0):
                if step >= num_samples:
                    break

                imgs, captions, cap_lens, class_ids, keys, transformation_matrices, label_one_hot, bbox = \
                    prepare_data(data, eval=True)
                transf_matrices_inv = transformation_matrices[1][0].unsqueeze(0)
                label_one_hot = label_one_hot[0].unsqueeze(0)

                img = imgs[-1][0]
                val_image = img.view(1, 3, imsize, imsize)

                hidden = text_encoder.init_hidden(batch_size)
                # words_embs: batch_size x nef x seq_len
                # sent_emb: batch_size x nef
                words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                words_embs, sent_emb = words_embs[0].unsqueeze(0).detach(), sent_emb[0].unsqueeze(0).detach()
                words_embs = words_embs.repeat(9, 1, 1)
                sent_emb = sent_emb.repeat(9, 1)
                mask = (captions == 0)
                mask = mask[0].unsqueeze(0)
                num_words = words_embs.size(2)
                if mask.size(1) > num_words:
                    mask = mask[:, :num_words]
                mask = mask.repeat(9, 1)
                transf_matrices_inv = transf_matrices_inv.repeat(9, 1, 1, 1)
                label_one_hot = label_one_hot.repeat(9, 1, 1)

                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                inputs = (noise, sent_emb, words_embs, mask, transf_matrices_inv, label_one_hot)
                with torch.no_grad():
                    fake_imgs, _, mu, logvar = nn.parallel.data_parallel(netG, inputs, self.gpus)

                data_img = torch.FloatTensor(10, 3, imsize, imsize).fill_(0)
                data_img[0] = val_image
                data_img[1:10] = fake_imgs[-1]

                if draw_bbox:
                    for idx in range(3):
                        x, y, w, h = tuple([int(imsize*x) for x in bbox[0, idx]])
                        w = imsize-1 if w > imsize-1 else w
                        h = imsize-1 if h > imsize-1 else h
                        if x <= -1:
                            break
                        data_img[:10, :, y, x:x + w] = 1
                        data_img[:10, :, y:y + h, x] = 1
                        data_img[:10, :, y+h, x:x + w] = 1
                        data_img[:10, :, y:y + h, x + w] = 1

                # get caption
                cap = captions[0].data.cpu().numpy()
                sentence = ""
                for j in range(len(cap)):
                    if cap[j] == 0:
                        break
                    word = self.ixtoword[cap[j]].encode('ascii', 'ignore').decode('ascii')
                    sentence += word + " "
                sentence = sentence[:-1]
                vutils.save_image(data_img, '{}/{}_{}.png'.format(save_dir, sentence, step), normalize=True, nrow=10)
            print("Saved {} files to {}".format(step, save_dir))
Example #30
class Generator:
    def __init__(self, caption_file, saveable, cuda=False, profile=False):
        # flags
        self.cuda = cuda
        self.profile = profile

        if self.profile:
            print('Initializing Generator...')
            print('cuda={}\nprofile={}'.format(self.cuda, self.profile))

        # load caption indices
        x = pickle.load(open(caption_file, 'rb'))
        self.ixtoword = x[2]
        self.wordtoix = x[3]
        del x

        # load text encoder
        self.text_encoder = RNN_ENCODER(len(self.wordtoix), nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
        self.text_encoder.load_state_dict(state_dict)
        if self.cuda:
            self.text_encoder.cuda()
            
        self.text_encoder.eval()

        # load generative model
        self.netG = G_NET()
        state_dict = torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
        self.netG.load_state_dict(state_dict)
        if self.cuda:
            self.netG.cuda()
            
        self.netG.eval()

        # saveable items -> push to storage
        self.saveable = saveable

    def vectorize_caption(self, caption, copies):
        # create caption vector
        tokens = caption.split(' ')
        cap_v = []
        for t in tokens:
            t = t.strip().encode('ascii', 'ignore').decode('ascii')
            if len(t) > 0 and t in self.wordtoix:
                cap_v.append(self.wordtoix[t])

        # expected state for single generation
        captions = np.zeros((copies, len(cap_v)))
        for i in range(copies):
            captions[i,:] = np.array(cap_v)
        cap_lens = np.zeros(copies) + len(cap_v)

        return captions.astype(int), cap_lens.astype(int), len(self.wordtoix)
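vectorize_caption maps tokens through wordtoix and tiles the result once per copy. A self-contained sketch with a toy three-word vocabulary (the vocabulary itself is made up for illustration):

import numpy as np

wordtoix = {'a': 1, 'red': 2, 'bird': 3}
cap_v = [wordtoix[t] for t in 'a red bird'.split(' ') if t in wordtoix]
copies = 2
captions = np.tile(np.array(cap_v), (copies, 1)).astype(int)
cap_lens = np.full(copies, len(cap_v), dtype=int)
print(captions)  # [[1 2 3]
                 #  [1 2 3]]
print(cap_lens)  # [3 3]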

    def generate(self, caption, copies=2):
        # load word vector
        captions, cap_lens, n_words = self.vectorize_caption(caption, copies)

        # only one to generate
        batch_size = captions.shape[0]

        nz = cfg.GAN.Z_DIM
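        # volatile=True is the pre-0.4 PyTorch way to disable autograd here;
        # on current versions the equivalent is wrapping the forward pass in
        # torch.no_grad().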
        captions = Variable(torch.from_numpy(captions), volatile=True)
        cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
        noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)

        if self.cuda:
            captions = captions.cuda()
            cap_lens = cap_lens.cuda()
            noise = noise.cuda()

        #######################################################
        # (1) Extract text embeddings
        #######################################################
        hidden = self.text_encoder.init_hidden(batch_size)
        words_embs, sent_emb = self.text_encoder(captions, cap_lens, hidden)
        mask = (captions == 0)

        #######################################################
        # (2) Generate fake images
        #######################################################
        noise.data.normal_(0, 1)
        fake_imgs, attention_maps, _, _ = self.netG(noise, sent_emb, words_embs, mask)

        # G attention
        cap_lens_np = cap_lens.cpu().data.numpy()

        # prefix for partitioning images
        prefix = datetime.now().strftime('%Y/%B/%d/%H_%M_%S_%f')
        urls = []
        # only look at first one
        for j in range(batch_size):
            for k in range(len(fake_imgs)):
                im = fake_imgs[k][j].data.cpu().numpy()
                im = (im + 1.0) * 127.5
                im = im.astype(np.uint8)
                im = np.transpose(im, (1, 2, 0))

                # save using saveable
                birdy = 'bird_g{}'.format(k)
                if copies > 2:
                    item = self.saveable.save('{}/{}'.format(prefix, j), birdy, im)
                else:
                    item = self.saveable.save(prefix, birdy, im)

                urls.append(item)

            if copies == 2:
                for k in range(len(attention_maps)):
                    if len(fake_imgs) > 1:
                        im = fake_imgs[k + 1].detach().cpu()
                    else:
                        im = fake_imgs[0].detach().cpu()
                            
                    attn_maps = attention_maps[k]
                    att_sze = attn_maps.size(2)

                    img_set, sentences = \
                        build_super_images2(im[j].unsqueeze(0),
                                            captions[j].unsqueeze(0),
                                            [cap_lens_np[j]], self.ixtoword,
                                            [attn_maps[j]], att_sze)

                    if img_set is not None:
                        attnmap = 'attmaps_a{}'.format(k)
                        item = self.saveable.save(prefix, attnmap, img_set)
                        urls.append(item)
            if copies == 2:
                break

        return urls
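generate() maps the generator's tanh-range output back to pixel values with (x + 1) * 127.5. A one-line check of that mapping:

import numpy as np

x = np.array([-1.0, 0.0, 1.0])               # tanh output range
print(((x + 1.0) * 127.5).astype(np.uint8))  # [  0 127 255]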
Example #31
    def sampling(self):
        if self.args.netG == '':
            print('Error: the path for models is not found!')
        else:
            data_dir = cfg.DATA_DIR
            if self.args.split == "test_unseen":
                filepath = os.path.join(data_dir,
                                        "test_unseen/class_data.pickle")
            else:  #test_seen
                filepath = os.path.join(data_dir,
                                        "test_seen/class_data.pickle")
            if os.path.isfile(filepath):
                with open(filepath, "rb") as f:
                    data_dic = pkl.load(f)
            class_names = data_dic['classes']
            class_ids = data_dic['class_info']

            att_dir = os.path.join(data_dir, "CUB_200_2011/attributes")
            att_np = np.zeros((312, 200))  #for CUB
            with open(att_dir + "/class_attribute_labels_continuous.txt",
                      "r") as f:
                for ind, line in enumerate(f.readlines()):
                    line = line.strip("\n")
                    line = list(map(float, line.split()))
                    att_np[:, ind] = line

            if self.args.kl_loss:
                netG = G_NET()
            else:
                netG = G_NET_not_CA()
            test_model = "netG_epoch_600.pth"
            model_path = os.path.join(self.args.netG, "Model", test_model)  ##
            state_dic = torch.load(model_path,
                                   map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dic)
            netG.cuda()
            netG.eval()

            noise = torch.FloatTensor(self.batch_size, cfg.GAN.Z_DIM)

            for class_name, class_id in zip(class_names, class_ids):
                print("now generating, ", class_name)
                class_dir = os.path.join(self.args.netG, 'valid',
                                         test_model[:test_model.rfind(".")],
                                         self.args.split, class_name)
                atts = att_np[:, class_id - 1]
                atts = np.expand_dims(atts, axis=0)
                atts = atts.repeat(self.batch_size, axis=0)
                assert atts.shape == (self.batch_size, 312)

                if cfg.CUDA:
                    noise = noise.cuda()
                    atts = torch.cuda.FloatTensor(atts)
                else:
                    atts = torch.FloatTensor(atts)

                for i in range(self.sample_num):
                    noise.normal_(0, 1)
                    if self.args.kl_loss:
                        fake_imgs, _, _ = nn.parallel.data_parallel(
                            netG, (noise, atts), self.gpus)
                    else:
                        fake_imgs = nn.parallel.data_parallel(
                            netG, (noise, atts), self.gpus)
                    for stage in range(len(fake_imgs)):
                        for num, im in enumerate(fake_imgs[stage]):
                            im = im.detach().cpu()
                            im = im.add_(1).div_(2).mul_(255)
                            im = im.numpy().astype(np.uint8)
                            im = np.transpose(im, (1, 2, 0))
                            im = Image.fromarray(im)
                            stage_dir = os.path.join(class_dir,
                                                     "stage_%d" % stage)
                            mkdir_p(stage_dir)
                            img_path = os.path.join(stage_dir,
                                                    "single_%d.png" % num)
                            im.save(img_path)
                    # assemble 20-image super grids spanning all three stages,
                    # once per generated sample
                    for j in range(int(self.batch_size /
                                       20)):  ## cfg.batch_size==100
                        one_set = [
                            fake_imgs[0][j * 20:(j + 1) * 20],
                            fake_imgs[1][j * 20:(j + 1) * 20],
                            fake_imgs[2][j * 20:(j + 1) * 20]
                        ]
                        img_set = build_images(one_set)
                        img_set = Image.fromarray(img_set)
                        super_dir = os.path.join(class_dir, "super")
                        mkdir_p(super_dir)
                        img_path = os.path.join(super_dir,
                                                "super_%d.png" % j)
                        img_set.save(img_path)
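The attribute loop in Example #31 transposes the file layout: each line holds one class's attribute values, and att_np ends up attributes x classes, so att_np[:, class_id - 1] selects one class column. A runnable sketch on a toy two-class file (np.loadtxt stands in for the manual loop; the real CUB file is 200 lines of 312 values):

import io
import numpy as np

toy = io.StringIO("0.1 0.2 0.3\n0.4 0.5 0.6\n")  # 2 classes x 3 attributes
att_np = np.loadtxt(toy).T                       # -> attributes x classes
print(att_np.shape)  # (3, 2)
print(att_np[:, 0])  # attributes of class 1: [0.1 0.2 0.3]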
Example #32
    def build_models(self):
        # ###################encoders######################################## #
        if cfg.TRAIN.NET_E == '':
            print('Error: no pretrained text-image encoders')
            return

        image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
        img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
        state_dict = \
            torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
        image_encoder.load_state_dict(state_dict)
        for p in image_encoder.parameters():
            p.requires_grad = False
        print('Load image encoder from:', img_encoder_path)
        image_encoder.eval()

        text_encoder = \
            RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = \
            torch.load(cfg.TRAIN.NET_E,
                       map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        for p in text_encoder.parameters():
            p.requires_grad = False
        print('Load text encoder from:', cfg.TRAIN.NET_E)
        text_encoder.eval()

        # #######################generator and discriminators############## #
        netsD = []
        from model import D_NET64, D_NET128, D_NET256
        netG = G_NET()
        if cfg.TREE.BRANCH_NUM > 0:
            netsD.append(D_NET64())
        if cfg.TREE.BRANCH_NUM > 1:
            netsD.append(D_NET128())
        if cfg.TREE.BRANCH_NUM > 2:
            netsD.append(D_NET256())

        netG.apply(weights_init)
        # print(netG)
        for i in range(len(netsD)):
            netsD[i].apply(weights_init)
            # print(netsD[i])
        print('# of netsD', len(netsD))
        epoch = 0

        if self.resume:
            checkpoint_list = sorted([ckpt for ckpt in glob.glob(self.model_dir + "/" + '*.pth')])
            latest_checkpoint = checkpoint_list[-1]
            state_dict = torch.load(latest_checkpoint, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict["netG"])
            for i in range(len(netsD)):
                netsD[i].load_state_dict(state_dict["netD"][i])
            epoch = int(latest_checkpoint[-8:-4]) + 1
            print("Resuming training from checkpoint {} at epoch {}.".format(latest_checkpoint, epoch))

        #
        if cfg.TRAIN.NET_G != '':
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', cfg.TRAIN.NET_G)
            istart = cfg.TRAIN.NET_G.rfind('_') + 1
            iend = cfg.TRAIN.NET_G.rfind('.')
            epoch = cfg.TRAIN.NET_G[istart:iend]
            epoch = int(epoch) + 1
            if cfg.TRAIN.B_NET_D:
                Gname = cfg.TRAIN.NET_G
                for i in range(len(netsD)):
                    s_tmp = Gname[:Gname.rfind('/')]
                    Dname = '%s/netD%d.pth' % (s_tmp, i)
                    print('Load D from: ', Dname)
                    state_dict = \
                        torch.load(Dname, map_location=lambda storage, loc: storage)
                    netsD[i].load_state_dict(state_dict)
        # ########################################################### #
        if cfg.CUDA:
            text_encoder = text_encoder.cuda()
            image_encoder = image_encoder.cuda()
            netG.cuda()
            for i in range(len(netsD)):
                netsD[i].cuda()
        return [text_encoder, image_encoder, netG, netsD, epoch]
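The resume branch above picks the lexicographically last *.pth in model_dir and reads a 4-digit epoch from the filename tail. A minimal sketch of that convention (the directory layout and helper name are assumptions):

import glob
import os

def latest_epoch(model_dir):
    ckpts = sorted(glob.glob(os.path.join(model_dir, '*.pth')))
    if not ckpts:
        return 0
    # e.g. '.../checkpoint_0042.pth' -> 42, so training resumes at 43
    return int(ckpts[-1][-8:-4]) + 1

Note that the sort is lexicographic, so this only finds the newest checkpoint when epoch numbers are zero-padded to a fixed width, as the [-8:-4] slice assumes.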
Example #33
    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
        else:
            # Build and load the generator
            if split_dir == 'test':
                split_dir = 'valid'
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            s_tmp = cfg.TRAIN.NET_G
            istart = s_tmp.rfind('_') + 1
            iend = s_tmp.rfind('.')
            iteration = int(s_tmp[istart:iend])
            s_tmp = s_tmp[:s_tmp.rfind('/')]
            save_dir = '%s/iteration%d' % (s_tmp, iteration)

            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(self.batch_size, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            for step, data in enumerate(self.data_loader, 0):
                imgs, t_embeddings, filenames = data
                if cfg.CUDA:
                    t_embeddings = Variable(t_embeddings).cuda()
                else:
                    t_embeddings = Variable(t_embeddings)
                # print(t_embeddings[:, 0, :], t_embeddings.size(1))

                embedding_dim = t_embeddings.size(1)
                batch_size = imgs[0].size(0)
                noise.data.resize_(batch_size, nz)
                noise.data.normal_(0, 1)

                fake_img_list = []
                for i in range(embedding_dim):
                    fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
                    if cfg.TEST.B_EXAMPLE:
                        # fake_img_list.append(fake_imgs[0].data.cpu())
                        # fake_img_list.append(fake_imgs[1].data.cpu())
                        fake_img_list.append(fake_imgs[2].data.cpu())
                    else:
                        self.save_singleimages(fake_imgs[-1], filenames,
                                               save_dir, split_dir, i, 256)
                        # self.save_singleimages(fake_imgs[-2], filenames,
                        #                        save_dir, split_dir, i, 128)
                        # self.save_singleimages(fake_imgs[-3], filenames,
                        #                        save_dir, split_dir, i, 64)
                    # break
                if cfg.TEST.B_EXAMPLE:
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 64)
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 128)
                    self.save_superimages(fake_img_list, filenames,
                                          save_dir, split_dir, 256)