Example 1
def main():
	opt = get_opt()
	print(opt)

	model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)
	load_checkpoint(model, opt.checkpoint)
	#model.cuda()
	model.eval()

	mode = 'test'
	print('Run on {} data'.format(mode.upper()))
	dataset = TOMDataset(opt, mode, data_list=mode+'_pairs.txt', train=False)
	dataloader = DataLoader(dataset, batch_size=opt.batch_size, num_workers=opt.n_worker, shuffle=False)   
	with torch.no_grad():
		run(opt, model, dataloader, mode)
	print('Successfully completed')
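This and the following examples all call a load_checkpoint(model, path) helper to restore pretrained weights before switching the model to eval mode. A minimal sketch of such a helper, assuming the checkpoint file stores a plain state_dict (this is an assumption, not the original implementation):

import os
import torch

def load_checkpoint(model, checkpoint_path):
    # Do nothing if the checkpoint file is missing.
    if not os.path.exists(checkpoint_path):
        return
    # Restore the saved parameters into the model in place.
    model.load_state_dict(torch.load(checkpoint_path))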
Example 2
class CPVTON(object):
    def __init__(self, gmm_path, tom_path):
        '''
        Initialize the two pretrained models (GMM and TOM).
        '''
        self.gmm = GMM()
        load_checkpoint(self.gmm, gmm_path)
        self.gmm.eval()
        self.tom = UnetGenerator(23,
                                 4,
                                 6,
                                 ngf=64,
                                 norm_layer=nn.InstanceNorm2d)
        load_checkpoint(self.tom, tom_path)
        self.tom.eval()
        self.gmm.cuda()
        self.tom.cuda()

    def predict(self, parse_array, pose_map, human, c):
        '''
        The four inputs are arrays with shape (*, 256, 192).
        '''
        im = transformer(human)
        c = transformer(c)  # [-1,1]

        # parsing map -> binary body-shape mask
        parse_shape = (parse_array > 0).astype(np.float32)

        # blur the shape mask by downsampling and then upsampling
        parse_shape = Image.fromarray((parse_shape * 255).astype(np.uint8))
        parse_shape = parse_shape.resize((192 // 16, 256 // 16),
                                         Image.BILINEAR)
        parse_shape = parse_shape.resize((192, 256), Image.BILINEAR)
        shape = transformer(parse_shape)

        parse_head = (parse_array == 1).astype(np.float32) + \
            (parse_array == 2).astype(np.float32) + \
            (parse_array == 4).astype(np.float32) + \
            (parse_array == 13).astype(np.float32) + \
            (parse_array == 9).astype(np.float32)
        phead = torch.from_numpy(parse_head)  # [0,1]
        im_h = im * phead - (1 - phead)

        agnostic = torch.cat([shape, im_h, pose_map], 0)

        # add a batch dimension (batch == 1) and move the inputs to the GPU
        agnostic = agnostic.unsqueeze(0).cuda()
        c = c.unsqueeze(0).cuda()

        # warp the in-shop cloth onto the target pose with the GMM
        grid, theta = self.gmm(agnostic, c)
        c_warp = F.grid_sample(c, grid, padding_mode='border')

        # de-normalize the warped cloth from [-1, 1] back to a uint8 image
        tensor = (c_warp.detach().clone() + 1) * 0.5 * 255
        tensor = tensor.cpu().clamp(0, 255)
        array = tensor.numpy().astype('uint8')

        c_warp = transformer(np.transpose(array[0], axes=(1, 2, 0)))
        c_warp = c_warp.unsqueeze(0)

        # try-on module: predict a rendered person and a composition mask,
        # then blend the warped cloth with the rendering
        outputs = self.tom(torch.cat([agnostic, c_warp.cuda()], 1))
        p_rendered, m_composite = torch.split(outputs, 3, 1)
        p_rendered = torch.tanh(p_rendered)
        m_composite = torch.sigmoid(m_composite)
        p_tryon = c_warp.cuda() * m_composite + p_rendered * (1 - m_composite)

        return (p_tryon, c_warp)
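A minimal usage sketch for the class above; the normalization transform, the checkpoint paths, and the placeholder inputs are assumptions for illustration, not part of the original code. The pose-map channel count is chosen so that the concatenated TOM input has 23 channels (1 shape + 3 masked head + 16 pose + 3 warped cloth), matching UnetGenerator(23, 4, 6, ...), and a CUDA device is required since the models are moved to the GPU:

import numpy as np
import torch
import torchvision.transforms as transforms

# Assumed form of the global `transformer` used by predict(): to tensor, then scale to [-1, 1].
transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# Hypothetical checkpoint paths and placeholder inputs, for illustration only.
vton = CPVTON('checkpoints/gmm_final.pth', 'checkpoints/tom_final.pth')
parse_array = np.zeros((256, 192), dtype=np.uint8)   # human-parsing label map
pose_map = torch.zeros(16, 256, 192)                 # pose heatmaps (assumed 16 channels)
human = np.zeros((256, 192, 3), dtype=np.uint8)      # person image
cloth = np.zeros((256, 192, 3), dtype=np.uint8)      # in-shop cloth image
p_tryon, c_warp = vton.predict(parse_array, pose_map, human, cloth)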
Example 3
embedder_model = Embedder()
load_checkpoint(embedder_model,
                "checkpoints/identity_embedding_for_test/step_045000.pth")
image_embedder = embedder_model.embedder_b.cuda()
prod_embedder = embedder_model.embedder_a.cuda()

model = G()
if opt.checkpoint != '' and os.path.exists(opt.checkpoint):
    load_checkpoint(model, opt.checkpoint)
model.cuda()

model.eval()
gmm_model.eval()
image_embedder.eval()
generator.eval()
prod_embedder.eval()

pbar = tqdm(enumerate(data_loader), total=len(data_loader))

product_embeddings = []
outfit_embeddings = []
transfer_embeddings = []

product_embeddings_gt = []
outfit_embeddings_gt = []

for i, (inputs, inputs_2) in pbar:

    im = inputs['image'].cuda()
    im_pose = inputs['pose_image']