def __init__(self, args):
    device = args.device
    # load the pretrained text encoder from the retrieval model
    _, _, txt_encoder, _, _ = train_retrieval.load_model(
        args.retrieval_model, device)
    # load the CookGAN generator checkpoint
    ckpt_args, _, netG, _, _, _ = train_cookgan.load_model(
        args.ckpt_path, device)
    netG = netG.eval().to(device)
    txt_encoder = txt_encoder.eval().to(device)

    # the output resolution doubles at every level above the base size
    imsize = ckpt_args.base_size * (2 ** (ckpt_args.levels - 1))
    train_transform = transforms.Compose([
        transforms.Resize(int(imsize * 76 / 64)),
        transforms.CenterCrop(imsize)
    ])
    dataset = FoodDataset(
        recipe_file=ckpt_args.recipe_file,
        img_dir=ckpt_args.img_dir,
        levels=ckpt_args.levels,
        part='val',
        food_type=ckpt_args.food_type,
        base_size=ckpt_args.base_size,
        transform=train_transform)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, num_workers=4)

    self.ckpt_args = ckpt_args
    self.netG = netG
    self.txt_encoder = txt_encoder
    self.dataloader = dataloader
    self.batch_size = args.batch_size
    self.device = device
    # fixed noise so generated samples are comparable across evaluations
    self.fixed_noise = torch.randn(
        self.batch_size, self.ckpt_args.z_dim).to(self.device)
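# Minimal instantiation sketch (hedged): the class name "Evaluator" and the
# argparse flag defaults below are illustrative assumptions; only the
# attributes actually read in __init__ above (device, retrieval_model,
# ckpt_path, batch_size) come from the source.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--device', default='cuda')
parser.add_argument('--retrieval_model', type=str, required=True)
parser.add_argument('--ckpt_path', type=str, required=True)
parser.add_argument('--batch_size', type=int, default=32)
args = parser.parse_args()

evaluator = Evaluator(args)  # hypothetical class name for the __init__ above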
    part='train',
    food_type=args.food_type,
    base_size=args.base_size,
    transform=train_transform)
train_loader = torch.utils.data.DataLoader(
    train_set,
    batch_size=args.batch_size,
    drop_last=True,
    shuffle=True,
    num_workers=int(args.workers))
print('train data info:', len(train_set), len(train_loader))

##############################
# model
##############################
# frozen retrieval encoders provide the text/image embeddings used for training
ckpt_args, _, txt_encoder, img_encoder, _ = train_retrieval.load_model(
    args.retrieval_model, device)
requires_grad(txt_encoder, False)
requires_grad(img_encoder, False)
txt_encoder = txt_encoder.eval()
img_encoder = img_encoder.eval()

if args.ckpt_path:
    # resume the generator/discriminators from an existing checkpoint
    ckpt_args, batch, netG, optimizerG, netsD, optimizersD = load_model(
        args.ckpt_path, device)
    wandb_run_id = args.ckpt_path.split('/')[-2]
    batch_start = batch + 1
else:
    # start a fresh run
    netG, netsD, optimizerG, optimizersD = create_model(args, device)
    wandb_run_id = ''
    batch_start = 0
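# `requires_grad` is not defined in this excerpt; a minimal sketch of the
# freeze/unfreeze helper it presumably refers to (an assumption inferred from
# how it is called above, not necessarily the repository's exact code):
def requires_grad(model, flag=True):
    # toggle gradient tracking for every parameter of the module
    for p in model.parameters():
        p.requires_grad = flag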
# prepare to write
f = open(filename, mode='a')
writer = csv.writer(f, delimiter=',')

ckpt_paths = glob(os.path.join(args.ckpt_dir, '*.ckpt'))
ckpt_paths = sorted(ckpt_paths)
print('records:', ckpt_paths)
print('computed:', computed)

data_loader = None
w2i = None
for ckpt_path in ckpt_paths:
    print()
    print(f'working on {ckpt_path}')
    ckpt_args, _, text_encoder, image_encoder, _ = load_model(
        ckpt_path, device)
    if not data_loader:
        print('loading dataset')
        dataset = Dataset(
            part='val',
            recipe_file=ckpt_args.recipe_file,
            img_dir=ckpt_args.img_dir,
            word2vec_file=ckpt_args.word2vec_file,
            permute_ingrs=ckpt_args.permute_ingrs,
            transform=val_transform,
        )
        w2i = dataset.w2i
        # evaluate on a fixed-size random subset (note: np.random.choice
        # samples with replacement by default, so duplicate indices are possible)
        dataset = torch.utils.data.Subset(
            dataset,
            indices=np.random.choice(len(dataset), 5000))
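# `filename` and `computed` come from earlier in the script and are not shown
# in this excerpt. A hypothetical sketch of how `computed` could be built so
# that a re-run skips checkpoints whose metrics were already appended to the
# CSV (an assumption, not the repository's actual code):
import csv
import os

def load_computed(csv_file):
    # return the checkpoint identifiers already recorded in the results CSV
    if not os.path.exists(csv_file):
        return set()
    with open(csv_file, newline='') as fp:
        return {row[0] for row in csv.reader(fp) if row}

# e.g. computed = load_computed(filename)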