# --- Training entry point --------------------------------------------------
# Load the experiment configuration, initialize logging, and (in "train"
# mode) construct the data pipelines and the model, optionally restoring
# from a checkpoint.
# safe_load: yaml.load() without an explicit Loader is deprecated (PyYAML
# >= 5.1) and can execute arbitrary Python on untrusted input.
config = yaml.safe_load(f)
out_dir, logger = init_logging(opt.log_dir)
logger.info(opt)
logger.info(yaml.dump(config))

if opt.mode == "train":
    batch_size = config["batch_size"]
    # Square RGB images: [H, W, 3] with H == W == spatial_size.
    img_shape = 2 * [config["spatial_size"]] + [3]
    data_shape = [batch_size] + img_shape
    # Several batches are drawn at once for initialization
    # (presumably data-dependent weight init — TODO confirm in Model).
    init_shape = [config["init_batches"] * batch_size] + img_shape
    box_factor = config["box_factor"]
    data_index = config["data_index"]

    batches = get_batches(data_shape, data_index,
                          train=True, box_factor=box_factor)
    init_batches = get_batches(init_shape, data_index,
                               train=True, box_factor=box_factor)
    valid_batches = get_batches(data_shape, data_index,
                                train=False, box_factor=box_factor)
    logger.info("Number of training samples: {}".format(batches.n))
    logger.info("Number of validation samples: {}".format(valid_batches.n))

    model = Model(config, out_dir, logger)
    # Resume training from a prior checkpoint when one is given.
    if opt.checkpoint is not None:
        model.restore_graph(opt.checkpoint)
# --- Restore / evaluation setup --------------------------------------------
# Load a fixed checkpoint and local config, build the deterministic test
# data pipeline, parse CLI options, and restore the trained model graph.
# NOTE(review): the checkpoint path is machine-specific (a /Volumes mount) —
# consider making it a CLI argument.
model_path = '/Volumes/Qin-Warehouse/Warehouse-Data/Variational-U-Net/log/2019-02-06T18-10-49/checkpoints/model.ckpt-100000'
config_path = 'deepfashion_local.yaml'
with open(config_path) as f:
    # safe_load: yaml.load() without an explicit Loader is deprecated
    # (PyYAML >= 5.1) and unsafe on untrusted input.
    config = yaml.safe_load(f)

batch_size = config["batch_size"]
# Square RGB images: [H, W, 3] with H == W == spatial_size.
img_shape = 2 * [config["spatial_size"]] + [3]
data_index = config["data_index"]
box_factor = config["box_factor"]
data_shape = [batch_size] + img_shape
init_shape = [config["init_batches"] * batch_size] + img_shape
# shuffle=False so evaluation iterates the test split in a fixed order.
testing_batches = get_batches(data_shape, data_index, train=False,
                              box_factor=box_factor, shuffle=False)

parser = argparse.ArgumentParser()
parser.add_argument("--likelihood_loss", choices=['l1', 'vgg_perception'])
parser.set_defaults(retrain=False)
opt = parser.parse_args()

# NOTE(review): out_dir and logger are not defined in this fragment —
# presumably produced by init_logging() earlier in the file; confirm they
# are in scope before this runs standalone.
model = Model(config, out_dir, logger, opt.likelihood_loss)
print('restoring the graph ... ')
model.restore_graph(model_path)

# Incomplete in this view: restore_launch continues past the visible source.
def restore_launch(mission_type, bch_limit=None):
    print('mission type: ', mission_type)