# Imports reconstructed from the calls in this excerpt; ConfigReader,
# Dataset and Autoencoder are project classes whose import lines were not
# part of the original listing.
import argparse
import os
from time import time

import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument(
    '--path',
    help="The path where to look for subdirectories with .h5py files.")
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--threads',
                    type=int,
                    default=4,
                    help="Number of threads to use in the input pipeline.")
# These two flags are read further down but their definitions were cut off
# in the original listing; the flag names are inferred from those uses.
parser.add_argument('--data_path',
                    help="Path to the latent data to decode.")
parser.add_argument('--empty_block_detection_threshold', type=float)
args = parser.parse_args()

# Build the dataset and model, wire up the fast-inference input pipeline,
# and restore the trained weights.
config_path = os.path.join(os.path.dirname(__file__), "config.json")
config_obj = ConfigReader(config_path)
dataset = Dataset(config_obj)
dataset.batch_size = args.batch_size
data_iterator = dataset.load_custom_data(args.path,
                                         fast_inference=True,
                                         num_threads=args.threads)
model = Autoencoder(config_obj, dataset)
model.set_iterators(eval_from_input_iterator=data_iterator,
                    eval_from_placeholder=True,
                    eval_uses_fast_inference=True)
model.load(config_obj.data.get_string("model_save_path"))
model.summary()

# Encode two constant blocks (every voxel at -/+ the truncation threshold)
# to obtain the canonical latent codes of a completely full and a
# completely empty block.
input_ones = np.ones(
    [1, dataset.input_size(), dataset.input_size(), dataset.input_size(), 1])
full_block_latent = model.encode_from_placeholder(
    input_ones * -dataset.truncation_threshold)
empty_block_latent = model.encode_from_placeholder(
    input_ones * dataset.truncation_threshold)

# Swap the input pipeline for one that feeds latent codes instead of raw
# voxel blocks, then reload the weights into the rewired graph.
data_iterator = dataset.load_custom_data(
    args.data_path,
    fast_inference=True,
    input_is_latent=True,
    num_threads=1,
    full_block_latent=full_block_latent,
    empty_block_latent=empty_block_latent,
    empty_block_detection_threshold=args.empty_block_detection_threshold)
model.set_iterators(eval_from_latent_iterator=data_iterator)
model.load(config_obj.data.get_string("model_save_path"))

# Reusable buffer holding all decoded blocks of a single voxelgrid.
batch_container = np.zeros([
    dataset.number_of_blocks_per_voxelgrid(), dataset.block_size,
    dataset.block_size, dataset.block_size, 1
])
start = time()
try:
    while True:
        section_start = time()