Example #1
        help='Number of CPU threads to use for parallel decoding.')

    args = parser.parse_args()

    assert args.resolution > 0, 'resolution must be positive'
    assert args.batch_size > 0, 'batch_size must be positive'

    DATA_FORMAT = 'channels_first'

    args.input_dir = os.path.normpath(args.input_dir)
    len_input_dir = len(args.input_dir)
    assert os.path.exists(args.input_dir), "Input directory not found"

    input_glob = os.path.join(args.input_dir, args.input_pattern)

    files = pc_io.get_files(input_glob)
    assert len(files) > 0, "No input files found"
    # Strip the input directory prefix so outputs mirror the input tree.
    filenames = [x[len_input_dir + 1:] for x in files]
    output_files = [
        os.path.join(args.output_dir, x + '.bin') for x in filenames
    ]

    p_min, p_max, dense_tensor_shape = pc_io.get_shape_data(args.resolution)
    points = pc_io.load_points(files,
                               p_min,
                               p_max,
                               batch_size=args.read_batch_size)

    estimator = tf.estimator.Estimator(model_fn=compression_model.model_fn,
                                       model_dir=args.checkpoint_dir,
                                       params={
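
All four examples lean on the helper module pc_io, which is not shown in these snippets. As a rough orientation, a minimal stand-in could look like the sketch below. This is an assumption, not the module's real implementation: the actual helper presumably parallelizes reads (which is what read_batch_size and the "CPU threads" option above suggest), and PyntCloud is only one possible .ply reader.

import glob
import numpy as np
from pyntcloud import PyntCloud  # assumed dependency; any .ply reader works

def get_files(pattern):
    # Expand the glob pattern into a sorted, deterministic file list.
    return sorted(glob.glob(pattern, recursive=True))

def load_points(files, p_min=None, p_max=None, batch_size=32):
    # Read each cloud as an (N, 3) float array; optionally drop points
    # outside the [p_min, p_max) bounding box. The real helper presumably
    # decodes `batch_size` files at a time across worker threads.
    clouds = []
    for f in files:
        pts = PyntCloud.from_file(f).points[['x', 'y', 'z']].values
        if p_min is not None and p_max is not None:
            keep = np.all((pts >= p_min) & (pts < p_max), axis=1)
            pts = pts[keep]
        clouds.append(pts)
    # An object array keeps ragged clouds while still allowing the
    # boolean-mask indexing (points[files_cat == 'train']) used below.
    out = np.empty(len(clouds), dtype=object)
    out[:] = clouds
    return out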
Example #2
def train():
    """Trains the model."""
    if args.verbose:
        tf.logging.set_verbosity(tf.logging.INFO)

    files = pc_io.get_files(args.train_glob)
    points = pc_io.load_points(files)
    # The parent directory name of each file determines its split.
    files_cat = np.array(
        [os.path.split(os.path.split(x)[0])[1] for x in files])
    for cat in files_cat:
        assert (cat == 'train') or (cat == 'eval')
    TRAIN_DATASET = points[files_cat == 'train']
    EVAL_DATASET = points[files_cat == 'eval']
    assert (len(TRAIN_DATASET) + len(EVAL_DATASET) == len(points))

    config = tf.estimator.RunConfig(
        keep_checkpoint_every_n_hours=1,
        save_checkpoints_secs=args.save_checkpoints_secs,  # 600
        keep_checkpoint_max=args.keep_checkpoint_max,  # 50
        log_step_count_steps=args.log_step_count_steps,  # 100
        save_summary_steps=args.save_summary_steps,  # 100
        tf_random_seed=42)

    estimator = tf.estimator.Estimator(
        model_fn=compression_model_2048.model_fn,
        model_dir=args.checkpoint_dir,
        config=config,
        params={
            'num_points': args.num_point,
            'batch_size': args.batch_size,
            'knn': args.knn,
            'alpha': args.alpha,
            'gamma': args.gamma,
            'lmbda': args.lmbda,
            'additional_metrics': not args.no_additional_metrics,
            'checkpoint_dir': args.checkpoint_dir,
            'data_format': DATA_FORMAT  # channels_first
        })

    hooks = None
    if args.debug_address is not None:
        hooks = [tf_debug.TensorBoardDebugHook(args.debug_address)]

    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: compression_model_2048.input_fn(
            TRAIN_DATASET,
            args.batch_size,
            args.preprocess_threads,
            prefetch_size=args.prefetch_size),
        max_steps=args.max_steps,
        hooks=hooks)

    val_spec = tf.estimator.EvalSpec(
        input_fn=lambda: compression_model_2048.input_fn(
            EVAL_DATASET,
            args.batch_size,
            args.preprocess_threads,
            repeat=False,
            prefetch_size=args.prefetch_size),
        steps=None,
        hooks=hooks)
    tf.estimator.train_and_evaluate(estimator, train_spec, val_spec)
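
compression_model_2048.input_fn is also external to these snippets. Under the Estimator API it must return a tf.data dataset; a minimal sketch consistent with the call sites above (signature inferred from the calls, defaults assumed) might be:

def input_fn(points, batch_size, preprocess_threads, repeat=True,
             prefetch_size=1):
    # Build the input pipeline on the CPU so the accelerator only sees
    # ready batches. Each cloud has a fixed size (num_points), so plain
    # batching works even with the None dimension declared here.
    with tf.device('/cpu:0'):
        dataset = tf.data.Dataset.from_generator(
            lambda: iter(points), tf.float32, tf.TensorShape([None, 3]))
        if repeat:
            dataset = dataset.shuffle(buffer_size=len(points))
            dataset = dataset.repeat()
        # preprocess_threads would bound num_parallel_calls in any
        # per-sample map step; no preprocessing is applied in this sketch.
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(prefetch_size)
    return dataset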
Example #3
def train():
    """Trains the model."""

    if args.verbose:
        tf.logging.set_verbosity(tf.logging.INFO)

    p_min, p_max, dense_tensor_shape = pc_io.get_shape_data(args.resolution)
    files = pc_io.get_files(args.train_glob)
    points = pc_io.load_points(files, p_min, p_max)

    files_cat = np.array(
        [os.path.split(os.path.split(x)[0])[1] for x in files])
    for cat in files_cat:
        assert (cat == 'train') or (cat == 'test')
    points_train = points[files_cat == 'train']
    points_test = points[files_cat == 'test']

    assert (len(points_train) + len(points_test) == len(points))

    config = tf.estimator.RunConfig(
        keep_checkpoint_every_n_hours=1,
        save_checkpoints_secs=args.save_checkpoints_secs,
        keep_checkpoint_max=args.keep_checkpoint_max,
        log_step_count_steps=args.log_step_count_steps,
        save_summary_steps=args.save_summary_steps,
        tf_random_seed=42)
    estimator = tf.estimator.Estimator(
        model_fn=compression_model.model_fn,
        model_dir=args.checkpoint_dir,
        config=config,
        params={
            'num_filters': args.num_filters,
            'alpha': args.alpha,
            'gamma': args.gamma,
            'lmbda': args.lmbda,
            'additional_metrics': not args.no_additional_metrics,
            'checkpoint_dir': args.checkpoint_dir,
            'data_format': DATA_FORMAT
        })

    hooks = None
    if args.debug_address is not None:
        hooks = [tf_debug.TensorBoardDebugHook(args.debug_address)]

    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: compression_model.input_fn(
            points_train,
            args.batch_size,
            dense_tensor_shape,
            args.preprocess_threads,
            prefetch_size=args.prefetch_size),
        max_steps=args.max_steps,
        hooks=hooks)

    val_spec = tf.estimator.EvalSpec(
        input_fn=lambda: compression_model.input_fn(
            points_test,
            args.batch_size,
            dense_tensor_shape,
            args.preprocess_threads,
            repeat=False,
            prefetch_size=args.prefetch_size),
        steps=None,
        hooks=hooks)

    tf.estimator.train_and_evaluate(estimator, train_spec, val_spec)
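
pc_io.get_shape_data is the remaining unseen helper here. Judging by how its return values are used (a bounding box fed to load_points and a dense tensor shape fed to input_fn, with DATA_FORMAT = 'channels_first'), a plausible sketch, assumptions included, is:

def get_shape_data(resolution):
    # Hypothetical: bounding box [0, resolution) per axis, and a dense
    # occupancy grid with one feature channel in channels_first layout.
    p_min = np.zeros(3)
    p_max = np.full(3, resolution)
    dense_tensor_shape = np.array([1, resolution, resolution, resolution])
    return p_min, p_max, dense_tensor_shape

def points_to_grid(pts, dense_tensor_shape):
    # Scatter integer point coordinates into the dense occupancy grid;
    # clouds are already clipped to the bounding box by load_points.
    grid = np.zeros(dense_tensor_shape, dtype=np.float32)
    idx = pts.astype(np.int32)
    grid[0, idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0
    return grid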
Example #4
    args.ori_dir = os.path.normpath(args.ori_dir)
    args.decompressed_dir = os.path.normpath(args.decompressed_dir)

    assert os.path.exists(args.ori_dir), "Original directory not found"
    assert os.path.exists(
        args.decompressed_dir), "Decompressed directory not found"

    compressed_dir_supplied = args.compressed_dir is not None
    if compressed_dir_supplied:
        args.compressed_dir = os.path.normpath(args.compressed_dir)
        assert os.path.exists(
            args.compressed_dir), "Compressed directory not found"

    logger.info("Loading files list")
    ori_glob = os.path.join(args.ori_dir, args.ori_pattern)
    files = pc_io.get_files(ori_glob)
    assert len(files) > 0, "No ori files found"
    print('ori files', files)
    filenames = [x[len(args.ori_dir) + 1:] for x in files]
    if compressed_dir_supplied:
        compressed_files = [
            os.path.join(args.compressed_dir, x + args.compressed_suffix)
            for x in filenames
        ]
    decompressed_files = [
        os.path.join(args.decompressed_dir, x + args.decompressed_suffix)
        for x in filenames
    ]
    if compressed_dir_supplied:
        print('compressed files', compressed_files)
    print('decompressed files', decompressed_files)
    logger.info("Checking filenames consistency")