Example 1
def evaluate_model(_):
    if FLAGS.gpu >= 0:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    else:
        logging.info(
            "Not using a GPU. Specify one by passing --gpu {idx} with idx >= 0."
        )

    if FLAGS.dataset == "hdataset":
        data_index = hdataset_provider.HDatasetGenerator.initialize_hdataset(
            FLAGS.data)
        eval_index = data_index[~data_index['train']]

        eval_generator = hdataset_provider.HDatasetGenerator(eval_index)
    else:
        data_index = provider.ShapeNetGenerator.initialize_shapenet(
            FLAGS.data, class_choice=FLAGS.categories)
        eval_index = data_index[~data_index['train']]

        eval_generator = provider.ShapeNetGenerator(eval_index,
                                                    n_points=FLAGS.n_points)

    eval_dataset = eval_generator.create_dataset().batch(FLAGS.batch_size)

    atlas_net = model.AtlasNetV2(structure_type=FLAGS.structure_type,
                                 adjustment_type=FLAGS.adjustment)

    logging.info(f"Using {len(eval_generator)} samples for evaluation.")

    atlas_net.compile(optimizer=tf.keras.optimizers.Adam(),
                      loss=loss.chamfer_loss,
                      metrics=['mean_squared_error', loss.mean_chamfer_loss])

    # A single predict step builds the subclassed model's variables so
    # load_weights has existing layers to restore into.
    atlas_net.predict(eval_dataset, steps=1)
    atlas_net.load_weights(FLAGS.model)

    results = atlas_net.evaluate(eval_dataset,
                                 steps=len(eval_generator) // FLAGS.batch_size,
                                 return_dict=True)

    results['model_metadata'] = {
        'num_params': atlas_net.count_params(),
        'structure_type': FLAGS.structure_type,
        'adjustment_type': FLAGS.adjustment,
    }

    # Write the results dict directly; dumping an already-serialized
    # string would double-encode the JSON.
    with tf.io.gfile.GFile(FLAGS.result_file, "w") as outfile:
        json.dump(results, outfile)
Example 2
    def test_load_saved_model(self):
        atlasnet = model.AtlasNetV2(structure_type='patch',
                                    adjustment_type='linear',
                                    n_structures=10)
        atlasnet.compile(optimizer='Adam',
                         loss=loss.chamfer_loss,
                         metrics=['mean_squared_error'])
        df = provider.ShapeNetGenerator.initialize_shapenet(
            "../data/shape_net/", class_choice=['plane'], train_frac=0.7)
        generator = provider.ShapeNetGenerator(df,
                                               n_points=10000,
                                               visualize=False)
        dataset = generator.create_dataset().batch(BATCH_SIZE)
        # The first evaluate call builds the subclassed model's variables;
        # only then can the saved weights be restored and evaluated.
        atlasnet.evaluate(dataset, steps=1)
        atlasnet.load_weights('weights/atlasnet')
        atlasnet.evaluate(dataset, steps=1)
Example 3
    def test_fit_with_real_input_and_serialization(self):
        df = provider.ShapeNetGenerator.initialize_shapenet(
            "../data/shape_net/", class_choice=['plane'], train_frac=0.7)

        atlasnet = model.AtlasNetV2(structure_type='point',
                                    adjustment_type='linear',
                                    n_structures=10)
        generator = provider.ShapeNetGenerator(df,
                                               n_points=10000,
                                               visualize=False)
        dataset = generator.create_dataset().batch(BATCH_SIZE)
        atlasnet.compile(optimizer='Adam',
                         loss=loss.chamfer_loss,
                         metrics=['mean_squared_error'])
        print("Model compiled successfully!")
        atlasnet.fit(dataset, epochs=1, steps_per_epoch=1)

        atlasnet.save_weights('weights/atlasnet')
        import json
        print(json.dumps(atlasnet.get_config()))
Example 4
    def test_shapenet_provider_batch_size(self):
        dataset = provider.ShapeNetGenerator.initialize_shapenet(
            "../data/shape_net/", class_choice=None)
        generator = provider.ShapeNetGenerator(dataset, n_points=42)
        data = iter(generator.create_dataset().batch(8))
        assert next(data)[0].shape == (8, 42, 3)
Example 5
    def test_shapenet_provider_len(self):
        dataset = provider.ShapeNetGenerator.initialize_shapenet(
            "../data/shape_net/", class_choice=['plane'])
        generator = provider.ShapeNetGenerator(dataset, n_points=42)

        assert len(dataset) == len(generator)
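The two provider tests pin down ShapeNetGenerator's contract: create_dataset() yields (points, target) pairs of shape (n_points, 3), and len(generator) equals the length of its index. A minimal sketch of a class meeting that contract, assuming a hypothetical 'path' column of point-cloud files in the index DataFrame, might be:

import numpy as np
import tensorflow as tf

class PointCloudGenerator:
    def __init__(self, index, n_points):
        self.index = index
        self.n_points = n_points

    def __len__(self):
        # One sample per row of the index, as the len test expects.
        return len(self.index)

    def _samples(self):
        for path in self.index['path']:
            cloud = np.loadtxt(path, dtype=np.float32)[:, :3]
            # Randomly subsample a fixed number of points per cloud.
            points = cloud[np.random.choice(len(cloud), self.n_points)]
            yield points, points  # autoencoder: target equals input

    def create_dataset(self):
        return tf.data.Dataset.from_generator(
            self._samples,
            output_signature=(
                tf.TensorSpec((self.n_points, 3), tf.float32),
                tf.TensorSpec((self.n_points, 3), tf.float32),
            ))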
Example 6
def train_model(_):
    if FLAGS.fromcheckpoint is None:
        now = datetime.now()
        mlflow.set_experiment(
            f"{FLAGS.experiment} from {now.strftime('%m/%d/%Y, %H:%M:%S')}")
    else:
        mlflow.set_experiment(FLAGS.experiment)

    with mlflow.start_run():
        if FLAGS.gpu >= 0:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
        else:
            logging.info(
                "Not using a GPU. Specify one by passing --gpu {idx} with idx >= 0."
            )

        data_index = provider.ShapeNetGenerator.initialize_shapenet(
            FLAGS.data, class_choice=FLAGS.categories)
        train_index = data_index[data_index['train']]
        train_files, val_files = random_split(train_index, 1 - FLAGS.val_split)
        train_generator = provider.ShapeNetGenerator(train_files,
                                                     n_points=FLAGS.n_points)
        eval_generator = provider.ShapeNetGenerator(val_files,
                                                    n_points=FLAGS.n_points)

        train_dataset = train_generator.create_dataset().batch(
            FLAGS.batch_size)
        eval_dataset = eval_generator.create_dataset().batch(FLAGS.batch_size)

        logging.info(
            f"Using {len(train_generator)} samples for training and {len(eval_generator)} samples for evaluation."
        )

        atlas_net = model.AtlasNetV2(structure_type=FLAGS.structure_type,
                                     adjustment_type=FLAGS.adjustment)

        callbacks = [
            tf.keras.callbacks.TensorBoard(FLAGS.logdir, write_graph=False),
            tf.keras.callbacks.ModelCheckpoint(FLAGS.checkpoint,
                                               save_weights_only=True,
                                               save_best_only=True),
            MLflowCallback(),
        ]

        atlas_net.compile(
            optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate),
            loss=loss.chamfer_loss,
            metrics=['mean_squared_error', loss.mean_chamfer_loss])

        if FLAGS.fromcheckpoint is not None:
            # One training step builds the model's variables so the
            # checkpoint weights can be restored into them.
            atlas_net.fit(train_dataset, steps_per_epoch=1, epochs=1)
            atlas_net.load_weights(FLAGS.fromcheckpoint)

        atlas_net.fit(train_dataset,
                      steps_per_epoch=len(train_generator) // FLAGS.batch_size,
                      epochs=FLAGS.epochs,
                      callbacks=callbacks,
                      validation_data=eval_dataset,
                      validation_steps=len(eval_generator) // FLAGS.batch_size)

        mlflow.log_param('AtlasNet-Config', atlas_net.get_config())

        atlas_net.save_weights(FLAGS.output)
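Two helpers used above, random_split and MLflowCallback, come from the project's own modules and are not shown here. Under the assumption that random_split shuffles an index DataFrame and cuts it by fraction, a minimal sketch could be:

import pandas as pd

def random_split(index, frac, seed=42):
    # Shuffle the rows, then split the index into two disjoint parts.
    shuffled = index.sample(frac=1.0, random_state=seed)
    cut = int(len(shuffled) * frac)
    return shuffled.iloc[:cut], shuffled.iloc[cut:]

Likewise, a minimal MLflowCallback could forward the metrics Keras reports to the active MLflow run after each epoch (again an assumption, not the project's actual class):

import mlflow
import tensorflow as tf

class MLflowCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Log every metric of the finished epoch to the current run.
        for name, value in (logs or {}).items():
            mlflow.log_metric(name, float(value), step=epoch)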