Example #1
    def test_model(self):

        # Create the model.
        model = GAPNet()
        model.summary()

        # BatchNormalization layers are expected to contribute non-trainable
        # weights (their moving statistics); print anything else as suspicious.
        for x in model.non_trainable_weights:
            if "normalization" not in str(x):
                print(x)
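For context, here is a minimal, self-contained sketch (using a stand-in tf.keras model rather than GAPNet, purely for illustration) of why the test skips "normalization" weights: BatchNormalization layers contribute their moving statistics as non-trainable weights, so those are expected, and anything else is worth printing.

import tensorflow as tf

# Stand-in model; GAPNet from the snippet above is inspected the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, input_shape=(4,)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dense(2),
])

# BatchNormalization contributes moving_mean and moving_variance here, which
# is why the test above only prints weights without "normalization" in them.
for weight in model.non_trainable_weights:
    print(weight.name)  # e.g. "batch_normalization/moving_mean:0"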
Example #2
import glob
import os

# Find the run that matches the requested run id.
run_that_contains_model = None
for experiment_run in experiment.get_runs():
    if experiment_run.id == args.run_id:
        run_that_contains_model = experiment_run
        break
if run_that_contains_model is None:
    print("ERROR! Run not found!")
    exit(0)

# Download the model.
print("Downloading the model...")
output_directory = "model-" + args.run_id
run_that_contains_model.download_files(output_directory=output_directory)

# Instantiate the model with its weights.
print("Creating the model...")
model = GAPNet()
print("Loading model weights...")
model.load_weights(os.path.join(output_directory, "gapnet_weights.h5"))
model.summary()

# Get all files from the dataset.
print("Find all files for evaluation...")
pickle_files = glob.glob(os.path.join(dataset_path, "**", "*.p"))

# Evaluate all files.
# TODO parallelize this.
print("Evaluate all files...")
data = {"results": []}
for index, pickle_file in enumerate(pickle_files):
    # Use the file name without its extension as the sample name.
    name = os.path.basename(pickle_file).split(".")[0]
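The loop above is truncated, and the TODO asks for parallelization. A minimal sketch of one way to do it, assuming a hypothetical evaluate_file(pickle_file) helper that wraps the per-file work from the loop body:

from concurrent.futures import ProcessPoolExecutor

def evaluate_file(pickle_file):
    # Hypothetical helper: load one pickle file, run the model on it,
    # and return one entry for the "results" list.
    ...

# Process the files in parallel instead of the sequential for-loop above.
with ProcessPoolExecutor() as executor:
    data = {"results": list(executor.map(evaluate_file, pickle_files))}

Note that Keras models do not transfer cleanly across processes, so each worker would have to build and load the model itself; batching all files through a single model.predict() call is often the simpler optimization.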
Example #3
import os
import shutil
import sys
from datetime import datetime

import tensorflow as tf
# Imports assumed from tf.keras, since the snippet already uses
# tf.keras.callbacks.TensorBoard below. Project-local helpers (GAPNet,
# create_pointnet, DataGenerator, plot_history, save_history,
# onetenth_50_75) are assumed to be importable from the repository.
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam


def main():

    # Check command line arguments. The options mirror the branches below.
    model_names = ["pointnet", "gapnet"]
    if len(sys.argv) != 2 or sys.argv[1] not in model_names:
        print("Must provide name of model.")
        print("Options: " + " ".join(model_names))
        exit(1)
    model_name = sys.argv[1]

    # Name for this training run; the original snippet never defines it,
    # so a timestamp is assumed here.
    training_name = datetime.now().strftime("%Y%m%d-%H%M%S")

    # Data preparation.
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'

    # Hyperparameters.
    number_of_points = 1024
    epochs = 100
    batch_size = 32

    # Data generators for training and validation.
    train = DataGenerator(train_file,
                          batch_size,
                          number_of_points,
                          nb_classes,
                          train=True)
    val = DataGenerator(test_file,
                        batch_size,
                        number_of_points,
                        nb_classes,
                        train=False)

    # Create the model.
    if model_name == "pointnet":
        model = create_pointnet(number_of_points, nb_classes)
    elif model_name == "gapnet":
        model = GAPNet()
    model.summary()

    # Ensure output paths.
    output_path = "logs"
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, model_name)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, training_name)
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    os.mkdir(output_path)

    # Compile the model.
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Checkpoint callback.
    checkpoint = ModelCheckpoint(os.path.join(output_path, "model.h5"),
                                 monitor="val_acc",
                                 save_weights_only=True,
                                 save_best_only=True,
                                 verbose=1)

    # Log training progress with TensorBoard.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=output_path,
        histogram_freq=0,
        batch_size=batch_size,
        write_graph=True,
        write_grads=False,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None,
        embeddings_data=None,
        update_freq="epoch")

    callbacks = []
    #callbacks.append(checkpoint)
    callbacks.append(onetenth_50_75(lr))
    callbacks.append(tensorboard_callback)

    # Train the model.
    history = model.fit_generator(train.generator(),
                                  steps_per_epoch=9840 // batch_size,
                                  epochs=epochs,
                                  validation_data=val.generator(),
                                  validation_steps=2468 // batch_size,
                                  callbacks=callbacks,
                                  verbose=1)

    # Save history and model.
    plot_history(history, output_path)
    save_history(history, output_path)
    model.save_weights(os.path.join(output_path, "model_weights.h5"))
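The onetenth_50_75 callback is not defined in the snippet. Judging by its name and the fact that it is built from the initial learning rate, a plausible implementation (an assumption, not the project's actual code) drops the learning rate to one tenth at epoch 50 and again at epoch 75, matching the 100 scheduled epochs:

from tensorflow.keras.callbacks import LearningRateScheduler

def onetenth_50_75(lr):
    # Assumed schedule: base lr for the first half of training,
    # then 0.1 * lr, then 0.01 * lr for the final quarter.
    def schedule(epoch):
        if epoch < 50:
            return lr
        if epoch < 75:
            return lr * 0.1
        return lr * 0.01
    return LearningRateScheduler(schedule, verbose=0)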
Example #4
import tensorflow as tf
from tensorflow.keras import callbacks

# Build the validation dataset from the list of pickle file paths.
paths = paths_validate
dataset = tf.data.Dataset.from_tensor_slices(paths)
dataset = dataset.map(lambda path: tf_load_pickle(path, subsample_size,
                                                  channels, targets_indices))
dataset = dataset.cache()
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset_validate = dataset
del dataset

# Note: Now the datasets are prepared.

# Start logging. The run is needed by AzureLogCallback below.
run = experiment.start_logging()

# Instantiate GAPNet.
model = GAPNet()
model.summary()

# Get ready to add callbacks.
training_callbacks = []

# Pushes metrics and losses into the run on AzureML.
class AzureLogCallback(callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        if logs is not None:
            for key, value in logs.items():
                run.log(key, value)
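tf_load_pickle is not shown either. From the call site it receives a path tensor plus subsample_size, channels, and targets_indices; a plausible sketch (the pickle layout, a (pointcloud, targets) tuple per file, is an assumption) wraps the Python-side loading in tf.py_function so it can run inside Dataset.map:

import pickle

import numpy as np
import tensorflow as tf

def tf_load_pickle(path, subsample_size, channels, targets_indices):

    def load(path):
        # Assumed pickle layout: a (pointcloud, targets) tuple per file.
        with open(path.numpy(), "rb") as file:
            pointcloud, targets = pickle.load(file)
        # Randomly subsample the pointcloud and keep the requested channels.
        indices = np.random.choice(len(pointcloud), subsample_size)
        x = pointcloud[indices][:, :channels].astype("float32")
        y = np.asarray(targets, dtype="float32")[targets_indices]
        return x, y

    x, y = tf.py_function(load, [path], [tf.float32, tf.float32])
    # py_function loses shape information, so restore it for downstream layers.
    x.set_shape((subsample_size, channels))
    return x, y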