Example no. 1
0
def test_load_restore_model_from_checkpoint_smoke_test(
    logger: logging.Logger,
    graph_db: graph_tuple_database.Database,
    ir_db: ir_database.Database,
):
    """Test creating and restoring model from checkpoint."""
    # Generate a unique run id so this test cannot collide with other runs.
    unique_run_id = run_id_lib.RunId.GenerateUnique(
        f"mock{random.randint(0, int(1e6)):06}")

    # Build and initialize a fresh model under the generated run id.
    model = graph_lstm.GraphLstm(
        logger, graph_db, ir_db=ir_db, run_id=unique_run_id)
    model.Initialize()

    # Round-trip: snapshot the model state, then restore from that snapshot.
    checkpoint = model.SaveCheckpoint()
    model.RestoreFrom(checkpoint)
Example no. 2
0
def test_classifier_call(
    epoch_type: epoch.Type,
    logger: logging.Logger,
    graph_db: graph_tuple_database.Database,
    ir_db: ir_database.Database,
):
    """Test running a graph classifier."""
    # Unique run id to isolate this test's model from any other run.
    unique_run_id = run_id_lib.RunId.GenerateUnique(
        f"mock{random.randint(0, int(1e6)):06}")

    # Small batch / short sequences keep the smoke test fast.
    model = graph_lstm.GraphLstm(
        logger,
        graph_db,
        ir_db=ir_db,
        batch_size=8,
        padded_sequence_length=100,
        run_id=unique_run_id,
    )
    model.Initialize()

    # One graph split per epoch type.
    splits = {
        epoch.Type.TRAIN: [0],
        epoch.Type.VAL: [1],
        epoch.Type.TEST: [2],
    }
    batch_iterator = batch_iterator_lib.MakeBatchIterator(
        model=model,
        graph_db=graph_db,
        splits=splits,
        epoch_type=epoch_type,
    )

    results = model(
        epoch_type=epoch_type,
        batch_iterator=batch_iterator,
        logger=logger,
    )

    assert isinstance(results, epoch.Results)
    assert results.batch_count

    # Loss is recorded during training epochs only.
    assert bool(results.has_loss) == (epoch_type == epoch.Type.TRAIN)
Example no. 3
0
def test_load_restore_model_from_checkpoint_smoke_test(
    logger: logging.Logger,
    graph_db: graph_tuple_database.Database,
    ir_db: ir_database.Database,
):
    """Test creating and restoring model from checkpoint.

    Saves a checkpoint from an initialized model, restores it both into the
    same instance and into a brand-new instance, and runs a TEST epoch on
    each to verify that the restored state is usable.
    """

    def _make_model():
        # Construct a model with the shared test hyperparameters, so both
        # instances in this test are guaranteed to be configured identically.
        return graph_lstm.GraphLstm(
            logger,
            graph_db,
            ir_db=ir_db,
            batch_size=32,
            padded_sequence_length=10,
        )

    def _run_test_epoch(target_model):
        # Run a single TEST epoch to verify the model is in a working state.
        batch_iterator = batch_iterator_lib.MakeBatchIterator(
            model=target_model,
            graph_db=graph_db,
            splits={
                epoch.Type.TRAIN: [0],
                epoch.Type.VAL: [1],
                epoch.Type.TEST: [2],
            },
            epoch_type=epoch.Type.TEST,
        )
        target_model(
            epoch_type=epoch.Type.TEST,
            batch_iterator=batch_iterator,
            logger=logger,
        )

    # Create and initialize a model, then create a checkpoint from it.
    model = _make_model()
    model.Initialize()
    checkpoint_ref = model.SaveCheckpoint()

    # Reset the model state to the checkpoint and make sure it still works.
    model.RestoreFrom(checkpoint_ref)
    _run_test_epoch(model)

    # Create a new model instance, restore its state from the checkpoint,
    # and check that the restored instance works too.
    new_model = _make_model()
    new_model.RestoreFrom(checkpoint_ref)
    _run_test_epoch(new_model)