Example #1
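A correctness test from the MegEngine test suite: it loads a saved MNIST model and calls run_train / run_eval with use_adaptive_pooling=True under several flag combinations, including one training run driven by a SublinearMemoryConfig.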
import os

import megengine as mge
from megengine.jit import SublinearMemoryConfig


# Assumed context: set_execution_strategy, run_train and run_eval are helpers
# defined in the surrounding MegEngine correctness-test module and are not
# shown in this snippet.
def test_correctness_use_adaptive_pooling():
    if mge.is_cuda_available():
        model_name = "mnist_model_with_test.mge"
    else:
        model_name = "mnist_model_with_test_cpu.mge"
    model_path = os.path.join(os.path.dirname(__file__), model_name)
    set_execution_strategy("HEURISTIC_REPRODUCIBLE")

    run_train(model_path, False, False, max_err=1e-5, use_adaptive_pooling=True)
    run_train(model_path, True, False, max_err=1e-5, use_adaptive_pooling=True)
    run_train(model_path, True, True, max_err=1e-5, use_adaptive_pooling=True)

    # sublinear
    config = SublinearMemoryConfig(genetic_nr_iter=10)
    run_train(
        model_path,
        True,
        True,
        sublinear_memory_config=config,
        max_err=1e-5,
        use_adaptive_pooling=True,
    )

    run_eval(model_path, False, max_err=1e-7, use_adaptive_pooling=True)
    run_eval(model_path, True, max_err=1e-7, use_adaptive_pooling=True)
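
run_train and run_eval themselves are not shown above. As a rough sketch only (make_train_step, the net/opt arguments and the squared-error loss are illustrative assumptions, not the actual test helpers), such a helper would typically forward the config into jit.trace the same way Example #2 does:

import megengine.jit as jit


def make_train_step(net, opt, sublinear_memory_config=None):
    # Forward the optional SublinearMemoryConfig into the symbolic trace.
    @jit.trace(symbolic=True, sublinear_memory_config=sublinear_memory_config)
    def train_step(data, label):
        pred = net(data)
        loss = ((pred - label) ** 2).mean()  # illustrative loss only
        opt.backward(loss)  # old-style optimizer API, as used in Example #3
        return loss

    return train_step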
Example #2
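A minimal unit test: a function traced with symbolic=True and a SublinearMemoryConfig whose genetic search is capped at 10 iterations (genetic_nr_iter=10).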
import megengine.jit as jit
from megengine.jit import SublinearMemoryConfig


def test_sublinear():
    config = SublinearMemoryConfig(genetic_nr_iter=10)

    @jit.trace(symbolic=True, sublinear_memory_config=config)
    def f(x):
        return x + x

    f([0.0])
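
Roughly speaking, the sublinear-memory optimization lowers peak memory by recomputing intermediate activations during the backward pass instead of keeping them all alive, at the cost of extra compute; the config is paired with symbolic=True in the trace above, and genetic_nr_iter=10 keeps the genetic search for a recomputation plan short so the test runs quickly.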
Example #3
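An excerpt from a MegEngine-based optical/SAR matching model: a box-decoding helper and the beginning of a traced training step that computes classification, regression and center-score losses.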
import megengine.distributed as dist
import megengine.functional as F
from megengine.jit import trace, SublinearMemoryConfig

from ..builder import build_backbone
from ..registry import MODELS


def get_box(xy_ctr, offsets):
    """Decode predicted offsets into corner-format boxes.

    xy_ctr: [1, 2, 37, 37]
    offsets: [B, 2, 37, 37]
    returns: [B, 4, 37, 37] as (x0, y0, x1, y1)
    """
    xy0 = (xy_ctr - offsets)  # top-left
    xy1 = xy0 + 511  # bottom-right
    bboxes_pred = F.concat([xy0, xy1], axis=1)  # (B,4,H,W)
    return bboxes_pred
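

# Rough shape check, not part of the original file (the helper name
# _get_box_shape_demo and B=4 are arbitrary): broadcasting the (1, 2, 37, 37)
# center grid against (B, 2, 37, 37) offsets gives a (B, 4, 37, 37) box map
# after the concat in get_box.
def _get_box_shape_demo():
    import numpy as np
    from megengine import tensor

    xy_ctr = tensor(np.zeros((1, 2, 37, 37), dtype="float32"))
    offsets = tensor(np.zeros((4, 2, 37, 37), dtype="float32"))
    return get_box(xy_ctr, offsets).shape  # expect (4, 4, 37, 37)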


config = SublinearMemoryConfig()


@trace(symbolic=True)
def train_generator_batch(optical, sar, label, *, opt, netG):
    netG.train()
    cls_score, offsets, ctr_score = netG(sar, optical)
    loss, loss_cls, loss_reg, loss_ctr = netG.loss(cls_score, offsets,
                                                   ctr_score, label)
    opt.backward(loss)
    if dist.is_distributed():
        # do all reduce mean
        pass

    # performance on the training data
    B, _, _, _ = cls_score.shape