Exemplo n.º 1
0
def test_concat_with_type():
    """Check that Concat produces consistent results on CPU/GPU across dtypes."""
    sym = mx.sym.Concat(name="concat", num_args=2)
    shape = (2, 10)
    # (device, dtype) configurations compared against each other.
    combos = [
        (mx.gpu(0), np.float64),
        (mx.gpu(0), np.float32),
        (mx.gpu(0), np.float16),
        (mx.cpu(0), np.float64),
        (mx.cpu(0), np.float32),
    ]
    ctx_list = [
        {
            "ctx": dev,
            "concat_arg1": shape,
            "concat_arg0": shape,
            "type_dict": {"concat_arg0": dtype, "concat_arg1": dtype},
        }
        for dev, dtype in combos
    ]
    check_consistency(sym, ctx_list)
Exemplo n.º 2
0
def test_elementwisesum_with_type():
    """Check that ElementWiseSum is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.ElementWiseSum(name="ews", num_args=2)
    shape = (2, 10)
    # (device, dtype) configurations compared against each other.
    combos = [
        (mx.gpu(0), np.float64),
        (mx.gpu(0), np.float32),
        (mx.gpu(0), np.float16),
        (mx.cpu(0), np.float64),
        (mx.cpu(0), np.float32),
    ]
    ctx_list = [
        {
            "ctx": dev,
            "ews_arg1": shape,
            "ews_arg0": shape,
            "type_dict": {"ews_arg0": dtype, "ews_arg1": dtype},
        }
        for dev, dtype in combos
    ]
    check_consistency(sym, ctx_list)
Exemplo n.º 3
0
def test_reshape_with_type():
    """Check that Reshape is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Reshape(name='reshape', shape=(-1, 1, 1, 0))
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'reshape_data': data_shape,
                 'type_dict': {'reshape_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 4
0
def test_deconvolution_with_type():
    """Check Deconvolution consistency on CPU/GPU, including grad accumulation."""
    sym = mx.sym.Deconvolution(num_filter=2, kernel=(3, 3), name='deconv')
    data_shape = (2, 2, 10, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'deconv_data': data_shape,
                 'type_dict': {'deconv_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
    # Repeat with gradient accumulation instead of overwrite.
    check_consistency(sym, ctx_list, grad_req="add")
Exemplo n.º 5
0
def test_blockgrad_with_type():
    """Check that BlockGrad is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.BlockGrad(name='bg')
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'bg_data': data_shape,
                 'type_dict': {'bg_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 6
0
def test_convolution_with_type():
    """Check Convolution consistency across devices, dtypes, and layouts.

    Builds two symbols that should compute the same thing:
    * ``sym1`` -- a plain NCHW convolution.
    * ``sym2`` -- an NHWC convolution wrapped in transposes so that its
      external interface (inputs and output) stays NCHW.
    The i-th entry of ``sym`` is paired with the i-th entry of
    ``ctx_list``, so the order of both lists must stay in sync.
    """
    np.random.seed(1234)
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')

    data = mx.sym.Variable('conv_data')
    w = mx.sym.Variable('conv_weight')
    b = mx.sym.Variable('conv_bias')
    # Transpose weight/data into NHWC, convolve, then transpose the
    # result back to NCHW so outputs are directly comparable to sym1.
    w = mx.sym.transpose(w, axes=(0,2,3,1))
    sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
    sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
    sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')

    sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                # NHWC
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
               np.dtype(np.float32): 1e-3,
               np.dtype(np.float64): 1e-5,
               np.dtype(np.uint8): 0,
               np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
Exemplo n.º 7
0
def test_swapaxis_with_type():
    """Check that SwapAxis is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.SwapAxis(name='swap', dim1=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'swap_data': data_shape,
                 'type_dict': {'swap_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 8
0
def test_fullyconnected_with_type():
    """Check that FullyConnected is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    data_shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'inner_data': data_shape,
                 'type_dict': {'inner_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 9
0
def test_deconvolution_with_type():
    """Check that Deconvolution is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
    data_shape = (2, 2, 10, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'deconv_data': data_shape,
                 'type_dict': {'deconv_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 10
0
def test_fullyconnected_with_type():
    """Check that FullyConnected is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    data_shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'inner_data': data_shape,
                 'type_dict': {'inner_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 11
0
def test_batchnorm_with_type():
    """Check BatchNorm CPU/GPU consistency with gamma learned and fixed."""
    data_shape = (10, 2, 10, 10)
    ctx_list = [{'ctx': dev, 'norm_data': data_shape,
                 'type_dict': {'norm_data': np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0))]
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False)
    check_consistency(sym, ctx_list)

    # Same comparison with gamma frozen to 1.
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True)
    check_consistency(sym, ctx_list)
Exemplo n.º 12
0
def test_upsampling_with_type():
    """Check that nearest-neighbor UpSampling is consistent on CPU/GPU."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type = 'nearest', num_args=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'up_arg0': data_shape,
                 'type_dict': {'up_arg0': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 13
0
def test_batchnorm_with_type():
    """Check BatchNorm CPU/GPU consistency with gamma learned and fixed."""
    data_shape = (10, 2, 10, 10)
    ctx_list = [{'ctx': dev, 'norm_data': data_shape,
                 'type_dict': {'norm_data': np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0))]
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False)
    check_consistency(sym, ctx_list)

    # Same comparison with gamma frozen to 1.
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True)
    check_consistency(sym, ctx_list)
Exemplo n.º 14
0
def test_blockgrad_with_type():
    """Check that BlockGrad is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.BlockGrad(name='bg')
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'bg_data': data_shape,
                 'type_dict': {'bg_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 15
0
def test_reshape_with_type():
    """Check that Reshape is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'reshape_data': data_shape,
                 'type_dict': {'reshape_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 16
0
def test_upsampling_bilinear_with_type():
    """Check that bilinear UpSampling is consistent on CPU/GPU."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'up_data': data_shape,
                 'type_dict': {'up_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 17
0
def test_swapaxis_with_type():
    """Check that SwapAxis is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.SwapAxis(name='swap', dim1=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'swap_data': data_shape,
                 'type_dict': {'swap_data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 18
0
def test_activation_with_type():
    """Check that sigmoid Activation is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Activation(name='act', act_type='sigmoid')
    data_shape = (2, 2, 10, 10)
    # Full dtype sweep on both devices (GPU first, matching the suite layout).
    ctx_list = [{'ctx': dev, 'act_data': data_shape,
                 'type_dict': {'act_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list)
Exemplo n.º 19
0
def test_activation_with_type():
    """Check that sigmoid Activation is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Activation(name='act', act_type='sigmoid')
    data_shape = (2, 2, 10, 10)
    ctx_list = [{'ctx': dev, 'act_data': data_shape,
                 'type_dict': {'act_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list)
Exemplo n.º 20
0
def test_svmoutput_with_type():
    """Check that SVMOutput (linear) is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
    data_shape = (20, 10)
    ctx_list = [{'ctx': dev, 'svmoutput_data': data_shape,
                 'type_dict': {'svmoutput_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list)
Exemplo n.º 21
0
def test_embedding_with_type():
    """Check that Embedding is consistent on CPU/GPU across dtypes.

    Embedding input holds row indices into the weight table, so supply
    valid integers in [0, input_dim) via ``arg_params`` instead of
    letting the harness fill the data with arbitrary random values
    (matches the other embedding tests in this suite).
    """
    sym = mx.sym.Embedding(name='embedding', input_dim=10, output_dim=20)
    ctx_list = [{'ctx': mx.gpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float64}},
                {'ctx': mx.gpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float32}},
                {'ctx': mx.gpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float16}},
                {'ctx': mx.cpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float64}},
                {'ctx': mx.cpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float32}},
                {'ctx': mx.cpu(0), 'embedding_data': (2, 10), 'type_dict': {'embedding_data': np.float16}}]
    # Valid lookup indices; no gradient flows to the (integer) data input.
    arg_params = {'embedding_data': np.random.randint(low=0, high=10, size=(2, 10))}
    check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                      arg_params=arg_params)
Exemplo n.º 22
0
def test_fullyconnected_with_type():
    """Check that FullyConnected is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.FullyConnected(num_hidden=3, name="inner")
    data_shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "inner_data": data_shape,
                 "type_dict": {"inner_data": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 23
0
def test_blockgrad_with_type():
    """Check that BlockGrad is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.BlockGrad(name="bg")
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "bg_data": data_shape,
                 "type_dict": {"bg_data": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 24
0
def test_swapaxis_with_type():
    """Check that SwapAxis is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.SwapAxis(name="swap", dim1=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "swap_data": data_shape,
                 "type_dict": {"swap_data": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 25
0
def test_reshape_with_type():
    """Check that Reshape is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Reshape(name="reshape", shape=(-1, 1, 1, 0))
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "reshape_data": data_shape,
                 "type_dict": {"reshape_data": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 26
0
def test_batchnorm_with_type():
    """Check BatchNorm CPU/GPU consistency with gamma learned and fixed."""
    data_shape = (10, 2, 10, 10)
    ctx_list = [{"ctx": dev, "norm_data": data_shape,
                 "type_dict": {"norm_data": np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0))]
    sym = mx.sym.BatchNorm(name="norm", fix_gamma=False)
    check_consistency(sym, ctx_list)

    # Same comparison with gamma frozen to 1.
    sym = mx.sym.BatchNorm(name="norm", fix_gamma=True)
    check_consistency(sym, ctx_list)
Exemplo n.º 27
0
def test_upsampling_with_type():
    """Check that nearest-neighbor UpSampling is consistent on CPU/GPU."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name="up", sample_type="nearest", num_args=1)
    data_shape = (2, 2, 2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "up_arg0": data_shape,
                 "type_dict": {"up_arg0": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 28
0
def test_deconvolution_with_type():
    """Check that Deconvolution is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Deconvolution(num_filter=2, kernel=(3, 3), name="deconv")
    data_shape = (2, 2, 10, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{"ctx": dev, "deconv_data": data_shape,
                 "type_dict": {"deconv_data": dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 29
0
def test_embedding_with_type():
    """Check that Embedding is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Embedding(name='embedding', input_dim=10, output_dim=20)
    data_shape = (2, 10)
    ctx_list = [{'ctx': dev, 'embedding_data': data_shape,
                 'type_dict': {'embedding_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    # Lookup indices must be valid rows of the embedding table.
    arg_params = {'embedding_data': np.random.randint(low=0, high=10, size=data_shape)}
    check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                      arg_params=arg_params)
Exemplo n.º 30
0
def test_activation_with_type():
    """Check that sigmoid Activation is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Activation(name="act", act_type="sigmoid")
    data_shape = (2, 2, 10, 10)
    ctx_list = [{"ctx": dev, "act_data": data_shape,
                 "type_dict": {"act_data": dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list)
Exemplo n.º 31
0
def test_embedding_with_type():
    """Check that Embedding is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Embedding(name='embedding', input_dim=10, output_dim=20)
    data_shape = (2, 10)
    ctx_list = [{'ctx': dev, 'embedding_data': data_shape,
                 'type_dict': {'embedding_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    # Lookup indices must be valid rows of the embedding table.
    arg_params = {'embedding_data': np.random.randint(low=0, high=10, size=data_shape)}
    check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                      arg_params=arg_params)
Exemplo n.º 32
0
def test_embedding_with_type():
    """Check that Embedding is consistent on CPU/GPU across dtypes.

    Embedding input holds row indices into the weight table, so supply
    valid integers in [0, input_dim) via ``arg_params`` instead of
    letting the harness fill the data with arbitrary random values
    (matches the other embedding tests in this suite).
    """
    sym = mx.sym.Embedding(name="embedding", input_dim=10, output_dim=20)
    ctx_list = [
        {"ctx": mx.gpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float64}},
        {"ctx": mx.gpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float32}},
        {"ctx": mx.gpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float16}},
        {"ctx": mx.cpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float64}},
        {"ctx": mx.cpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float32}},
        {"ctx": mx.cpu(0), "embedding_data": (2, 10), "type_dict": {"embedding_data": np.float16}},
    ]
    # Valid lookup indices; no gradient flows to the (integer) data input.
    arg_params = {"embedding_data": np.random.randint(low=0, high=10, size=(2, 10))}
    check_consistency(sym, ctx_list, grad_req={"embedding_data": "null", "embedding_weight": "write"},
                      arg_params=arg_params)
Exemplo n.º 33
0
def test_elementwisesum_with_type():
    """Check that ElementWiseSum is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.ElementWiseSum(name='ews', num_args=2)
    shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'ews_arg1': shape, 'ews_arg0': shape,
                 'type_dict': {'ews_arg0': dtype, 'ews_arg1': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 34
0
def test_concat_with_type():
    """Check that Concat is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Concat(name='concat', num_args=2)
    shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'concat_arg1': shape, 'concat_arg0': shape,
                 'type_dict': {'concat_arg0': dtype, 'concat_arg1': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 35
0
def test_elementwisesum_with_type():
    """Check that ElementWiseSum is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.ElementWiseSum(name='ews', num_args=2)
    shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'ews_arg1': shape, 'ews_arg0': shape,
                 'type_dict': {'ews_arg0': dtype, 'ews_arg1': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 36
0
def test_concat_with_type():
    """Check that Concat is consistent on CPU/GPU across dtypes."""
    sym = mx.sym.Concat(name='concat', num_args=2)
    shape = (2, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'concat_arg1': shape, 'concat_arg0': shape,
                 'type_dict': {'concat_arg0': dtype, 'concat_arg1': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
Exemplo n.º 37
0
 def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
     """Check Embedding CPU/GPU consistency over dtype combinations.

     For each (batch N, vocab V, dim D) configuration, builds one context
     entry per (data_type, weight_type, device) combination and compares
     results with check_consistency.

     Parameters
     ----------
     data_types : iterable of dtypes for the index input.
     weight_types : iterable of dtypes for the embedding weight.
     low_pad, high_pad : extend the sampled index range to
         [-low_pad, V + high_pad); nonzero values deliberately include
         out-of-range indices to exercise boundary handling.
     """
     NVD = [[20, 10, 20], [200, 10, 300]]
     for N, V, D in NVD:
         sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
         ctx_list = []
         for data_type in data_types:
             for weight_type in weight_types:
                 ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
                     'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
                 ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
                     'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
         # Indices are fixed via arg_params; data gets no gradient.
         arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
         check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                           arg_params=arg_params)
Exemplo n.º 38
0
def test_deconvolution_with_type():
    """Check Deconvolution CPU/GPU consistency with per-dtype tolerances."""
    sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
    data_shape = (2, 2, 10, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'deconv_data': data_shape,
                 'type_dict': {'deconv_data': dtype}}
                for dev, dtype in combos]
    # The true-fp16 configuration needs a much wider tolerance.
    tol = {np.dtype(np.float16): 0.3,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # Repeat with gradient accumulation instead of overwrite.
    check_consistency(sym, ctx_list, tol=tol, grad_req="add")
Exemplo n.º 39
0
def test_bilinear_sampler_with_type():
    """BilinearSampler consistency across devices and dtypes."""
    sym = mx.sym.BilinearSampler(data=mx.sym.Variable('data'),
                                 grid=mx.sym.Variable('grid'))
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
                 'type_dict': {'data': dtype}}
                for dev, dtype in combos]
    check_consistency(sym, ctx_list)
    # Repeat with gradient accumulation.
    check_consistency(sym, ctx_list, grad_req="add")
# Example 40
 def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
                                  pooling_convention='valid', global_pool=False):
     """Cross-check the pooling implementations named in ``pool_op_list``.

     Each recognized key contributes one (context, symbol) pair, in the fixed
     order below; all pairs are then compared with ``check_consistency``.
     """
     # (key, device, symbol builder, extra builder kwargs).  'pool_cpu' takes
     # no cudnn flag; 'pool_gpu' forces cuDNN off, 'pool_cudnn' forces it on.
     variants = [
         ('pool_v1_cpu', mx.cpu(0), mx.sym.Pooling_v1, {}),
         ('pool_v1_gpu', mx.gpu(0), mx.sym.Pooling_v1, {}),
         ('pool_cpu', mx.cpu(0), mx.sym.Pooling, {}),
         ('pool_gpu', mx.gpu(0), mx.sym.Pooling, {'cudnn_off': True}),
         ('pool_cudnn', mx.gpu(0), mx.sym.Pooling, {'cudnn_off': False}),
     ]
     ctx_list = []
     sym_list = []
     for key, dev, build, extra in variants:
         if key not in pool_op_list:
             continue
         ctx_list.append({'ctx': dev, 'pool_data': data,
                          'type_dict': {'pool_data': np.float32}})
         if global_pool:
             sym_list.append(build(kernel=kernel, pool_type=pool_type,
                                   global_pool=True, name='pool', **extra))
         else:
             sym_list.append(build(kernel=kernel, pad=pad, stride=stride,
                                   pool_type=pool_type,
                                   pooling_convention=pooling_convention,
                                   name='pool', **extra))
     check_consistency(sym_list, ctx_list)
# Example 41
def test_pooling_with_type():
    """Pooling consistency across devices/dtypes for several configurations."""
    shape = (10, 2, 10, 10)
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'pool_data': shape, 'type_dict': {'pool_data': dtype}}
                for dev, dtype in combos]

    # Each configuration is checked against the same context list.
    configs = [dict(kernel=(3, 3), stride=(2, 2), pool_type='max'),
               dict(kernel=(3, 3), pad=(1, 1), pool_type='avg'),
               dict(kernel=(5, 5), pad=(2, 2), pool_type='max'),
               dict(kernel=(3, 3), pad=(1, 1), pool_type='sum')]
    for kwargs in configs:
        check_consistency(mx.sym.Pooling(name='pool', **kwargs), ctx_list)
# Example 42
def test_embedding_with_type():
    """Embedding consistency for all three float dtypes on GPU and CPU."""
    sym = mx.sym.Embedding(name='embedding', input_dim=10, output_dim=20)
    ctx_list = [{'ctx': dev, 'embedding_data': (2, 10),
                 'type_dict': {'embedding_data': dtype}}
                for dev in (mx.gpu(0), mx.cpu(0))
                for dtype in (np.float64, np.float32, np.float16)]
    # Only the weight gets a gradient; indices do not.
    check_consistency(sym, ctx_list,
                      grad_req={'embedding_data': 'null',
                                'embedding_weight': 'write'})
# Example 43
def test_elementwisesum_with_type():
    """ElementWiseSum consistency for 1..5 inputs across devices and dtypes.

    For each argument count, builds one context entry per (device, dtype)
    combination — fp16 only on GPU — and checks GPU-vs-CPU agreement.
    """
    dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                 [mx.cpu(0), [np.float64, np.float32]]]
    for num_args in range(1, 6):
        ews_arg_shape = {'ews_arg' + str(i): (2, 10) for i in range(num_args)}
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, types in dev_types:
            for dtype in types:
                ctx_elem = {'ctx': dev,
                            'type_dict': {'ews_arg' + str(i): dtype
                                          for i in range(num_args)}}
                ctx_elem.update(ews_arg_shape)
                ctx_list.append(ctx_elem)
        # BUG FIX: this call previously sat outside the num_args loop, so
        # only the final configuration (num_args == 5) was ever checked.
        check_consistency(sym, ctx_list)
# Example 44
def test_grid_generator_with_type():
    """GridGenerator consistency for both 'affine' and 'warp' transforms."""
    data = mx.sym.Variable('data')
    # (transform_type, input shape) pairs; target shape is fixed at 20x20.
    for transform, in_shape in (('affine', (3, 6)), ('warp', (3, 2, 20, 20))):
        sym = mx.sym.GridGenerator(data=data, transform_type=transform,
                                   target_shape=(20, 20))
        ctx_list = [{'ctx': dev, 'data': in_shape,
                     'type_dict': {'data': np.float32}}
                    for dev in (mx.gpu(0), mx.cpu(0))]
        check_consistency(sym, ctx_list)
        check_consistency(sym, ctx_list, grad_req="add")
# Example 45
def test_take_with_type():
    """take() consistency over random shapes, devices, and float dtypes."""
    sym = mx.sym.take(name='take')
    for data_ndim in range(2, 5):
        for idx_ndim in range(1, 4):
            data_shape = tuple(np.random.randint(low=3, high=6)
                               for _ in range(data_ndim))
            idx_shape = tuple(np.random.randint(low=3, high=5)
                              for _ in range(idx_ndim))
            ctx_list = [{'ctx': dev,
                         'take_indices': idx_shape,
                         'take_a': data_shape,
                         'type_dict': {'take_indices': dtype, 'take_a': dtype}}
                        for dev in (mx.gpu(0), mx.cpu(0))
                        for dtype in (np.float64, np.float32, np.float16)]
            # Indices are always valid: in [0, data_shape[0]).
            arg_params = {'take_indices': np.random.randint(low=0,
                                                            high=data_shape[0],
                                                            size=idx_shape),
                          'take_a': np.random.normal(size=data_shape)}
            check_consistency(sym, ctx_list,
                              grad_req={'take_indices': 'null',
                                        'take_a': 'write'},
                              arg_params=arg_params)
# Example 46
def test_convolution_options():
    """Convolution consistency with pad/stride/dilate options, 2D and 3D."""
    # 2D convolution, NCHW.
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': dtype}}
                for dev, dtype in combos]
    for extra in (dict(pad=(1, 1)), dict(stride=(2, 2)), dict(dilate=(2, 2))):
        sym = mx.sym.Convolution(num_filter=3, kernel=(3, 3), name='conv', **extra)
        check_consistency(sym, ctx_list)

    # 3D convolution, NCDHW (GPU only; no fp16 case).
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': dtype}}
                for dtype in (np.float64, np.float32)]
    for extra in (dict(pad=(1, 1, 1)), dict(stride=(2, 2, 2))):
        sym = mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), name='conv', **extra)
        check_consistency(sym, ctx_list)
# Example 47
def test_convolution_versions():
    """Compare Convolution_v1, cuDNN, CPU, and non-cuDNN GPU convolutions."""
    # 2D convolution NCHW: contexts are paired positionally with syms below.
    devices = [mx.cpu(0), mx.gpu(0), mx.gpu(0), mx.cpu(0), mx.gpu(0)]
    ctx_list = [{'ctx': dev, 'conv_data': (2, 2, 7, 7),
                 'type_dict': {'conv_data': np.float32}}
                for dev in devices]
    syms = [
        mx.sym.Convolution_v1(num_filter=3, kernel=(3, 3), pad=(1, 1), name='conv'),
        mx.sym.Convolution_v1(num_filter=3, kernel=(3, 3), pad=(1, 1),
                              cudnn_off=True, name='conv'),
        mx.sym.Convolution(num_filter=3, kernel=(3, 3), pad=(1, 1), name='conv'),
        mx.sym.Convolution(num_filter=3, kernel=(3, 3), pad=(1, 1), name='conv'),
        mx.sym.Convolution(num_filter=3, kernel=(3, 3), pad=(1, 1),
                           cudnn_off=True, name='conv'),
    ]
    check_consistency(syms, ctx_list)

    # 3D convolution NCDHW: cuDNN, CPU, and non-cuDNN GPU.
    ctx_list = [{'ctx': dev, 'conv_data': (2, 2, 5, 7, 7),
                 'type_dict': {'conv_data': np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0), mx.gpu(0))]
    syms = [
        mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), pad=(1, 1, 1), name='conv'),
        mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), pad=(1, 1, 1), name='conv'),
        mx.sym.Convolution(num_filter=3, kernel=(2, 3, 3), pad=(1, 1, 1),
                           cudnn_off=True, name='conv'),
    ]
    check_consistency(syms, ctx_list)
# Example 48
def test_convolution_with_type():
    """Convolution consistency: NCHW layout vs an equivalent NHWC pipeline."""
    # Plain NCHW convolution.
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3, 3), name='conv')

    # Same computation expressed through NHWC: transpose data and weight in,
    # convolve with layout='NHWC', transpose the result back to NCHW.
    data = mx.sym.Variable('conv_data')
    weight = mx.sym.Variable('conv_weight')
    bias = mx.sym.Variable('conv_bias')
    weight = mx.sym.transpose(weight, axes=(0, 2, 3, 1))
    sym2 = mx.sym.transpose(data, axes=(0, 2, 3, 1))
    sym2 = mx.sym.Convolution(sym2, weight, bias, layout='NHWC',
                              num_filter=3, kernel=(3, 3))
    sym2 = mx.sym.transpose(sym2, axes=(0, 3, 1, 2), name='conv')

    # First five contexts use sym1, last two (NHWC) use sym2.
    sym = [sym1] * 5 + [sym2] * 2
    shape = (2, 2, 10, 10)
    nchw_combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
                   (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
                   (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'conv_data': shape, 'type_dict': {'conv_data': dtype}}
                for dev, dtype in nchw_combos]
    # NHWC
    for dtype in (np.float32, np.float16):
        ctx_list.append({'ctx': mx.gpu(0), 'conv_data': shape,
                         'conv_weight': (3, 2, 3, 3),
                         'type_dict': {'conv_data': dtype, 'conv_weight': dtype}})
    check_consistency(sym, ctx_list)
# Example 49
def test_pooling_with_type():
    """Max pooling consistency for valid/full conventions and global pooling."""
    combos = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32),
              (mx.gpu(0), np.float16), (mx.cpu(0), np.float64),
              (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'pool_data': (2, 2, 10, 10),
                 'type_dict': {'pool_data': dtype}}
                for dev, dtype in combos]
    configs = [dict(kernel=(3, 3), pool_type='max', pooling_convention='valid'),
               dict(kernel=(3, 3), pool_type='max', pooling_convention='full'),
               # Oversized kernel is ignored when global_pool is set.
               dict(kernel=(300, 300), pool_type='max', global_pool=True)]
    for kwargs in configs:
        check_consistency(mx.sym.Pooling(name='pool', **kwargs), ctx_list)
# Example 50
def check_consistency_NxM(sym_list, ctx_list, arg_params=None):
    """Check every symbol against every context (full N x M cross product).

    E.g. for sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3] this runs
    check_consistency([sym1, sym1, sym1, sym2, sym2, sym2],
                      [ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]).
    """
    n_ctx = len(ctx_list)
    repeated_syms = np.repeat(sym_list, n_ctx)
    tiled_ctxs = ctx_list * len(sym_list)
    check_consistency(repeated_syms, tiled_ctxs, arg_params=arg_params)