Example 1
def get_super_resolution():
    factor = 3
    size = 224
    data = sym.Variable(name='9')
    conv1 = sym.conv2d(data,
                       channels=64,
                       kernel_size=(5, 5),
                       padding=(2, 2),
                       use_bias=False)
    relu1 = sym.relu(conv1 + sym.Variable(name='2'))
    conv2 = sym.conv2d(relu1,
                       channels=64,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False)
    relu2 = sym.relu(conv2 + sym.Variable(name='4'))
    conv3 = sym.conv2d(relu2,
                       channels=32,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False)
    relu3 = sym.relu(conv3 + sym.Variable(name='6'))
    conv4 = sym.conv2d(relu3,
                       channels=factor**2,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False)
    conv4 = conv4 + sym.Variable(name='8')
    # TODO(zhreshold): allow shape inference for batch size > 1
    r1 = sym.reshape(conv4, shape=(1, 1, factor, factor, size, size))
    t1 = sym.transpose(r1, axes=(0, 1, 4, 2, 5, 3))
    r2 = sym.reshape(t1, shape=(1, 1, size * factor, size * factor))
    return r2
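A minimal sketch of how the symbol built above might be inspected (assuming the `import nnvm` and `import nnvm.symbol as sym` aliases these examples rely on):

net = get_super_resolution()
g = nnvm.graph.create(net)   # wrap the symbol in an NNVM graph
print(g.ir())                # dump the textual IR for inspection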
Example 2
def with_nnvm(nwarmup: int,
              nloops: int,
              args,
              lam,
              params={},
              verbose: bool = False,
              opt_level: int = 2,
              debug: bool = False) -> Result:
    """ Take numpy arrays as args, convert them to TVM tensors and call `lam`.
  Result of lambda is converted back to numpy array and returned.
  """
    runtime = graph_runtime if not debug else debug_runtime

    tgt = 'llvm'
    ctx = tvm.cpu(0)
    inps = []
    ishapes = {}
    itypes = {}
    idata = {}
    for i, arg in enumerate(args):
        nm = 'pl' + str(i)
        inps.append(sym.Variable(name=nm))
        ishapes.update({nm: arg.shape})
        idata.update({nm: arg})
        itypes.update({nm: "float32"})

    out = lam(*inps)
    with nnvm.compiler.build_config(opt_level=opt_level):
        graph, lib, _ = nnvm.compiler.build(out, tgt, ishapes)

    forward_graph,_,_,out_shapes,out_types = \
        infer_shapes_dtypes(nnvm.graph.create(out), shape=ishapes, dtype=itypes, fallback_dtype='float32')

    out_nd = tvm.nd.array(np.zeros(out_shapes[0], dtype=out_types[0]), ctx)
    m = runtime.create(graph, lib, ctx)
    m.set_input(**idata)
    m.set_input(**params)

    perfs: List[float] = []
    for i in range(nwarmup + nloops):
        tb = perf_counter()
        m.run()
        te = perf_counter()
        if i >= nwarmup:
            perfs.append(te - tb)
        if verbose:
            print("NNVM", te - tb)
    out_nd = m.get_output(
        0, tvm.nd.empty(shape=out_shapes[0], dtype=out_types[0], ctx=ctx))
    return Result.fromPasses(out_nd.asnumpy(), perfs)
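A hedged usage sketch for `with_nnvm`; the arrays `a` and `b` are hypothetical, and it assumes the `numpy as np` and `nnvm.symbol as sym` imports used above are in scope:

a = np.random.uniform(size=(16, 16)).astype('float32')
b = np.random.uniform(size=(16, 16)).astype('float32')
# `lam` receives one NNVM placeholder per input array, in order.
res = with_nnvm(nwarmup=1, nloops=5, args=[a, b],
                lam=lambda x, y: sym.elemwise_add(x, y),
                verbose=True)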
Example 3
def nnvm_conv_test(nblocks=200,ks=1,w=54,h=6,c=256,verbose:bool=False,opt_level:int=2):
  """ Test convolution performance for different shape """

  shape=(1,h,w,c)
  kshape=(ks,ks,c,c)
  x=_sym.Variable(init=np.zeros(shape=shape),name='x')
  k=_sym.Variable(init=np.zeros(shape=kshape),name='k')
  t=x

  def _print_shape(t):
    print(run_nnvm(0,1,
        {x:np.zeros(shape=shape),
         k:np.zeros(shape=kshape)},
        t).last_data.shape)

  t=_sym.conv2d(t,k,
      dilation=(1,1),
      layout="NHWC",
      strides=(1,1),
      padding=[0,0],
      kernel_size=(ks,ks),
      channels=c,
      kernel_layout="HWIO",
      name="conv1",
      use_bias=False)

  # print(t.__dir__())
  # print(t.list_attr())

  t=_sym.strided_slice(t,begin=[0,0,0,0],end=[1,1,1,c])
  t=_sym.expand_like(t,x,axis=[1,2])

  r=run_nnvm(1,15,
    {x:np.zeros(shape=shape)
    ,k:np.zeros(shape=kshape)}, t,
    verbose=verbose,
    opt_level=opt_level)
  return r
Example 4
def block2_nnvm_run(nblocks:int=1,nwarmup:int=0,nloops:int=1,init_method='zeros',verbose=True,debug=False,**kwargs):
  # sym_149080784 = tf.placeholder(shape=(1,108,21,32),dtype=tf.float32)
  print("Warning: unused args:", kwargs) if kwargs != {} else None
  inp=_sym.Variable(name='inp', init=np.zeros(shape=(1,54,6,192)))

  block2,blockvars=block2_block_nnvm(nblocks,inp)
  block2_params={sym:MODEL_PARAMS[k] for (k,sym) in blockvars.items()}
  block2_params.update({inp:common_init(init_method,(1,54,6,192),np.float32)})

  r=run_nnvm(
      nwarmup,nloops,
      block2_params,
      block2,
      verbose=verbose,
      debug=debug)

  return r
Example 5
def get_super_resolution_deprecated():
    factor = 3
    size = 224
    data = sym.Variable(name='9')
    conv1 = sym.conv2d(data, channels=64, kernel_size=(5, 5), padding=(2, 2))
    relu1 = sym.relu(conv1)
    conv2 = sym.conv2d(relu1, channels=64, kernel_size=(3, 3), padding=(1, 1))
    relu2 = sym.relu(conv2)
    conv3 = sym.conv2d(relu2, channels=32, kernel_size=(3, 3), padding=(1, 1))
    relu3 = sym.relu(conv3)
    conv4 = sym.conv2d(relu3,
                       channels=factor**2,
                       kernel_size=(3, 3),
                       padding=(1, 1))
    r1 = sym.reshape(conv4, shape=(0, 1, factor, factor, size, size))
    t1 = sym.transpose(r1, axes=(0, 1, 4, 2, 5, 3))
    r2 = sym.reshape(t1, shape=(0, 1, size * factor, size * factor))
    return r2
Example 6
def get_symbol(num_classes=1000, **kwargs):
    data = sym.Variable(name="data")
    # stage 1
    conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
    conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
    conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
    pool = Pooling(data=conv_2,
                   kernel=(3, 3),
                   stride=(2, 2),
                   pool_type="max",
                   pad=(0, 0),
                   name="pool")
    # stage 2
    conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
    conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
    pool1 = Pooling(data=conv_4,
                    kernel=(3, 3),
                    stride=(2, 2),
                    pool_type="max",
                    pad=(0, 0),
                    name="pool1")

    # stage 3
    in3a = Inception7A(pool1, 64, 64, 96, 96, 48, 64, "avg", 32, "mixed")
    in3b = Inception7A(in3a, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_1")
    in3c = Inception7A(in3b, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_2")
    in3d = Inception7B(in3c, 384, 64, 96, 96, "max", "mixed_3")
    # stage 4
    in4a = Inception7C(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192,
                       "avg", 192, "mixed_4")
    in4b = Inception7C(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                       "avg", 192, "mixed_5")
    in4c = Inception7C(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                       "avg", 192, "mixed_6")
    in4d = Inception7C(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192,
                       "avg", 192, "mixed_7")
    in4e = Inception7D(in4d, 192, 320, 192, 192, 192, 192, "max", "mixed_8")
    # stage 5
    in5a = Inception7E(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg",
                       192, "mixed_9")
    in5b = Inception7E(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max",
                       192, "mixed_10")
    return in5b
Example 7
def get_symbol(num_classes, num_layers=11, batch_norm=False):
    """
    Parameters
    ----------
    num_classes : int
        Number of classification classes.
    num_layers : int
        Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
    batch_norm : bool, default False
        Use batch normalization.
    """
    vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
                13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
                16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
                19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512]),
                1: ([1, 0, 0, 0, 0], [64, 0, 0, 0, 0])}
    if num_layers not in vgg_spec:
        raise ValueError("Invalide num_layers {}. Choices are 11,13,16,19.".format(num_layers))
    layers, filters = vgg_spec[num_layers]
    data = sym.Variable(name="data")
    feature = get_feature(data, layers, filters, batch_norm)

    return feature
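For illustration only, the feature extractor above could be instantiated as a 16-layer VGG with batch normalization (the argument values are arbitrary):

net = get_symbol(num_classes=1000, num_layers=16, batch_norm=True)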
Example 8
def block1_block_nnvm_consts():
    return {
        "sym_75044384":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_4/kernel",
                      shape=(1, 1, 32, 64)),
        "sym_73427696":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_4/bias",
                      shape=(64, )),
        "sym_223382672":
        _sym.Variable(
            name="Rcnn_ctcV3/expand_conv1/activation/conv2d_6/kernel",
            shape=(1, 1, 64, 64)),
        "sym_356827536":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/activation/conv2d_6/bias",
                      shape=(64, )),
        "sym_451228704":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_5/kernel",
                      shape=(3, 3, 32, 64)),
        "sym_88828560":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_5/bias",
                      shape=(64, )),
        "sym_379167584":
        _sym.Variable(
            name="Rcnn_ctcV3/expand_conv1/static_batch_normalization_3/gamma",
            shape=(64, )),
        "sym_492256464":
        _sym.Variable(
            name="Rcnn_ctcV3/expand_conv1/static_batch_normalization_3/beta",
            shape=(64, )),
        "sym_104779696":
        _sym.Variable(
            name=
            "Rcnn_ctcV3/expand_conv1/static_batch_normalization_3/moving_mean",
            shape=(64, )),
        "sym_378983504":
        _sym.Variable(
            name=
            "Rcnn_ctcV3/expand_conv1/static_batch_normalization_3/moving_variance",
            shape=(64, )),
        "sym_73418512":
        _sym.Variable(
            name=
            "Rcnn_ctcV3/expand_conv1/static_batch_normalization_3/batchnorm/add/y",
            shape=(1, )),
        "sym_134609696":
        _sym.Variable(
            name="Rcnn_ctcV3/expand_conv1/activation/conv2d_7/kernel",
            shape=(1, 1, 64, 64)),
        "sym_131967104":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/activation/conv2d_7/bias",
                      shape=(64, )),
        "sym_473740336":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/activation/max_2/mul/x",
                      shape=(1, )),
        "sym_112766576":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_8/kernel",
                      shape=(3, 3, 64, 64)),
        "sym_105635760":
        _sym.Variable(name="Rcnn_ctcV3/expand_conv1/conv2d_8/bias",
                      shape=(64, ))
    }
Example 9
def get_var(self, name, must_contain=True):
    if must_contain:
        assert name in self.vars
    if name not in self.vars:
        self.vars[name] = _sym.Variable(name=name)
    return self.vars[name]
Example 10
def new_const(self, value):
    name = "_param_%d" % (self.const_ctr)
    self.const_ctr += 1
    self.params[name] = value
    self.vars[name] = _sym.Variable(name=name)
    return self.vars[name]
Example 11
def resnet(units,
           num_stages,
           filter_list,
           num_classes,
           image_shape,
           bottle_neck=True):
    """Return ResNet symbol of
    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stages : int
        Number of stage
    filter_list : list
        Channel size of each stage
    num_classes : int
        Ouput size of symbol
    dataset : str
        Dataset type, only cifar10 and imagenet supports
    """
    num_unit = len(units)
    assert num_unit == num_stages
    data = sym.Variable(name='data')

    #data = sym.batch_norm(data=data, epsilon=2e-5, scale=False, name='bn_data',axis=3)
    (_, height, _) = image_shape
    if height <= 32:  # such as cifar10
        body = sym.conv2d(data=data,
                          channels=filter_list[0],
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding=(1, 1),
                          use_bias=False,
                          name="conv0")
    else:  # often expected to be 224 such as imagenet
        body = sym.conv2d(data=data,
                          channels=filter_list[0],
                          kernel_size=(7, 7),
                          strides=(2, 2),
                          padding=(3, 3),
                          use_bias=False,
                          name="conv0")
        body = sym.batch_norm(data=body, epsilon=2e-5, name='bn0', axis=3)
        body = sym.relu(data=body, name='relu0')
        body = sym.max_pool2d(data=body,
                              pool_size=(3, 3),
                              strides=(2, 2),
                              padding=(1, 1),
                              layout="NHWC")
    #body = residual_unit2(
    #        body, filter_list[1], (1,1),
    #        False, name='stage%d_unit%d' % (1, 1), bottle_neck=bottle_neck,filter_last=filter_list[0])

    for i in range(num_stages):
        body = residual_unit(body,
                             filter_list[i + 1],
                             (1 if i == 0 else 2, 1 if i == 0 else 2),
                             False,
                             name='stage%d_unit%d' % (i + 1, 1),
                             bottle_neck=bottle_neck,
                             last_filter=filter_list[i])
        for j in range(units[i] - 1):
            body = residual_unit(body,
                                 filter_list[i + 1], (1, 1),
                                 True,
                                 name='stage%d_unit%d' % (i + 1, j + 2),
                                 bottle_neck=bottle_neck,
                                 last_filter=filter_list[i + 1])

    bn1 = sym.batch_norm(data=body, epsilon=2e-5, name='bn1', axis=3)
    relu1 = sym.relu(data=bn1, name='relu1')

    return relu1
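A hedged example of calling `resnet` with a ResNet-18-style configuration; all values are assumptions (`filter_list` needs one more entry than `num_stages`, and a height above 32 selects the imagenet-style stem):

net = resnet(units=[2, 2, 2, 2],
             num_stages=4,
             filter_list=[64, 64, 128, 256, 512],
             num_classes=1000,
             image_shape=(3, 224, 224),
             bottle_neck=False)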
Example 12
def _get_model(dshape):
    data = sym.Variable('data', shape=dshape)
    fc1 = sym.dense(data, units=dshape[-1] * 2, use_bias=True)
    left, right = sym.split(fc1, indices_or_sections=2, axis=1)
    return sym.Group(((left + 1), (right - 1)))
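A sketch of compiling the two-output group above, using `nnvm.compiler.build` the same way as in Example 2 (the shape and target are assumptions):

dshape = (1, 16)
net = _get_model(dshape)
with nnvm.compiler.build_config(opt_level=2):
    graph, lib, params = nnvm.compiler.build(net, 'llvm', shape={'data': dshape})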
Example 13
def addvar(name,shape):
  nonlocal varnames
  s = _sym.Variable(name=name,shape=shape)
  varnames.update({name:s})
  return s
Example 14
def Variable(name, **kwargs):
    if name in _var_values:
        kwargs["init"] = _var_values[name]
    var = sym.Variable(name, **kwargs)
    _global_vars[name] = var
    return var
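A hypothetical use of this wrapper: pre-seeding `_var_values` makes the wrapper attach the stored array as the variable's `init` attribute, mirroring the `init=` usage in the other examples (assumes `numpy as np` is imported):

_var_values['x'] = np.zeros((1, 4), dtype='float32')
x = Variable('x', shape=(1, 4))   # picks up the init from _var_values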
Example 15
def drn(arch,
        block,
        layers,
        num_classes=1000,
        channels=(16, 32, 64, 128, 256, 512, 512, 512)):
    data = sym.Variable(name='data')
    if arch == 'C':
        out = data
        out = sym.conv2d(data=out,
                         channels=channels[0],
                         kernel_size=(7, 7),
                         strides=(1, 1),
                         padding=(3, 3),
                         use_bias=False)
        out = int8_wrapper(sym.batch_norm, data=out)
        out = sym.relu(data=out)

        out = drn_unit(out,
                       basic_block,
                       channels[0],
                       channels[0],
                       layers[0],
                       stride=1)
        out = drn_unit(out,
                       basic_block,
                       channels[0],
                       channels[1],
                       layers[1],
                       stride=2)
        num_channel = channels[1]
    else:
        raise NotImplementedError()

    out = drn_unit(out, block, num_channel, channels[2], layers[2], stride=2)
    out = drn_unit(out, block, channels[2], channels[3], layers[3], stride=2)
    out = drn_unit(out,
                   block,
                   channels[3],
                   channels[4],
                   layers[4],
                   dilation=2,
                   new_level=False)

    num_channel = channels[4]
    if layers[5] > 0:
        out = drn_unit(out,
                       block,
                       num_channel,
                       channels[5],
                       layers[5],
                       dilation=4,
                       new_level=False)
        num_channel = channels[5]

    if arch == 'C':
        if layers[6] > 0:
            out = drn_unit(out,
                           block,
                           num_channel,
                           channels[6],
                           layers[6],
                           dilation=2,
                           new_level=False,
                           residual=False)
            num_channel = channels[6]
        if layers[7] > 0:
            out = drn_unit(out,
                           block,
                           num_channel,
                           channels[7],
                           layers[7],
                           dilation=1,
                           new_level=False,
                           residual=False)
            num_channel = channels[7]

    return out
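A hedged call with a DRN-C-style configuration; the layer counts and the `basic_block` argument are assumptions based on the branches the function takes:

net = drn('C', basic_block, layers=[1, 1, 2, 2, 2, 2, 1, 1])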