Example #1
    def construct(self, m):
        stride = self.stride
        out_channels = self.out_channels
        x = m.input_slot('x')
        # 'single': pre-activation (BN + ReLU) only on the residual branch;
        # 'both': pre-activation on the shared input, feeding both branches;
        # anything else (e.g. 'none'): no pre-activation at all.
        if self.preact_branch == 'single':
            r = x.to(BN(), name='bn1')
            r = r.to(ReLU(), inplace=True, name='relu1')
        else:
            if self.preact_branch == 'both':
                x = x.to(BN(), name='bn1')
                x = x.to(ReLU(), inplace=True, name='relu1')
            r = x

        r = r.to(Convolution(
            3, out_channels, stride=stride, pad=1, bias=False),
            name='conv1')
        r = r.to(BN(), name='bn2')
        r = r.to(ReLU(), inplace=True, name='relu2')
        r = r.to(Convolution(3, out_channels, pad=1, bias=False), name='conv2')

        if self.shortcut_type == 'conv':
            x = x.to(Convolution(
                1, out_channels, stride=stride, bias=False),
                name='shortcut')
        x = m.vars(x, r).to(Sum(), name='sum')

        m.output_slots = x.name
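
The construct method above reads self.out_channels, self.stride, self.preact_branch and self.shortcut_type, but the initializer that sets them is not shown. Judging from how the block is instantiated in Examples #5 and #7 (block(out_channels, stride, preact_branch, 'conv') for the first block of a stage, block(out_channels, 1) for the rest), a minimal sketch of the assumed constructor could look like this; the default values are guesses, not taken from the original code:

    def __init__(self, out_channels, stride=1, preact_branch='single',
                 shortcut_type='identity'):
        # Assumed constructor: it only stores what construct() reads later.
        # The defaults ('single', 'identity') are inferred guesses covering the
        # two-argument calls block(out_channels, 1) seen in the later examples.
        super().__init__()
        self.out_channels = out_channels
        self.stride = stride
        self.preact_branch = preact_branch
        self.shortcut_type = shortcut_type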
Example #2
def conv_relu(x, kernel_size, num_outputs, stride=1, pad=0, dilation=1,
              w_policy=None, b_policy=None, name=None):  # yapf: disable
    """Append a Convolution followed by an in-place ReLU to node ``x``.

    ``name`` is used as a suffix: e.g. name='1_1' yields layers 'conv1_1'
    and 'relu1_1'.
    """
    x = x.to(Convolution(
        kernel_size, num_outputs, stride=stride, pad=pad, hole=dilation,
        w_policy=w_policy, b_policy=b_policy),
             name='conv' + name)
    x = x.to(ReLU(), inplace=True, name='relu' + name)
    return x
Example #3
    def construct(self, m):
        x = m.input_slot('x')
        stride = 2 if self.shrink else 1
        r = x.to(Convolution(3, self.out, stride=stride, pad=1, bias=False),
                 name='conv1')
        r = r.to(BN(), name='bn1')
        r = r.to(ReLU(), inplace=True, name='relu1')
        r = r.to(Convolution(3, self.out, pad=1, bias=False), name='conv2')
        r = r.to(BN(), name='bn2')

        if self.shrink:
            x = x.to(Convolution(1, self.out, stride=2), name='shrink')
            x = x.to(BN(), name='shrinkbn')

        x = m.vars(x, r).to(Sum(), name='sum')
        x = x.to(ReLU(), inplace=True, name='relu')

        m.output_slots = x.name
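
As in Example #1, the attributes used here (self.out, self.shrink) are set outside the shown code. The calls in Example #4 (BasicBlock(16, False), BasicBlock(32, i == 0)) suggest a two-argument constructor along these lines; again a sketch, not the original definition:

    def __init__(self, out, shrink=False):
        # Assumed constructor matching the BasicBlock(out, shrink) calls in
        # Example #4; shrink=True halves the spatial size via stride 2.
        super().__init__()
        self.out = out
        self.shrink = shrink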
Example #4
def create_model(name="ResNet56",
                 input=32,
                 dropout=0.5,
                 depth=20,
                 nclass=62,
                 dataset='cifar'):
    """
    - dataset: "cifar", depth 20, 32, 44, 56, 110, 1202
    """
    main = GModule(name)
    main.input_slots = ('data', 'label')
    inputs = {
        'data': 'float32({}, {}, 3, _)'.format(input, input),
        'label': 'uint32(1, _)'
    }

    if dataset == 'cifar':
        assert (depth - 2) % 6 == 0, \
            'depth should be one of 20, 32, 44, 56, 110, 1202'
        n = (depth - 2) // 6  # blocks per stage; integer division so range(n) works

        x = main.var('data').to(Convolution(3, 16, pad=1, bias=False),
                                name='conv1')

        for i in range(n):
            x = x.to(BasicBlock(16, False), name='a{}'.format(i))
        for i in range(n):
            x = x.to(BasicBlock(32, i == 0), name='b{}'.format(i))
        for i in range(n):
            x = x.to(BasicBlock(64, i == 0), name='c{}'.format(i))

        x = x.to(BN(), name='last')
        x = x.to(ReLU(), inplace=True, name='relu')
        x = x.to(GlobalPooling('ave'), name='pool')
        x = x.to(Dropout(dropout), name='dropout')
        x = x.to(FullyConnected(nclass), name='fc')

    else:
        raise ValueError('unsupported dataset: ' + dataset)

    main.vars('fc', 'label').to(SoftmaxWithLoss(), name='loss')
    main.vars('fc', 'label').to(Accuracy(1), name='accuracy_top1')
    if nclass > 100:
        main.vars('fc', 'label').to(Accuracy(5), name='accuracy_top5')

    return main.compile(inputs=inputs)
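
For a quick sanity check of the depth formula: depth = 56 gives n = (56 - 2) // 6 = 9 blocks per stage, and with two 3x3 convolutions per block over three stages, plus the first convolution and the final classifier, the layer count is 6 * 9 + 2 = 56. A hypothetical call, using only the parameters from the signature above:

    # Hypothetical usage; training/optimizer setup is framework-specific
    # and not part of this example.
    model = create_model(name='ResNet56', input=32, dropout=0.5,
                         depth=56, nclass=10, dataset='cifar')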
Example #5
def creat_bb_model(depth=101, input_size=224, num_classes=1000, name=None, trans_size=1200):
    cfg = {
        18: (BasicBlock, [(2, 64), (2, 128), (2, 256), (2, 512)]),
        34: (BasicBlock, [(3, 64), (4, 128), (6, 256), (3, 512)]),
        50: (Bottleneck, [(3, 64), (4, 128), (6, 256), (3, 512)]),
        101: (Bottleneck, [(3, 64), (4, 128), (23, 256), (3, 512)]),
    }

    assert depth in cfg

    if name is None:
        name = 'resnet-{}'.format(depth)
    main = GModule(name)
    inputs = {
        'data': 'float32({}, {}, 3, _)'.format(input_size, input_size),
        'label': 'uint32(1, _)',
        'coarse_label': 'uint32(1, _)'
    }
    main.input_slots = tuple(inputs.keys())

    x = main.var('data')
    x = x.to(Convolution(7, 64, stride=2, pad=3, bias=False), name='conv1')
    x = x.to(BN(), name='bn1')
    x = x.to(ReLU(), inplace=True, name='relu1')
    x = x.to(Pooling('max', 3, stride=2), name='pool1')

    block, params = cfg[depth]

    for i, (num, out_channels) in enumerate(params):
        stride = 1 if i == 0 else 2
        preact_branch = 'none' if i == 0 else 'both'
        if i <= 2:
            x = x.to(block(out_channels, stride, preact_branch, 'conv'),
                     name='res{}a'.format(i + 2))
            for j in range(1, num):
                x = x.to(block(out_channels, 1), name='res{}b{}'.format(i + 2, j))
        if i == 2:
            x1, x2 = x.to(MySplit(), name='luzai.split')

            branch = x2
            branch = branch.to(block(512, 2, 'both', 'conv'), name='luzai.res{}a'.format(i + 2))
            for j in range(1, 2):
                branch = branch.to(block(512, 1, 'both', 'conv'), name='luzai.res{}b{}'.format(i + 2, j))
            branch = branch.to(BN(), name='luzai.bn{}'.format(len(params) + 1))
            branch = branch.to(ReLU(), inplace=True, name='luzai.relu{}'.format(len(params) + 1))
            branch = branch.to(Pooling('ave', 7), name='luzai.pool{}'.format(len(params) + 1))
            branch = branch.to(Dropout(0.5), inplace=True, name='luzai.dp')
            branch = branch.to(FullyConnected(1000), name='luzai.coarse.cls')

            main.vars(branch, 'coarse_label').to(SoftmaxWithLoss(), name='coarse_loss')
            main.vars(branch, 'coarse_label').to(Accuracy(5), name='coarse_accuracy_top5')
            main.vars(branch, 'coarse_label').to(Accuracy(1), name='coarse_accuracy_top1')

            x = x1

        if i > 2:
            x = x.to(block(out_channels, stride, preact_branch, 'conv'),
                     name='luzai.main.res{}a'.format(i + 2))
            for j in range(1, num):
                x = x.to(block(out_channels, 1), name='luzai.main.res{}b{}'.format(i + 2, j))

    num_stages = len(params) + 1
    x = x.to(BN(), name='luzai.main.bn{}'.format(num_stages))
    x = x.to(ReLU(), inplace=True, name='relu{}'.format(num_stages))

    x = x.to(Convolution(3, trans_size, pad=0, bias=False, stride=2), name='luzai.conv')
    x = x.to(BN(), name='luzai.bn')
    x = x.to(ReLU(), inplace=True, name='luzai.relu')

    # x = x.to(Pooling('ave', 7), name='pool{}'.format(num_stages))

    x = x.to(Dropout(0.5), inplace=True, name='dropout')
    x = x.to(FullyConnected(num_classes), name='luzai.cls')
    x.to(Softmax(), name='prob')
    main.vars(x, 'label').to(SoftmaxWithLoss(), name='loss')
    main.vars(x, 'label').to(Accuracy(1), name='accuracy_top1')
    main.vars(x, 'label').to(Accuracy(5), name='accuracy_top5')
    model = main.compile(inputs=inputs, seal=False)
    model.add_flow('main',
                   inputs.keys(),
                   ['loss', 'accuracy_top1', 'accuracy_top5',
                    'coarse_loss', 'coarse_accuracy_top1', 'coarse_accuracy_top5'],
                   ['loss', 'coarse_loss'])
    model.seal()
    return model
Example #6
def create_model():
    mod = GModule('vgg16')
    inputs = {
        'data': 'float32(480, 480, 3, _)',
        'label': 'uint32(480, 480, 1, _)',
        'label_weight': 'float32(480, 480, 1, _)',
    }
    mod.input_slots = tuple(inputs.keys())
    x = mod.var('data')

    # conv 1
    x = conv_relu(x, 3, 64, pad=1, name='1_1')
    x = conv_relu(x, 3, 64, pad=1, name='1_2')
    x = x.to(Pooling('max', 2, stride=2), name='pool1')

    # conv2
    x = conv_relu(x, 3, 128, pad=1, name='2_1')
    x = conv_relu(x, 3, 128, pad=1, name='2_2')
    x = x.to(Pooling('max', 2, stride=2), name='pool2')

    # conv3
    x = conv_relu(x, 3, 256, pad=1, name='3_1')
    x = conv_relu(x, 3, 256, pad=1, name='3_2')
    x = conv_relu(x, 3, 256, pad=1, name='3_3')
    x = x.to(Pooling('max', 2, stride=2), name='pool3')

    # conv4
    # pool4 below uses stride 1 (with pad 1), so the feature map does not shrink
    x = conv_relu(x, 3, 512, pad=1, name='4_1')
    x = conv_relu(x, 3, 512, pad=1, name='4_2')
    x = conv_relu(x, 3, 512, pad=1, name='4_3')
    x = x.to(Pooling('max', 3, pad=1, stride=1), name='pool4')

    # conv5, use atrous (dilated) convolution to densely extract features
    x = conv_relu(x, 3, 512, pad=2, dilation=2, name='5_1')
    x = conv_relu(x, 3, 512, pad=2, dilation=2, name='5_2')
    x = conv_relu(x, 3, 512, pad=2, dilation=2, name='5_3')
    x = x.to(Pooling('max', 3, pad=1, stride=1), name='pool5')
    x = x.to(Pooling('ave', 3, pad=1, stride=1), name='pool5a')

    # conv_fc6, atrous convolution
    x = conv_relu(x, 7, 4096, pad=12, stride=1, dilation=4, name='_fc6')
    x = x.to(Dropout(0.5), inplace=True, name='drop6')

    # conv_fc7
    x = conv_relu(x, 1, 4096, name='_fc7')
    x = x.to(Dropout(0.5), inplace=True, name='drop7')

    x = x.to(Convolution(1, 21, w_policy={'init': 'gauss(0.01)'}, b_policy={'init': 'fill(0)'}),  name='conv_fc8')

    # deconvolution (bilinear interpolation) used for upsampling
    x = x.to(Deconvolution(16, 21, pad=4, stride=8, bias=False,
                           w_policy={'init': 'fill(1)', 'lr_mult': '0', 'decay_mult': '0'}),
              name='upscore2')

    # softmax layer
    mod.vars(x, 'label', 'label_weight').to(SoftmaxWithLoss(axis=2), name='loss')
    model = mod.compile(inputs=inputs, seal=False)
    # only 'loss' exists in this module; no accuracy nodes are defined here
    model.add_flow('main', inputs.keys(), ['loss'], ['loss'])
    model.seal()
    return model
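
The spatial sizes follow directly from the strides above: pool1-pool3 each halve the 480x480 input, giving 60x60; pool4, pool5 and pool5a use stride 1 and the dilated convolutions are size-preserving, so everything from conv_fc6 to conv_fc8 stays at 60x60; the stride-8 deconvolution then brings the prediction back to 480x480 to match the label map. A small, framework-independent sketch of that bookkeeping:

    # Rough output-size check for the network above (square inputs assumed).
    size = 480
    for stride in (2, 2, 2):   # pool1, pool2, pool3
        size //= stride
    print(size)       # 60: kept by the stride-1 pool4/pool5 and the dilated convs
    print(size * 8)   # 480: restored by the stride-8 deconvolution 'upscore2'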
Example #7
def create_model(depth=101, input_size=224, num_classes=1000, name=None):
    cfg = {
        18: (BasicBlock, [(2, 64), (2, 128), (2, 256), (2, 512)]),
        34: (BasicBlock, [(3, 64), (4, 128), (6, 256), (3, 512)]),
        50: (Bottleneck, [(3, 64), (4, 128), (6, 256), (3, 512)]),
        101: (Bottleneck, [(3, 64), (4, 128), (23, 256), (3, 512)]),
        '101_10k': (Bottleneck, [(3, 64), (4, 256), (23, 512), (3, 2560)]),
        152: (Bottleneck, [(3, 64), (8, 128), (36, 256), (3, 512)]),
        200: (Bottleneck, [(3, 64), (24, 128), (36, 256), (3, 512)]),
    }

    assert depth in cfg

    if name is None:
        name = 'resnet-{}'.format(depth)
    main = GModule(name)
    inputs = {
        'data': 'float32({}, {}, 3, _)'.format(input_size, input_size),
        'label': 'uint32(1, _)'
    }
    main.input_slots = tuple(inputs.keys())

    x = main.var('data')
    x = x.to(Convolution(7, 64, stride=2, pad=3, bias=False), name='conv1')
    x = x.to(BN(), name='bn1')
    x = x.to(ReLU(), inplace=True, name='relu1')
    x = x.to(Pooling('max', 3, stride=2), name='pool1')

    block, params = cfg[depth]

    for i, (num, out_channels) in enumerate(params):
        stride = 1 if i == 0 else 2
        preact_branch = 'none' if i == 0 else 'both'
        x = x.to(block(out_channels, stride, preact_branch, 'conv'),
                 name='res{}a'.format(i + 2))
        for j in range(1, num):
            x = x.to(block(out_channels, 1), name='res{}b{}'.format(i + 2, j))

    num_stages = len(params) + 1
    x = x.to(BN(), name='bn{}'.format(num_stages))
    x = x.to(ReLU(), inplace=True, name='relu{}'.format(num_stages))
    x = x.to(Pooling('ave', 7), name='pool{}'.format(num_stages))
    x = x.to(Dropout(0.5), inplace=True, name='dropout')
    x = x.to(FullyConnected(
        num_classes,
        w_policy={'init': 'gauss(0.01)', 'decay_mult': '1'},
        b_policy={'init': 'fill(0)', 'decay_mult': '1', 'lr_mult': '1'}),
        name='luzai.cls')

    x.to(Softmax(), name='prob')
    main.vars(x, 'label').to(SoftmaxWithLoss(), name='loss')
    main.vars(x, 'label').to(Accuracy(1), name='accuracy_top1')
    main.vars(x, 'label').to(Accuracy(5), name='accuracy_top5')
    model = main.compile(inputs=inputs, seal=False)
    model.add_flow('main',
                   inputs.keys(), ['loss', 'accuracy_top1', 'accuracy_top5'],
                   ['loss'])
    model.seal()
    return model
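
The cfg keys show that depth may also be the string '101_10k' for the wider variant. A hypothetical call, using only the parameters in the signature above; the class count for the '101_10k' variant is a guess based on its name:

    # Hypothetical calls; depth must be one of the keys of cfg above.
    model_50 = create_model(depth=50, input_size=224, num_classes=1000)
    model_10k = create_model(depth='101_10k', num_classes=10000,
                             name='resnet-101-10k')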