def __init__(self, n_layers, in_size, out_size, dropout,
             use_bi_direction, **kwargs):
    """Build the stacked (bi-directional) GRU parameter links.

    One child link is created per layer and per direction.  Each link
    carries matrices ``w0``-``w5`` (the six GRU gate matrices) plus the
    extra matrices ``w6``-``w8`` and matching biases ``b0``-``b8``, all
    drawn from N(0, 1/fan_in).

    Args:
        n_layers: Number of stacked layers.
        in_size: Dimensionality of the input to the first layer.
        out_size: Dimensionality of hidden states and outputs.
        dropout: Dropout ratio applied between layers.
        use_bi_direction: If true, two directions per layer are built and
            the bi-directional n-step GRU function is selected.
    """
    argument.check_unexpected_kwargs(
        kwargs, use_cudnn='use_cudnn argument is not supported anymore. '
        'Use chainer.using_config')
    argument.assert_kwargs_empty(kwargs)

    direction = 2 if use_bi_direction else 1
    weights = []
    for layer_idx in six.moves.range(n_layers):
        for _ in six.moves.range(direction):
            weight = link.Link()

            def _register(owner, idx, fan_in):
                # Create one (W, b) pair scaled by 1/sqrt(fan_in).
                w = variable.Parameter(
                    normal.Normal(numpy.sqrt(1. / fan_in)),
                    (out_size, fan_in))
                b = variable.Parameter(0, (out_size, ))
                setattr(owner, 'w%d' % idx, w)
                setattr(owner, 'b%d' % idx, b)

            with weight.init_scope():
                # w0-w2 act on the layer input (whose width depends on the
                # layer position), w3-w5 act on the recurrent hidden state.
                for idx in six.moves.range(6):
                    if idx >= 3:
                        fan_in = out_size
                    elif layer_idx == 0:
                        fan_in = in_size
                    else:
                        fan_in = out_size * direction
                    _register(weight, idx, fan_in)
                # Extra matrices: w6/w7 take out_size inputs, w8 twice that.
                for idx, fan_in in ((6, out_size),
                                    (7, out_size),
                                    (8, out_size * 2)):
                    _register(weight, idx, fan_in)
            weights.append(weight)

    super(NStepGRUBase, self).__init__(*weights)
    self.n_layers = n_layers
    self.dropout = dropout
    self.out_size = out_size
    self.direction = direction
    # Select the uni- or bi-directional n-step GRU implementation.
    self.rnn = rnn.n_step_bigru if use_bi_direction else rnn.n_step_gru
def __init__(self, pretrained_model='auto'):
    """Set up the C3D (version 1) 3-D convolutional network.

    Args:
        pretrained_model: ``'auto'`` downloads and converts the released
            UCF101 weights; any other truthy value is treated as a path
            to an ``.npz`` snapshot; a falsy value keeps the paper's
            random initialization.
    """
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        conv_kwargs = {'initialW': init, 'initial_bias': init}
        fc_kwargs = conv_kwargs
    else:
        # employ default initializers used in the original paper
        conv_kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
        fc_kwargs = {
            'initialW': normal.Normal(0.005),
            'initial_bias': constant.One(),
        }
    # All convolutions are 3-D with kernel 3, stride 1, pad 1.
    super(C3DVersion1, self).__init__(
        conv1a=ConvolutionND(3, 3, 64, 3, 1, 1, **conv_kwargs),
        conv2a=ConvolutionND(3, 64, 128, 3, 1, 1, **conv_kwargs),
        conv3a=ConvolutionND(3, 128, 256, 3, 1, 1, **conv_kwargs),
        conv3b=ConvolutionND(3, 256, 256, 3, 1, 1, **conv_kwargs),
        conv4a=ConvolutionND(3, 256, 512, 3, 1, 1, **conv_kwargs),
        conv4b=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        conv5a=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        conv5b=ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs),
        fc6=Linear(512 * 4 * 4, 4096, **fc_kwargs),
        fc7=Linear(4096, 4096, **fc_kwargs),
        fc8=Linear(4096, 101, **fc_kwargs),  # 101 UCF101 classes
    )
    if pretrained_model == 'auto':
        _retrieve(
            'conv3d_deepnetA_ucf.npz',
            'http://vlg.cs.dartmouth.edu/c3d/'
            'c3d_ucf101_finetune_whole_iter_20000',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
    # Ordered layer table consumed by the forward/feature-extraction pass.
    # NOTE(review): 'pool1' uses _max_pooling_2d while the later pools are
    # 3-D — this matches C3D's spatial-only first pooling, but confirm it
    # is intentional.
    self.functions = collections.OrderedDict([
        ('conv1a', [self.conv1a, relu]),
        ('pool1', [_max_pooling_2d]),
        ('conv2a', [self.conv2a, relu]),
        ('pool2', [_max_pooling_3d]),
        ('conv3a', [self.conv3a, relu]),
        ('conv3b', [self.conv3b, relu]),
        ('pool3', [_max_pooling_3d]),
        ('conv4a', [self.conv4a, relu]),
        ('conv4b', [self.conv4b, relu]),
        ('pool4', [_max_pooling_3d]),
        ('conv5a', [self.conv5a, relu]),
        ('conv5b', [self.conv5b, relu]),
        ('pool5', [_max_pooling_3d]),
        ('fc6', [self.fc6, relu, dropout]),
        ('fc7', [self.fc7, relu, dropout]),
        ('fc8', [self.fc8]),
        ('prob', [softmax]),
    ])
def __init__(self, num_inter, num_out):
    """VGG16 with batch normalization: 13 conv ``Block``s + 3 FC layers.

    Args:
        num_inter: Width of the two hidden fully-connected layers.
        num_out: Number of output units of the final layer.
    """
    fc_kwargs = {
        'initialW': normal.Normal(0.01),
        'initial_bias': constant.Zero(),
    }
    super(VGG16BN, self).__init__()
    # (stage, sub-index, out_channels) for each of the 13 conv blocks.
    block_plan = (
        (1, 1, 64), (1, 2, 64),
        (2, 1, 128), (2, 2, 128),
        (3, 1, 256), (3, 2, 256), (3, 3, 256),
        (4, 1, 512), (4, 2, 512), (4, 3, 512),
        (5, 1, 512), (5, 2, 512), (5, 3, 512),
    )
    with self.init_scope():
        for stage, sub, channels in block_plan:
            setattr(self, 'block%d_%d' % (stage, sub), Block(channels, 3))
        self.fc1 = L.Linear(None, num_inter, **fc_kwargs)
        self.bn_fc1 = L.BatchNormalization(num_inter)
        self.fc2 = L.Linear(None, num_inter, **fc_kwargs)
        self.bn_fc2 = L.BatchNormalization(num_inter)
        self.fc3 = L.Linear(None, num_out, **fc_kwargs)
def __init__(self, num_inter, num_out):
    """Plain VGG16: 13 convolutions followed by 3 fully-connected layers.

    Args:
        num_inter: Width of the two hidden fully-connected layers.
        num_out: Number of output units of the final layer.
    """
    init_kwargs = {
        'initialW': normal.Normal(0.01),
        'initial_bias': constant.Zero(),
    }
    # Kept before super().__init__() to preserve the original attribute
    # assignment order.
    self.num_out = num_out
    super(VGG16, self).__init__()
    # (name, in_channels, out_channels) for every 3x3 / stride-1 / pad-1
    # convolution, in registration order.
    conv_specs = (
        ('conv1_1', 3, 64), ('conv1_2', 64, 64),
        ('conv2_1', 64, 128), ('conv2_2', 128, 128),
        ('conv3_1', 128, 256), ('conv3_2', 256, 256), ('conv3_3', 256, 256),
        ('conv4_1', 256, 512), ('conv4_2', 512, 512), ('conv4_3', 512, 512),
        ('conv5_1', 512, 512), ('conv5_2', 512, 512), ('conv5_3', 512, 512),
    )
    with self.init_scope():
        for name, c_in, c_out in conv_specs:
            setattr(self, name,
                    Convolution2D(c_in, c_out, 3, 1, 1, **init_kwargs))
        self.fc6 = Linear(512 * 7 * 7, num_inter, **init_kwargs)
        self.fc7 = Linear(num_inter, num_inter, **init_kwargs)
        self.fc8 = Linear(num_inter, num_out, **init_kwargs)
def __init__(self, num_inter, num_out, dropout_ratio=.5):
    """VGG16 backbone with a batch-normalized 3-layer FC head.

    Args:
        num_inter: Width of the two hidden fully-connected layers.
        num_out: Number of output units of the final layer.
        dropout_ratio: Dropout ratio stored for use in the forward pass.
    """
    init_kwargs = {
        'initialW': normal.Normal(0.01),
        'initial_bias': constant.Zero(),
    }
    super(VGG16BNFC3, self).__init__()
    # (name, in_channels, out_channels) for every 3x3 / stride-1 / pad-1
    # convolution, in registration order.
    conv_specs = (
        ('conv1_1', 3, 64), ('conv1_2', 64, 64),
        ('conv2_1', 64, 128), ('conv2_2', 128, 128),
        ('conv3_1', 128, 256), ('conv3_2', 256, 256), ('conv3_3', 256, 256),
        ('conv4_1', 256, 512), ('conv4_2', 512, 512), ('conv4_3', 512, 512),
        ('conv5_1', 512, 512), ('conv5_2', 512, 512), ('conv5_3', 512, 512),
    )
    with self.init_scope():
        for name, c_in, c_out in conv_specs:
            setattr(self, name,
                    Convolution2D(c_in, c_out, 3, 1, 1, **init_kwargs))
        self.bn1 = L.BatchNormalization(512)
        self.fc6 = Linear(512 * 7 * 7, num_inter, **init_kwargs)
        self.bn2 = L.BatchNormalization(num_inter)
        self.fc7 = Linear(num_inter, num_inter, **init_kwargs)
        self.bn3 = L.BatchNormalization(num_inter)
        self.fc8 = Linear(num_inter, num_out, **init_kwargs)
    self.dropout_ratio = dropout_ratio
def __init__(self, n_class=None, pretrained_model=None, mean=None,
             initialW=None, initial_bias=None):
    """Build a VGG-16 classifier with per-layer callable attributes.

    Args:
        n_class: Number of output classes.  Defaults to the registered
            pretrained-model entry's value when available, else 1000.
        pretrained_model: Key into ``self._models`` (weights downloaded)
            or a path to an ``.npz`` weight file.
        mean: Mean image to subtract; inferred like ``n_class``,
            falling back to ``_imagenet_mean``.
        initialW: Weight initializer; defaults to N(0, 0.01).
        initial_bias: Bias initializer, passed through unchanged.
    """
    if n_class is None:
        if pretrained_model in self._models:
            n_class = self._models[pretrained_model]['n_class']
        else:
            n_class = 1000
    if mean is None:
        if pretrained_model in self._models:
            mean = self._models[pretrained_model]['mean']
        else:
            mean = _imagenet_mean
    self.mean = mean
    if initialW is None:
        # Employ default initializers used in the original paper.
        initialW = normal.Normal(0.01)
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        initialW = constant.Zero()
    kwargs = {'initialW': initialW, 'initial_bias': initial_bias}
    super(VGG16, self).__init__()
    with self.init_scope():
        # Conv2DActiv bundles convolution + activation; input channel
        # counts are inferred (None) on first call.
        self.conv1_1 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
        self.conv1_2 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
        self.pool1 = _max_pooling_2d
        self.conv2_1 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
        self.pool2 = _max_pooling_2d
        self.conv3_1 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
        self.pool3 = _max_pooling_2d
        self.conv4_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.pool4 = _max_pooling_2d
        self.conv5_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
        self.pool5 = _max_pooling_2d
        self.fc6 = Linear(None, 4096, **kwargs)
        self.fc6_relu = relu
        self.fc6_dropout = dropout
        self.fc7 = Linear(None, 4096, **kwargs)
        self.fc7_relu = relu
        self.fc7_dropout = dropout
        self.fc8 = Linear(None, n_class, **kwargs)
        self.prob = softmax
    # Load weights: registered model key -> download; otherwise treat
    # the argument as a local .npz path.
    if pretrained_model in self._models:
        path = download_model(self._models[pretrained_model]['url'])
        chainer.serializers.load_npz(path, self)
    elif pretrained_model:
        chainer.serializers.load_npz(pretrained_model, self)
def __init__(self, in_size, out_size, initialW=None, ignore_label=None):
    """Create an ID-to-vector embedding table.

    Args:
        in_size: Number of distinct IDs (rows of ``W``).
        out_size: Dimensionality of each embedding vector.
        initialW: Initializer for ``W``; defaults to N(0, 1).
        ignore_label: ID value to be skipped at lookup time.
    """
    super(EmbedID, self).__init__()
    self.ignore_label = ignore_label
    # Fall back to the standard-normal initializer when none is supplied.
    initializer = normal.Normal(1.0) if initialW is None else initialW
    with self.init_scope():
        self.W = variable.Parameter(initializer, (in_size, out_size))
def __init__(self, pretrained_model='auto', n_layers=16):
    """Construct VGG-16 or VGG-19 with optional pretrained weights.

    Args:
        pretrained_model: ``'auto'`` downloads and converts the original
            Caffe weights; any other truthy value is a path to an
            ``.npz`` file; a falsy value keeps random initialization.
        n_layers: 16 or 19, selecting the VGG variant.

    Raises:
        ValueError: If ``n_layers`` is neither 16 nor 19.
    """
    super(VGGLayers, self).__init__()
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        kwargs = {'initialW': init, 'initial_bias': init}
    else:
        # employ default initializers used in the original paper
        kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
    if n_layers not in [16, 19]:
        raise ValueError(
            'The n_layers argument should be either 16 or 19, '
            'but {} was given.'.format(n_layers)
        )
    with self.init_scope():
        self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
        self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
        self.fc7 = Linear(4096, 4096, **kwargs)
        self.fc8 = Linear(4096, 1000, **kwargs)
        # The three extra convolutions that distinguish VGG-19.
        if n_layers == 19:
            self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
    if pretrained_model == 'auto':
        # NOTE(review): the 16-layer URL uses https while the 19-layer
        # one uses http — confirm both mirrors are still valid.
        if n_layers == 16:
            _retrieve(
                'VGG_ILSVRC_16_layers.npz',
                'https://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_16_layers.caffemodel',
                self)
        else:
            _retrieve(
                'VGG_ILSVRC_19_layers.npz',
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
                'caffe/VGG_ILSVRC_19_layers.caffemodel',
                self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
def __init__(self, in_size, out_size, initialW=None, ignore_label=None,
             Ip=1, factor=None):
    """Embedding table with spectral-normalization bookkeeping.

    ``u`` is a persistent random vector used by the power-iteration
    estimate of the largest singular value (``Ip`` iterations per call).

    Args:
        in_size: Number of distinct IDs (rows of ``W``).
        out_size: Dimensionality of each embedding vector.
        initialW: Initializer for ``W``; defaults to N(0, 1).
        ignore_label: ID value to be skipped at lookup time.
        Ip: Number of power iterations.
        factor: Optional scaling factor applied to the normalized weight.
    """
    super(SNEmbedID, self).__init__()
    self.ignore_label = ignore_label
    self.Ip = Ip
    self.factor = factor
    if initialW is None:
        initialW = normal.Normal(1.0)
    with self.init_scope():
        self.W = variable.Parameter(initialW, (in_size, out_size))
    # Persisted so the power-iteration state survives serialization.
    self.u = np.random.normal(size=(1, in_size)).astype(dtype="f")
    self.register_persistent('u')
def __init__(self, out_channels, ksize, pad=1):
    """A convolution (N(0, 0.01) weights, zero bias) plus batch norm.

    Args:
        out_channels: Number of output feature maps.
        ksize: Convolution kernel size.
        pad: Zero-padding width (default 1).
    """
    super(Block, self).__init__()
    conv_init = {
        'initialW': normal.Normal(0.01),
        'initial_bias': constant.Zero(),
    }
    with self.init_scope():
        # Input channel count is inferred (None) on the first call.
        self.conv = L.Convolution2D(
            None, out_channels, ksize, pad=pad, **conv_init)
        self.bn = L.BatchNormalization(out_channels)
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
    """Shared constructor for the n-step RNN family.

    Builds one weight link per layer and per direction, then caches the
    parameter matrices in ``self.ws`` / ``self.bs``.

    Relies on attributes provided by the concrete subclass before this
    runs: ``self.n_weights`` (number of weight matrices per link) and
    ``self.use_bi_direction`` — presumably class attributes; confirm in
    the subclasses.

    Args:
        n_layers: Number of stacked layers.
        in_size: Dimensionality of the input to the first layer.
        out_size: Dimensionality of hidden states and outputs.
        dropout: Dropout ratio applied between layers.
        **kwargs: Only checked to reject removed legacy arguments.
    """
    if kwargs:
        # Reject arguments removed in newer Chainer versions with
        # explanatory messages.
        argument.check_unexpected_kwargs(
            kwargs,
            use_cudnn='use_cudnn argument is not supported anymore. '
            'Use chainer.using_config',
            use_bi_direction='use_bi_direction is not supported anymore',
            activation='activation is not supported anymore')
        argument.assert_kwargs_empty(kwargs)

    weights = []
    if self.use_bi_direction:
        direction = 2
    else:
        direction = 1
    for i in six.moves.range(n_layers):
        for di in six.moves.range(direction):
            weight = link.Link()
            with weight.init_scope():
                for j in six.moves.range(self.n_weights):
                    # First half of the matrices multiply the layer
                    # input (whose width depends on layer position),
                    # second half the recurrent hidden state.
                    if i == 0 and j < self.n_weights // 2:
                        w_in = in_size
                    elif i > 0 and j < self.n_weights // 2:
                        w_in = out_size * direction
                    else:
                        w_in = out_size
                    w = variable.Parameter(
                        normal.Normal(numpy.sqrt(1. / w_in)),
                        (out_size, w_in))
                    b = variable.Parameter(0, (out_size, ))
                    setattr(weight, 'w%d' % j, w)
                    setattr(weight, 'b%d' % j, b)
            weights.append(weight)

    super(NStepRNNBase, self).__init__(*weights)

    # Iterating over self yields the child links registered above;
    # cache their parameters as nested lists for the rnn functions.
    self.ws = [[
        getattr(layer, 'w%d' % i)
        for i in six.moves.range(self.n_weights)
    ] for layer in self]
    self.bs = [[
        getattr(layer, 'b%d' % i)
        for i in six.moves.range(self.n_weights)
    ] for layer in self]

    self.n_layers = n_layers
    self.dropout = dropout
    self.out_size = out_size
    self.direction = direction
def __init__(self, pretrained_model='auto'):
    """Construct VGG-16 with optional pretrained ImageNet weights.

    Args:
        pretrained_model: ``'auto'`` downloads and converts the original
            Caffe weights; any other truthy value is a path to an
            ``.npz`` file; a falsy value keeps random initialization.
    """
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        kwargs = {'initialW': init, 'initial_bias': init}
    else:
        # employ default initializers used in the original paper
        kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
    super(VGG16Layers, self).__init__()
    with self.init_scope():
        # All convolutions are 3x3, stride 1, pad 1.
        self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
        self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
        self.fc7 = Linear(4096, 4096, **kwargs)
        self.fc8 = Linear(4096, 1000, **kwargs)  # 1000 ImageNet classes
    if pretrained_model == 'auto':
        _retrieve(
            'VGG_ILSVRC_16_layers.npz',
            'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
            'caffe/VGG_ILSVRC_16_layers.caffemodel',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
def __init__(self, pretrained_model='auto', n_channels=3, n_outputs=101,
             mean_path='datasets/models/mean2.npz'):
    """C3D (v1) for UCF101 with built-in input preprocessing layers.

    Args:
        pretrained_model: ``'auto'`` downloads the released UCF101
            weights; any other truthy value is a path to an ``.npz``
            snapshot; a falsy value keeps random initialization.
        n_channels: Number of input channels.
        n_outputs: Number of output classes.
        mean_path: Path to an ``.npz`` file holding the dataset mean
            under the key ``'mean'``.
    """
    super(C3DVersion1UCF101, self).__init__()
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        conv_kwargs = {'initialW': init, 'initial_bias': init}
        fc_kwargs = conv_kwargs
    else:
        # employ default initializers used in the original paper
        conv_kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
        fc_kwargs = {
            'initialW': normal.Normal(0.005),
            'initial_bias': constant.One(),
        }
    with self.init_scope():
        # All convolutions are 3-D with kernel 3, stride 1, pad 1.
        self.conv1a = ConvolutionND(3, n_channels, 64, 3, 1, 1,
                                    **conv_kwargs)
        self.conv2a = ConvolutionND(3, 64, 128, 3, 1, 1, **conv_kwargs)
        self.conv3a = ConvolutionND(3, 128, 256, 3, 1, 1, **conv_kwargs)
        self.conv3b = ConvolutionND(3, 256, 256, 3, 1, 1, **conv_kwargs)
        self.conv4a = ConvolutionND(3, 256, 512, 3, 1, 1, **conv_kwargs)
        self.conv4b = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
        self.conv5a = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
        self.conv5b = ConvolutionND(3, 512, 512, 3, 1, 1, **conv_kwargs)
        self.fc6 = Linear(512 * 4 * 4, 4096, **fc_kwargs)
        self.fc7 = Linear(4096, 4096, **fc_kwargs)
        self.fc8 = Linear(4096, n_outputs, **fc_kwargs)
    if pretrained_model == 'auto':
        _retrieve(
            'conv3d_deepnetA_ucf.npz',
            'http://vlg.cs.dartmouth.edu/c3d/'
            'c3d_ucf101_finetune_whole_iter_20000',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
    # Preprocessing: a fixed 1x1x1 conv whose weights map input channel
    # 2->0, 1->1, 0->2 with value 128 — a channel reversal plus x128
    # scaling (presumably RGB [0,1] to BGR [0,255]; confirm against the
    # data pipeline).  Created after weight loading so it is never
    # overwritten by the snapshot.
    self.pre = ConvolutionND(3, n_channels, n_channels, 1, 1, 0,
                             nobias=True, **conv_kwargs)
    self.pre.W.data[:] = 0
    self.pre.W.data[[0, 1, 2], [2, 1, 0]] = 128
    # self.pre.b.data[:] = 128 - numpy.array([90.25164795, 97.65701294, 101.4083252])
    # Mean subtraction as a Bias link: the stored mean is center-cropped
    # from its original spatial size to 112x112 and offset by 128.
    self.mean = Bias(shape=(3, 16, 112, 112))
    mean = numpy.load(mean_path)['mean']
    self.mean.b.data[:] = 128 - mean[:, :, 8:8 + 112, 8:8 + 112]
    # Ordered layer table consumed by the forward/feature pass.
    # NOTE(review): 'pool1' is 2-D (spatial-only) and 'pool5' includes
    # dropout, unlike the plain C3DVersion1 table — confirm intentional.
    self.functions = collections.OrderedDict([
        ('pre', [self.pre, _resize, self.mean]),
        ('conv1a', [self.conv1a, relu]),
        ('pool1', [_max_pooling_2d]),
        ('conv2a', [self.conv2a, relu]),
        ('pool2', [_max_pooling_3d]),
        ('conv3a', [self.conv3a, relu]),
        ('conv3b', [self.conv3b, relu]),
        ('pool3', [_max_pooling_3d]),
        ('conv4a', [self.conv4a, relu]),
        ('conv4b', [self.conv4b, relu]),
        ('pool4', [_max_pooling_3d]),
        ('conv5a', [self.conv5a, relu]),
        ('conv5b', [self.conv5b, relu]),
        ('pool5', [_max_pooling_3d, dropout]),
        ('fc6', [self.fc6, relu, dropout]),
        ('fc7', [self.fc7, relu, dropout]),
        ('fc8', [self.fc8]),
        ('prob', [softmax]),
    ])
def __init__(self, pretrained_model='auto'):
    """Construct VGG-16 with a per-layer function table.

    Args:
        pretrained_model: ``'auto'`` downloads and converts the original
            Caffe weights; any other truthy value is a path to an
            ``.npz`` file; a falsy value keeps random initialization.
    """
    if pretrained_model:
        # As a sampling process is time-consuming,
        # we employ a zero initializer for faster computation.
        init = constant.Zero()
        kwargs = {'initialW': init, 'initial_bias': init}
    else:
        # employ default initializers used in the original paper
        kwargs = {
            'initialW': normal.Normal(0.01),
            'initial_bias': constant.Zero(),
        }
    # All convolutions are 3x3, stride 1, pad 1.
    super(VGG16Layers, self).__init__(
        conv1_1=Convolution2D(3, 64, 3, 1, 1, **kwargs),
        conv1_2=Convolution2D(64, 64, 3, 1, 1, **kwargs),
        conv2_1=Convolution2D(64, 128, 3, 1, 1, **kwargs),
        conv2_2=Convolution2D(128, 128, 3, 1, 1, **kwargs),
        conv3_1=Convolution2D(128, 256, 3, 1, 1, **kwargs),
        conv3_2=Convolution2D(256, 256, 3, 1, 1, **kwargs),
        conv3_3=Convolution2D(256, 256, 3, 1, 1, **kwargs),
        conv4_1=Convolution2D(256, 512, 3, 1, 1, **kwargs),
        conv4_2=Convolution2D(512, 512, 3, 1, 1, **kwargs),
        conv4_3=Convolution2D(512, 512, 3, 1, 1, **kwargs),
        conv5_1=Convolution2D(512, 512, 3, 1, 1, **kwargs),
        conv5_2=Convolution2D(512, 512, 3, 1, 1, **kwargs),
        conv5_3=Convolution2D(512, 512, 3, 1, 1, **kwargs),
        fc6=Linear(512 * 7 * 7, 4096, **kwargs),
        fc7=Linear(4096, 4096, **kwargs),
        fc8=Linear(4096, 1000, **kwargs),  # 1000 ImageNet classes
    )
    if pretrained_model == 'auto':
        _retrieve(
            'VGG_ILSVRC_16_layers.npz',
            'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
            'caffe/VGG_ILSVRC_16_layers.caffemodel',
            self)
    elif pretrained_model:
        npz.load_npz(pretrained_model, self)
    # Ordered layer table consumed by the forward/feature pass.
    self.functions = collections.OrderedDict([
        ('conv1_1', [self.conv1_1, relu]),
        ('conv1_2', [self.conv1_2, relu]),
        ('pool1', [_max_pooling_2d]),
        ('conv2_1', [self.conv2_1, relu]),
        ('conv2_2', [self.conv2_2, relu]),
        ('pool2', [_max_pooling_2d]),
        ('conv3_1', [self.conv3_1, relu]),
        ('conv3_2', [self.conv3_2, relu]),
        ('conv3_3', [self.conv3_3, relu]),
        ('pool3', [_max_pooling_2d]),
        ('conv4_1', [self.conv4_1, relu]),
        ('conv4_2', [self.conv4_2, relu]),
        ('conv4_3', [self.conv4_3, relu]),
        ('pool4', [_max_pooling_2d]),
        ('conv5_1', [self.conv5_1, relu]),
        ('conv5_2', [self.conv5_2, relu]),
        ('conv5_3', [self.conv5_3, relu]),
        ('pool5', [_max_pooling_2d]),
        ('fc6', [self.fc6, relu, dropout]),
        ('fc7', [self.fc7, relu, dropout]),
        # BUG FIX: fc8 produces the class logits fed to softmax.  The
        # original table applied relu here, which zeroes every negative
        # logit and distorts the predicted probabilities; no other VGG
        # table in this file applies an activation on fc8.
        ('fc8', [self.fc8]),
        ('prob', [softmax]),
    ])