Example #1
    def __init__(self, n_out, pretrained_model='auto', layers=None):
        super(ResNet101, self).__init__()
        init_param = initializers.HeNormal()
        self.n_out = n_out
        if layers:
            self.layers = layers
        else:
            self.layers = ['pool5']

        with self.init_scope():
            self.base = L.ResNet101Layers(pretrained_model=pretrained_model)
            self.fc = L.Linear(None, n_out, initialW=init_param)
Example #2
 def __init__(self, n_in, n_out, stride=1):
     
     self.dtype = np.float32
     w = 1/np.sqrt(2)
     initW = initializers.HeNormal(scale=w)
     initbias = initializers.Zero()
     super(Module, self).__init__(
         conv1=L.Convolution2D(n_in, n_out, 3, stride, 1, 1, initialW=initW, initial_bias=initbias),
         bn1=L.BatchNormalization(n_out,dtype=self.dtype),
         conv2=L.Convolution2D(n_out, n_out, 3, 1, 1, 1, initialW=initW, initial_bias=initbias),
         bn2=L.BatchNormalization(n_out,dtype=self.dtype),
     )
Example #3
    def __init__(self,
                 growth_rate=32,
                 n_layers=(6, 12, 24, 16),
                 init_features=64,
                 bn_size=4,
                 dropout_rate=0,
                 n_class=1000):
        r"""Initialization of DenseNetImagenet.

        Args:
            growth_rate (int): growth_rate
            n_layers (tuple of int): # of layers in each block
            init_features (int): # of output channels of self.conv1
            bn_size (int): bn_size
            dropout_rate (float): dropout rate \in [0, 1). If > 0,
                apply dropout
            n_class (int): # of classes
        """
        super(DenseNetImagenet, self).__init__()
        with self.init_scope():
            initialW = initializers.HeNormal()
            self.conv1 = L.Convolution2D(None,
                                         init_features,
                                         7,
                                         2,
                                         3,
                                         initialW=initialW,
                                         nobias=True)
            self.bn1 = L.BatchNormalization(init_features)

            self.block1 = DenseBlock(n_layers[0], init_features, bn_size,
                                     growth_rate, dropout_rate)
            n_feature = init_features + n_layers[0] * growth_rate
            self.trans1 = Transition(n_feature, dropout_rate)

            self.block2 = DenseBlock(n_layers[1], n_feature, bn_size,
                                     growth_rate, dropout_rate)
            n_feature += n_layers[1] * growth_rate
            self.trans2 = Transition(n_feature, dropout_rate)

            self.block3 = DenseBlock(n_layers[2], n_feature, bn_size,
                                     growth_rate, dropout_rate)
            n_feature += n_layers[2] * growth_rate
            self.trans3 = Transition(n_feature, dropout_rate)

            self.block4 = DenseBlock(n_layers[3], n_feature, bn_size,
                                     growth_rate, dropout_rate)
            n_feature += n_layers[3] * growth_rate
            self.bn4 = L.BatchNormalization(n_feature)

            self.prob = L.Linear(None, n_class)

        self.n_class = n_class
Example #4
    def __init__(self, optimizer, dtype, use_placeholder):
        self.dtype = dtype
        weight = initializers.HeNormal(1 / numpy.sqrt(2), dtype)
        bias = initializers.Constant(0, dtype)
        in_size = None if use_placeholder else self.UNIT_NUM
        self.model = L.Linear(in_size, 2, initialW=weight, initial_bias=bias)

        self.optimizer = optimizer
        # true parameters
        self.w = numpy.random.uniform(
            -1, 1, (self.UNIT_NUM, 1)).astype(dtype)
        self.b = numpy.random.uniform(-1, 1, (1, )).astype(dtype)
Example #5
    def __init__(self, ninput, nhidden, nhidden2, noutput):
        super(RNN_Elman2, self).__init__(
            L1=CL.Elman(ninput, nhidden, actfun=sigmoid.sigmoid),
            L2=CL.Elman(nhidden,nhidden2, actfun=sigmoid.sigmoid),
            L3=L.Linear(nhidden2, noutput, initialW=init.HeNormal()),
        )

        self.ninput = ninput
        self.nhidden = nhidden
        self.noutput = noutput
        self.h={}
        self.h[1]=None
Example #6
 def __init__(self):
     super(ResNet50_Nhwc, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(
             3, 64, 7, 2, 3, initialW=initializers.HeNormal())
         self.bn1 = L.BatchNormalization(64)
         with chainer.using_config('compute_mode', 'cudnn_fast'):
             self.res2 = Block(3, 64, 64, 256, 1)
             self.res3 = Block(4, 256, 128, 512)
             self.res4 = Block(6, 512, 256, 1024)
             self.res5 = Block(3, 1024, 512, 2048)
         self.fc = L.Linear(2048, 1000)
Example #7
    def __init__(self, output=8):
        super(ResNet50, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(3,
                                         64,
                                         7,
                                         2,
                                         3,
                                         initialW=initializers.HeNormal(),
                                         nobias=True)
            self.bn1 = L.BatchNormalization(64)
            self.layer1 = Block(3, 64, 64, 256, 1)
            self.layer2 = Block(4, 256, 128, 512)
            self.layer3 = Block(6, 512, 256, 1024)

            self.att_layer4 = Block(3, 1024, 512, 2048, 1)
            self.bn_att = L.BatchNormalization(512 * blockexpansion)
            self.att_conv = L.Convolution2D(512 * blockexpansion,
                                            output,
                                            ksize=1,
                                            pad=0,
                                            initialW=initializers.HeNormal(),
                                            nobias=True)
            self.bn_att2 = L.BatchNormalization(output)
            self.att_conv2 = L.Convolution2D(output,
                                             output,
                                             ksize=1,
                                             pad=0,
                                             initialW=initializers.HeNormal(),
                                             nobias=True)
            self.att_conv3 = L.Convolution2D(output,
                                             1,
                                             ksize=3,
                                             pad=1,
                                             initialW=initializers.HeNormal(),
                                             nobias=True)
            self.bn_att3 = L.BatchNormalization(1)

            self.layer4 = Block(3, 1024, 512, 2048)
            self.fc = L.Linear(512 * blockexpansion, output)
Example #8
    def __init__(self,
                 in_ch,
                 out_ch_list,
                 n_convs_list=None,
                 ksize_list=None,
                 use_bn_list=None,
                 activation_list=None,
                 w_init=inits.HeNormal()):
        if n_convs_list is None:
            n_convs_list = [2] * len(out_ch_list)

        if ksize_list is None:
            ksize_list = [3] * len(out_ch_list)

        if use_bn_list is None:
            use_bn_list = [True] * len(out_ch_list)

        if activation_list is None:
            activation_list = [F.relu] * len(out_ch_list)

        assert len(out_ch_list) == len(n_convs_list), \
            (len(out_ch_list), len(n_convs_list))

        assert len(n_convs_list) == len(ksize_list), \
            (len(n_convs_list), len(ksize_list))

        assert len(ksize_list) == len(use_bn_list), \
            (len(ksize_list), len(use_bn_list))

        assert len(use_bn_list) == len(activation_list), \
            (len(use_bn_list), len(activation_list))

        super(Block, self).__init__()

        in_ch_list = [in_ch] + out_ch_list[:-1]

        for in_ch, out_ch, n_convs, ksize, use_bn, activation in zip(
                in_ch_list, out_ch_list, n_convs_list, ksize_list, use_bn_list,
                activation_list):
            out_ch_list_for_unit = [
                in_ch + (out_ch - in_ch) * (idx + 1) // n_convs
                for idx in range(n_convs)
            ]

            self.append(
                Residual(
                    Unit(in_ch=in_ch,
                         out_ch_list=out_ch_list_for_unit,
                         ksize_list=[ksize] * n_convs,
                         use_bn_list=[use_bn] * n_convs,
                         activation_list=[None] + [activation] * (n_convs - 1),
                         w_init=w_init)))
Example #9
    def __init__(
            self,
            database,
            n_layers: int,
            in_size: int,
            out_size: int,
            initialW=None,
            spatial_edge_model: SpatialEdgeMode = SpatialEdgeMode.all_edge,
            recurrent_block_type: RecurrentType = RecurrentType.rnn,
            attn_heads=8,
            bi_lstm=False):
        super(SpaceTimeRNN, self).__init__()
        self.neg_pos_ratio = 3
        self.database = database
        self.spatial_edge_mode = spatial_edge_model
        self.out_size = out_size
        self.in_size = in_size
        self.frame_node_num = config.BOX_NUM[self.database]
        self.mid_size = 1024
        NodeRecurrentModule = AttentionBlock if recurrent_block_type == RecurrentType.attention_block else TemporalRNN
        if recurrent_block_type == RecurrentType.no_temporal:
            NodeRecurrentModule = PositionwiseFeedForwardLayer

        with self.init_scope():
            if not initialW:
                initialW = initializers.HeNormal()

            self.top = dict()
            for i in range(self.frame_node_num):
                if recurrent_block_type == RecurrentType.rnn:
                    self.add_link(
                        "Node_{}".format(i),
                        TemporalRNN(n_layers,
                                    self.mid_size,
                                    self.out_size,
                                    use_bi_lstm=bi_lstm))
                else:
                    self.add_link(
                        "Node_{}".format(i),
                        NodeRecurrentModule(n_layers, self.mid_size,
                                            self.out_size))
                self.top[str(i)] = getattr(self, "Node_{}".format(i))
            if spatial_edge_model != SpatialEdgeMode.no_edge:
                self.space_lstm = L.NStepBiLSTM(n_layers,
                                                self.in_size,
                                                self.mid_size // 2,
                                                dropout=0.1,
                                                initialW=initialW)  #FIXME
            else:
                self.transfer_dim_fc = L.Linear(self.in_size,
                                                self.mid_size,
                                                initialW=initialW)
Example #10
    def __post_init__(self):
        super(ClassifierNet, self).__init__()
        w = initializers.HeNormal()
        with self.init_scope():
            self.l_phr = L.Linear(None, self.out_size, initialW=w)
            self.l_img = L.Linear(None, self.out_size, initialW=w)

            self.l_1 = L.Linear(None, self.out_size, initialW=w, nobias=True)
            self.bn_l0 = L.BatchNormalization(self.out_size)
            self.bn_v0 = L.BatchNormalization(self.out_size)
            self.bn_1 = L.BatchNormalization(self.out_size)

            self.cls = L.Linear(None, 1, initialW=w)
Example #11
	def __init__(self, nfeats, out_per_feat):
		self.dtype = np.float32
		w = 1./np.sqrt(2)
		initW = initializers.HeNormal(scale=w)
		initbias = initializers.Zero()

		super(SplitCNN,self).__init__()
		self.cnns = []
		for i in range(nfeats):
			self.cnns.append('CNN%s' % i)  # record the per-feature CNN link names
			self.add_link('CNN%s' % i, CNN(out_per_feat))
		self.add_link('fc1', L.Linear(out_per_feat * nfeats, out_size=64, initialW=initW, initial_bias=initbias))
		self.add_link('fc2', L.Linear(64, out_size=2, initialW=initW, initial_bias=initbias))
Example #12
 def __init__(self, hidden_state, dtype=np.float32):
     self.dtype = dtype
     W = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
     super(ConvNet, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(3, 96, 3, stride=1, pad=1, initialW=W)
         self.bn1 = L.BatchNormalization(size=96, dtype=dtype)
         self.conv2 = L.Convolution2D(None, 96, 3, stride=1, pad=1, initialW=W)
         self.bn2 = L.BatchNormalization(size=96, dtype=dtype)
         self.conv3 = L.Convolution2D(None, 96, 3, stride=1, pad=1, initialW=W)
         self.bn3 = L.BatchNormalization(size=96, dtype=dtype)
         self.conv4 = L.Convolution2D(None, hidden_state, 3, stride=1, pad=1, initialW=W)
         self.bn4 = L.BatchNormalization(size=hidden_state, dtype=dtype)
Example #13
 def __init__(self):
     super(ResNet, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(3,
                                      64,
                                      3,
                                      2,
                                      1,
                                      initialW=initializers.HeNormal(),
                                      nobias=True)
         self.bn1 = L.BatchNormalization(64)
         self.res2 = Block(3, 64, 64, 128, 1)
         self.res3 = Block(4, 128, 64, 32)
Example #14
 def __call__(self, link, cfg, initialW=I.HeNormal()):
     assert isinstance(link, self.cls)
     link_ = AdaLossConvolution2D(link.in_channels,
                                  link.out_channels,
                                  ksize=link.ksize,
                                  stride=link.stride,
                                  pad=link.pad,
                                  dilate=link.dilate,
                                  groups=link.groups,
                                  nobias=link.b is None,
                                  ada_loss_cfg=cfg)
     link_.copyparams(link)
     return link_
Example #15
    def __init__(self, n_layer,
                 n_class=None,
                 pretrained_model=None,
                 mean=None, initialW=None, fc_kwargs={}, arch='fb'):
        if arch == 'fb':
            if pretrained_model == 'imagenet':
                raise ValueError(
                    'Pretrained weights for Facebook ResNet models '
                    'are not supported. Please set arch to \'he\'.')
            stride_first = False
            conv1_no_bias = True
        elif arch == 'he':
            stride_first = True
            # Kaiming He uses bias only for ResNet50
            conv1_no_bias = n_layer != 50
        else:
            raise ValueError('arch is expected to be one of [\'he\', \'fb\']')
        blocks = self._blocks[n_layer]

        param, path = utils.prepare_pretrained_model(
            {'n_class': n_class, 'mean': mean},
            pretrained_model, self._models[arch][n_layer],
            {'n_class': 1000, 'mean': _imagenet_mean})
        self.mean = param['mean']

        if initialW is None:
            initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
        if 'initialW' not in fc_kwargs:
            fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            initialW = initializers.constant.Zero()
            fc_kwargs['initialW'] = initializers.constant.Zero()
        kwargs = {'initialW': initialW, 'stride_first': stride_first}

        super(ResNet, self).__init__()
        with self.init_scope():
            self.conv1 = Conv2DBNActiv(None, 64, 7, 2, 3, nobias=conv1_no_bias,
                                       initialW=initialW)
            self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)
            self.res2 = ResBlock(blocks[0], None, 64, 256, 1, **kwargs)
            self.res3 = ResBlock(blocks[1], None, 128, 512, 2, **kwargs)
            self.res4 = ResBlock(blocks[2], None, 256, 1024, 2, **kwargs)
            self.res5 = ResBlock(blocks[3], None, 512, 2048, 2, **kwargs)
            self.pool5 = _global_average_pooling_2d
            self.fc6 = L.Linear(None, param['n_class'], **fc_kwargs)
            self.prob = F.softmax

        if path:
            chainer.serializers.load_npz(path, self)
Example #16
    def __init__(self,
                 depth,
                 n_class,
                 widen_factor=1,
                 drop_rate=0.0,
                 initialW=None):
        """ CTOR. """
        super(WideResNet, self).__init__()

        k = widen_factor

        assert (depth - 4) % 6 == 0, 'Depth should be 6n + 4'
        n = (depth - 4) // 6
        n_channel = [16, 16 * k, 32 * k, 64 * k]

        if initialW is None:
            initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
        kwargs = {'initialW': initialW}

        with self.init_scope():
            self.conv1 = L.Convolution2D(3,
                                         n_channel[0],
                                         ksize=3,
                                         stride=1,
                                         pad=1,
                                         nobias=True,
                                         **kwargs)

            self.wide2 = _WideResBlock(n,
                                       n_channel[0],
                                       n_channel[1],
                                       1,
                                       drop_rate=drop_rate,
                                       **kwargs)
            self.wide3 = _WideResBlock(n,
                                       n_channel[1],
                                       n_channel[2],
                                       2,
                                       drop_rate=drop_rate,
                                       **kwargs)
            self.wide4 = _WideResBlock(n,
                                       n_channel[2],
                                       n_channel[3],
                                       2,
                                       drop_rate=drop_rate,
                                       **kwargs)

            self.bn = L.BatchNormalization(n_channel[3])
            self.relu = lambda x: F.relu(x)
            self.pool5 = lambda x: F.average(x, axis=(2, 3))
            self.fc6 = L.Linear(n_channel[3], n_class, **kwargs)
Example #17
    def __init__(
            self,
            model_name,
            insize,
            keypoint_names,
            edges,  # limbs, i.e. connections between keypoints
            local_grid_size,
            parts_scale,
            instance_scale,
            width_multiplier=1.0,
            lambda_resp=0.25,
            lambda_iou=1.0,
            lambda_coor=5.0,
            lambda_size=5.0,
            lambda_limb=0.5,
            dtype=np.float32):
        super(PoseProposalNet, self).__init__()
        self.model_name = model_name
        self.insize = insize
        self.keypoint_names = keypoint_names
        self.edges = edges
        self.local_grid_size = local_grid_size
        self.dtype = dtype
        self.lambda_resp = lambda_resp
        self.lambda_iou = lambda_iou
        self.lambda_coor = lambda_coor
        self.lambda_size = lambda_size
        self.lambda_limb = lambda_limb
        self.parts_scale = np.array(
            parts_scale)  # side length of square for kps
        self.instance_scale = np.array(instance_scale)
        with self.init_scope():
            # base network; the feature map is downscaled by a factor of 32, channels=512
            self.feature_layer = get_network(model_name,
                                             dtype=dtype,
                                             width_multiplier=width_multiplier)
            ksize = self.feature_layer.last_ksize  # 1
            self.lastconv = L.Convolution2D(
                None,
                6 * len(self.keypoint_names) + self.local_grid_size[0] *
                self.local_grid_size[1] * len(self.edges),
                ksize=ksize,
                stride=1,
                pad=ksize // 2,
                initialW=initializers.HeNormal(1 / np.sqrt(2), dtype))

        self.outsize = self.get_outsize()  # number of grid cells, e.g. (7, 7); not fixed, it depends on insize and local_grid_size
        inW, inH = self.insize  # image input shape[1:]
        outW, outH = self.outsize
        self.gridsize = (int(inW / outW), int(inH / outH))  #32 32
Example #18
 def __init__(self, class_labels=9):
     super(ResNet, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(1,
                                      64,
                                      7,
                                      2,
                                      3,
                                      initialW=initializers.HeNormal())
         self.bn1 = L.BatchNormalization(64)
         self.res1 = Block(2, 64, 64, 64, 1)
         self.res2 = Block(2, 64, 128, 128)
         self.res3 = Block(2, 128, 256, 256)
         self.fc = L.Linear(256, class_labels)
Example #19
 def __init__(self):
     super(ResnetFeatureExtractor, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(3,
                                      64,
                                      7,
                                      2,
                                      3,
                                      initialW=initializers.HeNormal(),
                                      nobias=True)
         self.bn1 = L.BatchNormalization(64)
         self.res2 = Block(3, 64, 64, 256, 1)
         self.res3 = Block(4, 256, 128, 512)
         self.res4 = Block(23, 512, 256, 1024)
Example #20
    def __init__(self, in_size, ch):
        super(BottleNeckB, self).__init__()
        initialW = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(in_size)
Example #21
    def __init__(self, n_class=1000):  # 1000 is for ImageNet
        super(NIN, self).__init__()
        conv_init = I.HeNormal()  # MSRA scaling
        self.n_class = n_class

        with self.init_scope():
            self.mlpconv1 = L.MLPConvolution2D(
                None, (96, 96, 96), 11, stride=4, conv_init=conv_init)
            self.mlpconv2 = L.MLPConvolution2D(
                None, (256, 256, 256), 5, pad=2, conv_init=conv_init)
            self.mlpconv3 = L.MLPConvolution2D(
                None, (384, 384, 384), 3, pad=1, conv_init=conv_init)
            self.mlpconv4 = L.MLPConvolution2D(
                None, (1024, 1024, self.n_class), 3, pad=1, conv_init=conv_init)
Example #22
    def __init__(self, inch, outch, first_stride=2):
        super(BlockA, self).__init__()
        initialW = initializers.HeNormal(scale=1.0)
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                inch, outch, ksize=3, stride=first_stride, pad=3 // 2, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(outch)
            self.conv2 = L.Convolution2D(
                outch, outch, ksize=3, stride=1, pad=3 // 2, initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(outch)

            self.conv_skip = L.Convolution2D(
                inch, outch, ksize=3, stride=first_stride, pad=3 // 2, initialW=initialW, nobias=True)
            self.bn_skip = L.BatchNormalization(outch)
Example #23
 def __init__(self, depth, ksize, stride=1, pad=0, initialW=I.HeNormal()):
     super(ConvBnRelu, self).__init__()
     with self.init_scope():
         self.conv = L.Convolution2D(None,
                                     depth,
                                     ksize=ksize,
                                     stride=stride,
                                     pad=pad,
                                     initialW=initialW,
                                     nobias=True)
         self.bn = L.BatchNormalization(depth,
                                        decay=0.9997,
                                        eps=0.001,
                                        use_gamma=False)
Example #24
 def __init__(self, n_layers, insize, outsize, initialW=None, use_bi_lstm=False):
     super(TemporalRNN, self).__init__()
     if not initialW:
         initialW = initializers.HeNormal()
     self.n_layer = n_layers
     self.insize = insize
     with self.init_scope():
         if use_bi_lstm:
             self.lstm = L.NStepBiLSTM(self.n_layer, 1024, 256, dropout=0.1) #dropout = 0.0
         else:
             self.lstm = L.NStepLSTM(self.n_layer, 1024, 512,  dropout=0.1)
         self.fc1 = L.Linear(insize, 1024, initialW=initialW)
         self.fc2 = L.Linear(1024, 1024, initialW=initialW)
         self.fc3 = L.Linear(512, outsize, initialW=initialW)
Example #25
 def __init__(self, use_optical_flow=False, temporal_length=10):
     super(ResnetFeatureExtractor, self).__init__()
     with self.init_scope():
         if use_optical_flow:
             self.conv1 = L.Convolution2D(2 * temporal_length,
                                          64,
                                          7,
                                          2,
                                          3,
                                          initialW=initializers.HeNormal(),
                                          nobias=True)
         else:
             self.conv1 = L.Convolution2D(3,
                                          64,
                                          7,
                                          2,
                                          3,
                                          initialW=initializers.HeNormal(),
                                          nobias=True)
         self.bn1 = L.BatchNormalization(64)
         self.res2 = Block(3, 64, 64, 256, 1)
         self.res3 = Block(4, 256, 128, 512)
         self.res4 = Block(23, 512, 256, 1024)
Example #26
 def __init__(self,
              roi_size=7,
              spatial_scale=1 / 16.0,
              n_class=22,
              classify_mode=False):
     # n_class includes the background
     super(MobileNetHead, self).__init__()
     self.classify_mode = classify_mode
     initialW = initializers.HeNormal()
     self.roi_size = roi_size
     self.spatial_scale = spatial_scale  # this is critical; it is usually 1/16.0
     with self.init_scope():
         self.convert_feature_dim_fc = L.Linear(75264, 2048)
         self.score = L.Linear(2048, n_class)
Example #27
 def __init__(self):
     super(ResNet_v2_50, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(3,
                                      64,
                                      7,
                                      2,
                                      3,
                                      initialW=initializers.HeNormal())
         self.res2 = Block(3, 64, 64, 256, stride=2)
         self.res3 = Block(4, 256, 128, 512, stride=2)
         self.res4 = Block(6, 512, 256, 1024, stride=2)
         self.res5 = Block(3, 1024, 512, 2048, stride=1)
         self.postnorm = L.BatchNormalization(2048)
Example #28
    def __init__(self, action_space, hidden_layers=[100, 100, 100], seed=0):
        super().__init__()
        # assert hasattr(action_space, 'n')
        # assert hasattr(action_space, 'sample')
        self.action_size = action_space.shape[0]
        w_init = initializers.HeNormal(rng=np.random.RandomState(seed))

        with self.init_scope():
            self.hidden_layers = chainer.ChainList(
                *[L.Linear(None, h, initialW=w_init) for h in hidden_layers])
            # self.mu_layers = chainer.ChainList(*[L.Linear(None, self.action_size, initialW=w_init)])
            self.mu_layers = L.Linear(None, self.action_size, initialW=w_init)
            self.action_out = SoftmaxDistribution
            self.activation = F.relu
Example #29
 def __init__(self, n_classes):
     super(ResNet50, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(3,
                                      64,
                                      7,
                                      2,
                                      3,
                                      initialW=initializers.HeNormal())
         self.bn1 = L.BatchNormalization(64)
         self.res2 = Block(3, 64, 64, 256, 1)
         self.res3 = Block(4, 256, 128, 512)
         self.res4 = Block(6, 512, 256, 1024)
         self.res5 = Block(3, 1024, 512, 2048)
         self.fc = L.Linear(2048, n_classes)
Example #30
 def __init__(self, inout_units, hidden_units):
     """
     @param inout_units: the number of input/output units
     @param hidden_units: the number of hidden units
     """
     initializer = I.HeNormal()
     super(Seq2Seq, self).__init__(
         l1=L.Linear(inout_units, hidden_units, initialW=initializer),
         l2=L.LSTM(hidden_units, hidden_units),
         l3=L.Linear(hidden_units, inout_units, initialW=initializer),
         w1=L.Linear(hidden_units, hidden_units, initialW=initializer),
         w2=L.Linear(hidden_units, hidden_units, initialW=initializer),
     )
     self.phase = Seq2Seq.Train
     self.train = True