Code Example #1
File: cpu_server.py Project: aichitang/style2paints
    def __init__(self):
        super(GoogLeNet, self).__init__(
            conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3, nobias=True),
            norm1=L.BatchNormalization(64),
            conv2=L.Convolution2D(64, 192, 3, pad=1, nobias=True),
            norm2=L.BatchNormalization(192),
            inc3a=L.InceptionBN(192, 64, 64, 64, 64, 96, 'avg', 32),
            inc3b=L.InceptionBN(256, 64, 64, 96, 64, 96, 'avg', 64),
            inc3c=L.InceptionBN(320, 0, 128, 160, 64, 96, 'max', stride=2),
            inc4a=L.InceptionBN(576, 224, 64, 96, 96, 128, 'avg', 128),
            inc4b=L.InceptionBN(576, 192, 96, 128, 96, 128, 'avg', 128),
            inc4c=L.InceptionBN(576, 128, 128, 160, 128, 160, 'avg', 128),
            inc4d=L.InceptionBN(576, 64, 128, 192, 160, 192, 'avg', 128),
            inc4e=L.InceptionBN(576, 0, 128, 192, 192, 256, 'max', stride=2),
            inc5a=L.InceptionBN(1024, 352, 192, 320, 160, 224, 'avg', 128),
            inc5b=L.InceptionBN(1024, 352, 192, 320, 192, 224, 'max', 128),
            out_tag=L.Linear(1024 + 8, 3000),

            conva=L.Convolution2D(576, 128, 1, nobias=True),
            norma=L.BatchNormalization(128),
            lina=L.Linear(2048, 1024, nobias=True),
            norma2=L.BatchNormalization(1024),
            out_a_tag=L.Linear(1024 + 8, 3000),

            convb=L.Convolution2D(576, 128, 1, nobias=True),
            normb=L.BatchNormalization(128),
            linb=L.Linear(2048, 1024, nobias=True),
            normb2=L.BatchNormalization(1024),
            out_b_tag=L.Linear(1024 + 8, 3000),
        )
Code Example #2
 def __init__(self):
     super(GoogLeNetBN, self).__init__(
         conv1=L.Convolution2D(None, 64, 7, stride=2, pad=3, nobias=True),
         norm1=L.BatchNormalization(64),
         conv2=L.Convolution2D(None, 192, 3, pad=1, nobias=True),
         norm2=L.BatchNormalization(192),
         inc3a=L.InceptionBN(None, 64, 64, 64, 64, 96, 'avg', 32),
         inc3b=L.InceptionBN(None, 64, 64, 96, 64, 96, 'avg', 64),
         inc3c=L.InceptionBN(None, 0, 128, 160, 64, 96, 'max', stride=2),
         inc4a=L.InceptionBN(None, 224, 64, 96, 96, 128, 'avg', 128),
         inc4b=L.InceptionBN(None, 192, 96, 128, 96, 128, 'avg', 128),
         inc4c=L.InceptionBN(None, 128, 128, 160, 128, 160, 'avg', 128),
         inc4d=L.InceptionBN(None, 64, 128, 192, 160, 192, 'avg', 128),
         inc4e=L.InceptionBN(None, 0, 128, 192, 192, 256, 'max', stride=2),
         inc5a=L.InceptionBN(None, 352, 192, 320, 160, 224, 'avg', 128),
         inc5b=L.InceptionBN(None, 352, 192, 320, 192, 224, 'max', 128),
         out=L.Linear(None, 1000),
         conva=L.Convolution2D(None, 128, 1, nobias=True),
         norma=L.BatchNormalization(128),
         lina=L.Linear(None, 1024, nobias=True),
         norma2=L.BatchNormalization(1024),
         outa=L.Linear(None, 1000),
         convb=L.Convolution2D(None, 128, 1, nobias=True),
         normb=L.BatchNormalization(128),
         linb=L.Linear(None, 1024, nobias=True),
         normb2=L.BatchNormalization(1024),
         outb=L.Linear(None, 1000),
     )
     self._train = True
Code Example #3
 def __init__(self, n_classes, n_base_units=8):
     n_mid_units = n_base_units * 2
     super().__init__(
         # conv1: 100 -> (100 - 4) / 3 + 1 = 33
         conv1=L.Convolution2D(3, n_base_units, 4, stride=3),
         # pool1: 33 -> (33 + 1*2 - 3) / 2 + 1 = 17
         # inc1: 17 -> (17 + 1*2 - 3) / 2 + 1 = 9
         inc1=L.InceptionBN(in_channels=n_base_units,
                            out1=0,
                            proj3=n_mid_units,
                            out3=n_mid_units,
                            proj33=n_mid_units,
                            out33=n_mid_units,
                            pooltype='max',
                            stride=2),
         # inc2: 9 -> (9 + 1*2 - 3) / 2 + 1 = 5
         inc2=L.InceptionBN(in_channels=n_mid_units * 2 + n_base_units,
                            out1=0,
                            proj3=n_mid_units,
                            out3=n_mid_units,
                            proj33=n_mid_units,
                            out33=n_mid_units,
                            pooltype='max',
                            stride=2),
         fc1=L.Linear(
             5 * 5 * (n_mid_units * 2 + n_mid_units * 2 + n_base_units),
             128),
         fc2=L.Linear(128, n_classes))
     self.n_units = 5 * 5 * (n_mid_units * 2 + n_mid_units * 2 +
                             n_base_units)
     self.train = True
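
The inline comments in Code Example #3 walk through the spatial-size arithmetic: a 100x100 input shrinks to 33, 17, 9, and finally 5, which is what fixes the fc1 input size. The snippet below is a minimal sketch, not part of the original project (the conv_out helper is an assumption for illustration; n_base_units=8 is the default from the signature above), that simply replays that arithmetic with the usual output-size formula:

n_base_units = 8
n_mid_units = n_base_units * 2

def conv_out(size, ksize, stride, pad=0):
    # output size of a convolution / pooling layer
    return (size + 2 * pad - ksize) // stride + 1

s = conv_out(100, 4, 3)       # conv1: 100 -> 33
s = conv_out(s, 3, 2, pad=1)  # pool1: 33 -> 17
s = conv_out(s, 3, 2, pad=1)  # inc1:  17 -> 9
s = conv_out(s, 3, 2, pad=1)  # inc2:  9 -> 5
# fc1 therefore takes 5 * 5 * (n_mid_units * 2 + n_mid_units * 2 + n_base_units) units
print(s, 5 * 5 * (n_mid_units * 2 + n_mid_units * 2 + n_base_units))  # -> 5 1800
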
Code Example #4
    def __init__(self):
        self.dtype = dtype = np.float16
        W = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
        bias = initializers.Zero(self.dtype)

        chainer.Chain.__init__(self)
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                None, 64, 7, stride=2, pad=3, initialW=W, nobias=True)
            self.norm1 = L.BatchNormalization(64, dtype=dtype)
            self.conv2 = L.Convolution2D(
                None, 192, 3, pad=1, initialW=W, nobias=True)
            self.norm2 = L.BatchNormalization(192, dtype=dtype)
            self.inc3a = L.InceptionBN(
                None, 64, 64, 64, 64, 96, 'avg', 32, conv_init=W, dtype=dtype)
            self.inc3b = L.InceptionBN(
                None, 64, 64, 96, 64, 96, 'avg', 64, conv_init=W, dtype=dtype)
            self.inc3c = L.InceptionBN(
                None, 0, 128, 160, 64, 96, 'max', stride=2,
                conv_init=W, dtype=dtype)
            self.inc4a = L.InceptionBN(
                None, 224, 64, 96, 96, 128, 'avg', 128,
                conv_init=W, dtype=dtype)
            self.inc4b = L.InceptionBN(
                None, 192, 96, 128, 96, 128, 'avg', 128,
                conv_init=W, dtype=dtype)
            self.inc4c = L.InceptionBN(
                None, 128, 128, 160, 128, 160, 'avg', 128,
                conv_init=W, dtype=dtype)
            self.inc4d = L.InceptionBN(
                None, 64, 128, 192, 160, 192, 'avg', 128,
                conv_init=W, dtype=dtype)
            self.inc4e = L.InceptionBN(
                None, 0, 128, 192, 192, 256, 'max',
                stride=2, conv_init=W, dtype=dtype)
            self.inc5a = L.InceptionBN(
                None, 352, 192, 320, 160, 224, 'avg', 128,
                conv_init=W, dtype=dtype)
            self.inc5b = L.InceptionBN(
                None, 352, 192, 320, 192, 224, 'max', 128,
                conv_init=W, dtype=dtype)
            self.out = L.Linear(None, 1000, initialW=W, initial_bias=bias)

            self.conva = L.Convolution2D(None, 128, 1, initialW=W, nobias=True)
            self.norma = L.BatchNormalization(128, dtype=dtype)
            self.lina = L.Linear(None, 1024, initialW=W, nobias=True)
            self.norma2 = L.BatchNormalization(1024, dtype=dtype)
            self.outa = L.Linear(None, 1000, initialW=W, initial_bias=bias)

            self.convb = L.Convolution2D(None, 128, 1, initialW=W, nobias=True)
            self.normb = L.BatchNormalization(128, dtype=dtype)
            self.linb = L.Linear(None, 1024, initialW=W, nobias=True)
            self.normb2 = L.BatchNormalization(1024, dtype=dtype)
            self.outb = L.Linear(None, 1000, initialW=W, initial_bias=bias)
Code Example #5
File: googlenetbn.py Project: ohnabe/food101
    def __init__(self, n_class):
        super(GoogLeNetBN, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(None,
                                         64,
                                         7,
                                         stride=2,
                                         pad=3,
                                         nobias=True)
            self.norm1 = L.BatchNormalization(64)
            self.conv2 = L.Convolution2D(None, 192, 3, pad=1, nobias=True)
            self.norm2 = L.BatchNormalization(192)
            self.inc3a = L.InceptionBN(None, 64, 64, 64, 64, 96, 'avg', 32)
            self.inc3b = L.InceptionBN(None, 64, 64, 96, 64, 96, 'avg', 64)
            self.inc3c = L.InceptionBN(None,
                                       0,
                                       128,
                                       160,
                                       64,
                                       96,
                                       'max',
                                       stride=2)
            self.inc4a = L.InceptionBN(None, 224, 64, 96, 96, 128, 'avg', 128)
            self.inc4b = L.InceptionBN(None, 192, 96, 128, 96, 128, 'avg', 128)
            self.inc4c = L.InceptionBN(None, 160, 128, 160, 128, 160, 'avg',
                                       128)
            self.inc4d = L.InceptionBN(None, 96, 128, 192, 160, 192, 'avg',
                                       128)
            self.inc4e = L.InceptionBN(None,
                                       0,
                                       128,
                                       192,
                                       192,
                                       256,
                                       'max',
                                       stride=2)
            self.inc5a = L.InceptionBN(None, 352, 192, 320, 160, 224, 'avg',
                                       128)
            self.inc5b = L.InceptionBN(None, 352, 192, 320, 192, 224, 'max',
                                       128)
            self.out = L.Linear(None, n_class)

            self.conva = L.Convolution2D(None, 128, 1, nobias=True)
            self.norma = L.BatchNormalization(128)
            self.lina = L.Linear(None, 1024, nobias=True)
            self.norma2 = L.BatchNormalization(1024)
            self.outa = L.Linear(None, n_class)

            self.convb = L.Convolution2D(None, 128, 1, nobias=True)
            self.normb = L.BatchNormalization(128)
            self.linb = L.Linear(None, 1024, nobias=True)
            self.normb2 = L.BatchNormalization(1024)
            self.outb = L.Linear(None, n_class)
Code Example #6
 def setup_data(self):
     dtype = chainer.get_dtype()
     self.x = numpy.random.uniform(
         -1, 1, (10, self.in_channels, 5, 5)).astype(dtype)
     self.l = links.InceptionBN(self.in_channels, self.out1, self.proj3,
                                self.out3, self.proj33, self.out33,
                                self.pooltype, self.proj_pool, self.stride)
Code Example #7
 def setup_data(self):
     self.x = numpy.random.uniform(
         -1, 1, (10, self.in_channels, 5, 5)
     ).astype(numpy.float32)
     self.l = links.InceptionBN(
         self.in_channels, self.out1, self.proj3, self.out3,
         self.proj33, self.out33, self.pooltype, self.proj_pool,
         self.stride)
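
Code Examples #6 and #7 construct an InceptionBN link directly for testing. As a point of reference, here is a minimal, standalone forward-pass sketch in the same spirit; the parameter values are copied from the inc3a layer used in the GoogLeNetBN examples above, while the batch size and the 28x28 spatial size are assumptions chosen only for illustration:

import numpy as np
import chainer.links as L

# inc3a-style block: in_channels=192, out1=64, proj3=64, out3=64,
# proj33=64, out33=96, pooltype='avg', proj_pool=32
inc = L.InceptionBN(192, 64, 64, 64, 64, 96, 'avg', 32)

x = np.random.uniform(-1, 1, (2, 192, 28, 28)).astype(np.float32)
y = inc(x)

# the four branches are concatenated: 64 + 64 + 96 + 32 = 256 channels,
# and the spatial size is preserved because stride is 1
print(y.shape)  # (2, 256, 28, 28)
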
Code Example #8
File: model.py Project: shinpoi/pixiv_collector
 def __init__(self):
     super(CNN_02, self).__init__(
         conv1=L.Convolution2D(1, 64, 3, stride=1, nobias=True),
         # MaxPool(3x3, 2)
         bn1=L.BatchNormalization(64),
         conv2a=L.Convolution2D(64, 64, 1, stride=1),
         conv2b=L.Convolution2D(64, 128, 3, stride=1),
         bn2=L.BatchNormalization(128),
         # MaxPool(3x3, 2)
         inc3a=L.InceptionBN(128, 64, 48, 64, 64, 96, 'avg', 32),
         inc3b=L.InceptionBN(256, 64, 64, 96, 64, 96, 'max', 64),
         # MaxPool(3x3, 2)
         inc4a=L.InceptionBN(320, 224, 64, 96, 96, 128, 'avg', 128),
         inc4b=L.InceptionBN(576, 192, 128, 192, 192, 256, 'max', 128),
         inc5a=L.InceptionBN(768, 320, 192, 288, 192, 288, 'max', 128),
         # AveragePool(7x7, 1)
         # Dropout(40%)
         preout=L.Linear(1024, 64),
         # ReLU
         out=L.Linear(64, 2)
         # SoftMax
     )
Code Example #9
    def __init__(self, n_outputs):
        super(GoogLeNetBN, self).__init__(
            conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3, nobias=True),
            norm1=L.BatchNormalization(64),
            conv2=L.Convolution2D(64, 192, 3, pad=1, nobias=True),
            norm2=L.BatchNormalization(192),
            inc3a=L.InceptionBN(192, 64, 64, 64, 64, 96, 'avg', 32),
            inc3b=L.InceptionBN(256, 64, 64, 96, 64, 96, 'avg', 64),
            inc3c=L.InceptionBN(320, 0, 128, 160, 64, 96, 'max', stride=2),
            inc4a=L.InceptionBN(576, 224, 64, 96, 96, 128, 'avg', 128),
            inc4b=L.InceptionBN(576, 192, 96, 128, 96, 128, 'avg', 128),
            inc4c=L.InceptionBN(576, 128, 128, 160, 128, 160, 'avg', 128),
            inc4d=L.InceptionBN(576, 64, 128, 192, 160, 192, 'avg', 128),
            inc4e=L.InceptionBN(576, 0, 128, 192, 192, 256, 'max', stride=2),
            inc5a=L.InceptionBN(1024, 352, 192, 320, 160, 224, 'avg', 128),
            inc5b=L.InceptionBN(1024, 352, 192, 320, 192, 224, 'max', 128),
            linz=L.Linear(1024, 300),
            out=L.Linear(300, n_outputs),
            outimg=L.Linear(1024, n_outputs),
            outdoc=L.Linear(1000, n_outputs),

            doc_fc1=L.Linear(1000, 600),
            doc_fc2=L.Linear(600, 300),

            conva=L.Convolution2D(576, 128, 1, nobias=True),
            norma=L.BatchNormalization(128),
            lina=L.Linear(3200, 1024, nobias=True),
            norma2=L.BatchNormalization(1024),
            outa=L.Linear(1024, n_outputs),

            convb=L.Convolution2D(576, 128, 1, nobias=True),
            normb=L.BatchNormalization(128),
            linb=L.Linear(3200, 1024, nobias=True),
            normb2=L.BatchNormalization(1024),
            outb=L.Linear(1024, n_outputs),
            bi1=L.Bilinear(300, 300, 300)
        )
Code Example #10
    def __init__(self,
                 n_class=None,
                 pretrained_model=None,
                 mean=None,
                 initialW=None,
                 initialBias=None):
        self.n_class = n_class
        self.mean = mean
        self.initialW = initialW
        self.initialbias = initialBias

        self.insize = 224

        if n_class is None:
            self.n_class = 100

        if mean is None:
            # imagenet means
            self.mean = np.array([123.68, 116.779, 103.939],
                                 dtype=np.float32)[:, np.newaxis, np.newaxis]

        if initialW is None:
            # employ default initializers used in BVLC. For more detail, see
            self.initialW = uniform.LeCunUniform(scale=1.0)

        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation;
            # the values are overwritten when the pretrained model is loaded.
            self.initialW = constant.Zero()

        super(GoogleNetBN, self).__init__()
        with self.init_scope():
            # Deep layers: GoogleNet of BatchNormalization version
            self.conv1 = L.Convolution2D(None,
                                         64,
                                         7,
                                         stride=2,
                                         pad=3,
                                         nobias=True)
            self.norm1 = L.BatchNormalization(64)
            self.conv2 = L.Convolution2D(None,
                                         192,
                                         3,
                                         stride=1,
                                         pad=1,
                                         nobias=True)
            self.norm2 = L.BatchNormalization(192)

            self.inc3a = L.InceptionBN(None, 64, 64, 64, 64, 96, "avg", 32)
            self.inc3b = L.InceptionBN(None, 64, 64, 96, 64, 96, "avg", 64)
            self.inc3c = L.InceptionBN(None,
                                       0,
                                       128,
                                       160,
                                       64,
                                       96,
                                       "max",
                                       stride=2)

            self.inc4a = L.InceptionBN(None, 224, 64, 96, 96, 128, "avg", 128)
            self.inc4b = L.InceptionBN(None, 192, 96, 128, 96, 128, "avg", 128)
            self.inc4c = L.InceptionBN(None, 128, 128, 160, 128, 160, "avg",
                                       128)
            self.inc4d = L.InceptionBN(None, 64, 128, 192, 160, 192, "avg",
                                       128)
            self.inc4e = L.InceptionBN(None,
                                       0,
                                       128,
                                       192,
                                       192,
                                       256,
                                       "max",
                                       stride=2)

            self.inc5a = L.InceptionBN(None, 352, 192, 320, 160, 224, "avg",
                                       128)
            self.inc5b = L.InceptionBN(None, 352, 192, 320, 192, 224, "max",
                                       128)
            self.loss3_fc = L.Linear(None,
                                     self.n_class,
                                     initialW=self.initialW)

            self.loss1_conv = L.Convolution2D(None,
                                              128,
                                              1,
                                              initialW=self.initialW,
                                              nobias=True)
            self.norma = L.BatchNormalization(128)
            self.loss1_fc1 = L.Linear(None,
                                      1024,
                                      initialW=self.initialW,
                                      nobias=True)
            self.norma2 = L.BatchNormalization(1024)
            self.loss1_fc2 = L.Linear(None,
                                      self.n_class,
                                      initialW=self.initialW)

            self.loss2_conv = L.Convolution2D(None,
                                              128,
                                              1,
                                              initialW=self.initialW,
                                              nobias=True)
            self.normb = L.BatchNormalization(128)
            self.loss2_fc1 = L.Linear(None,
                                      1024,
                                      initialW=self.initialW,
                                      nobias=True)
            self.normb2 = L.BatchNormalization(1024)
            self.loss2_fc2 = L.Linear(None,
                                      self.n_class,
                                      initialW=self.initialW)