Example #1
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.relu(self.bn3_4(self.conv3_4(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.dropout(F.relu(self.fc4(h)), train=train, ratio=0.5)
        h = F.dropout(F.relu(self.fc5(h)), train=train, ratio=0.5)
        h = self.fc6(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
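Example #1 is written against the Chainer v1 API: `Variable(..., volatile=...)` and the `train=` keyword of `F.dropout` were removed in Chainer v2. A minimal standalone sketch of the equivalent test-mode idiom under v2+:

import numpy as np
import chainer
import chainer.functions as F

x = np.random.randn(2, 8).astype(np.float32)
# chainer.config.train replaces the train= keyword, and
# no_backprop_mode() replaces volatile=True on Variable.
with chainer.using_config('train', False), chainer.no_backprop_mode():
    h = F.dropout(F.relu(x), ratio=0.25)  # dropout is a no-op in test mode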
Example #2
    def forward(self, x_data, y_data, train=True, models=None):
        VGG_mini = models["VGG_mini"]
        VGG_mini2 = models["VGG_mini2"]
        VGG_mini3 = models["VGG_mini3"]
        
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.relu(self.conv1_3(h))
        h = F.relu(self.conv1_4(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        
        h = F.relu(self.conv1_5(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        
        h = self.fc(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            # return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #3
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = self.prelu1_1(self.bn1_1(self.conv1_1(x)))
        h = self.prelu1_2(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.prelu2_1(self.bn2_1(self.conv2_1(h)))
        h = self.prelu2_2(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.prelu3_1(self.conv3_1(h))
        h = self.prelu3_2(self.conv3_2(h))
        h = self.prelu3_3(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=1)

        h = self.prelu4_1(self.conv4_1(h))
        h = self.prelu4_2(self.conv4_2(h))
        h = self.prelu4_3(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=1)

        h = self.prelu5_1(self.conv5_1(h))
        h = self.prelu5_2(self.conv5_2(h))
        h = self.prelu5_3(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=1)

        h = F.dropout(self.prelu6(self.fc6(h)), train=train, ratio=0.5)
        h = F.dropout(self.prelu7(self.fc7(h)), train=train, ratio=0.5)
        h = self.fc8(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
Example #4
    def forward(self, x_data, y_data, train=True):
        x, t = Variable(x_data), Variable(y_data)
        h = F.max_pooling_2d(F.relu(self.bn1(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.bn2(self.conv2(h))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.conv3(h)), 3, stride=2)
        h = self.fc4(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
Example #5
    def __call__(self, x, t, train):
        x = chainer.Variable(x)
        t = chainer.Variable(t)

        h = F.relu(self.l1(x))
        h = F.relu(self.l2(h))
        h = self.l3(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.accuracy(h, t)
Example #6
    def __call__(self, x, t):
        y = self.predictor(x)

        if t.ndim == 2:  # use squared error when the label is one-hot
            y = F.softmax(y)
            # loss = F.mean_squared_error(y, t)
            loss = sum_of_squared_error(y, t)
            accuracy = F.accuracy(y, t.data.argmax(axis=1).astype(np.int32))
        else:  # use softmax cross-entropy for ordinary integer labels
            loss = F.softmax_cross_entropy(y, t)
            accuracy = F.accuracy(y, t)

        return y, loss, accuracy
Example #7
def forward(x_data, y_data, print_conf_matrix=False):
    '''
    Neural net architecture
    :param x_data:
    :param y_data:
    :param print_conf_matrix:
    :return:
    '''
    x, t = Variable(x_data), Variable(y_data)

    h1 = F.relu(model.l1(x))
    h1 = F.max_pooling_2d(h1, max_pool_window_1, stride=max_pool_stride_1)

    h2 = F.dropout(F.relu(model.l2(h1)))
    h2 = F.average_pooling_2d(h2, avg_pool_window_2, stride=avg_pool_stride_2)
    h2 = F.max_pooling_2d(h2, max_pool_window_2, stride=max_pool_stride_2)

    y = model.l3(h2)

    # display confusion matrix
    if print_conf_matrix:
        pdb.set_trace()
        print(confusion_matrix(cuda.to_cpu(t.data), cuda.to_cpu(y.data).argmax(axis=1)))

    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #8
    def _train_linear_classifier(self, model, optimizer, gpu):
        def _make_label(x):
            a = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
            t = np.empty_like(a).astype(np.int32)
            t[a >= 0] = 0
            t[a < 0] = 1
            return t

        def _make_dataset(batch_size, unit_num, gpu):
            x_data = np.random.uniform(-1, 1, (batch_size, unit_num)).astype(np.float32)
            t_data = _make_label(x_data)
            if gpu:
                x_data = cuda.to_gpu(x_data)
                t_data = cuda.to_gpu(t_data)
            x = Variable(x_data)
            t = Variable(t_data)
            return x, t

        for epoch in range(self.EPOCH):
            x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
            optimizer.zero_grads()
            y = model.l(x)
            loss = softmax_cross_entropy(y, t)
            loss.backward()
            optimizer.update()

        x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
        y_test = model.l(x_test)
        return accuracy(y_test, t_test)
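Example #8 uses the Chainer v1 call `optimizer.zero_grads()`; Chainer v2 renamed this to `cleargrads()` on the link itself, as the variant in Example #17 below does. A minimal standalone sketch of the v2+ training step:

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

model = L.Linear(10, 2)
optimizer = optimizers.SGD()
optimizer.setup(model)

x = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
t = np.zeros(4, dtype=np.int32)

model.cleargrads()  # v2+ replacement for optimizer.zero_grads()
loss = F.softmax_cross_entropy(model(x), t)
loss.backward()
optimizer.update()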
Example #9
def forward(x_data, y_data, model, train=True):
        # Neural net architecture
        #x, t = chainer.Variable(x_data), chainer.Variable(y_data)
        t = chainer.Variable(y_data)
        x = {}
        for n in range(500):
            x[n] = chainer.Variable(x_data[n])
        h = {}
        initial_V = {}
        initial_V_relu = {}
        for nameint in range(len(l_name)-2):
            initial_V[nameint] = model[l_name[nameint]](x[nameint])
            #initial_V_relu[nameint] = F.relu(initial_V[nameint])
            #initial_V_relu[nameint] = F.sigmoid(initial_V[nameint])
            initial_V_relu[nameint] = F.tanh(initial_V[nameint])
            #h[nameint] = F.dropout(F.relu(initial_V[nameint]), train=train)
            #h[nameint] = F.dropout(F.sigmoid(initial_V[nameint]), train=train)
            h[nameint] = F.dropout(F.tanh(initial_V[nameint]), train=train)
            #h[nameint] = F.relu(model[l_name[nameint]](x[nameint]))
        #h6 = F.dropout(F.relu(model.l501(Returnharray(h))), train=train)
        #h6 = F.dropout(F.sigmoid(model.l501(Returnharray(h))), train=train)
        h6 = F.dropout(F.tanh(model.l501(Returnharray(h))), train=train)
        y = model.l502(h6)
        y_pre = y.data.argmax(axis=1)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t), y_pre, initial_V, initial_V_relu
Example #10
    def forward(self, x_img, x_doc, y_data, train=True):

        x_img = cuda.cupy.asarray(x_img)
        x_doc = cuda.cupy.asarray(x_doc)
        y_data = cuda.cupy.asarray(y_data)

        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)

        h = F.max_pooling_2d(F.relu(self.conv1(img)), ksize=3, stride=2, pad=0)
        h = F.local_response_normalization(h)
        h = F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=2, pad=0)
        h = F.local_response_normalization(h)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), ksize=3, stride=2, pad=0)
        h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.5)
        h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.5)
        h2 = F.relu(self.doc_fc1(doc))
        h2 = F.relu(self.doc_fc2(h2))
        b = F.relu(self.bi1(h, h2))
        y = self.fc8(b)
        if train:
            return F.softmax_cross_entropy(y, t)
        else:
            return F.accuracy(y, t)
Example #11
    def mini_batch_test(self, x_test, t_test, mini_batch_size=10):
        sum_loss = 0
        sum_accuracy = 0

        test_count = len(x_test)
        #test_count = 20 # DEBUG
    
        for i in range(0, test_count, mini_batch_size):
            print(i)
            x_batch = []
            t_batch = []
            for j in range(i, i + mini_batch_size):
                x_batch.append(x_test[j])
                t_batch.append(t_test[j])
            x_batch = numpy.array(x_batch, dtype=numpy.float32)
            t_batch = numpy.array(t_batch, dtype=numpy.int32)

            x = chainer.Variable(x_batch)
            t = chainer.Variable(t_batch)
            h = self.forward(x)
            e = self.loss(h, t)
            a = F.accuracy(h, t)
        
            sum_loss += float(e.data) * len(t_batch)
            sum_accuracy += float(a.data) * len(t_batch)

        test_loss = sum_loss / test_count
        test_accuracy = sum_accuracy / test_count
        return test_loss, test_accuracy
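Note that the inner `for j in range(i, i + mini_batch_size)` loop in Example #11 reads past the end of `x_test` whenever `test_count` is not a multiple of `mini_batch_size`. A slice caps itself at the end of the list, so a sketch of a safer batch assembly (same variable names as the example) is:

for i in range(0, test_count, mini_batch_size):
    # Slices never run past the end, so the last (possibly smaller)
    # batch needs no explicit bounds check.
    x_batch = numpy.asarray(x_test[i:i + mini_batch_size], dtype=numpy.float32)
    t_batch = numpy.asarray(t_test[i:i + mini_batch_size], dtype=numpy.int32)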
Example #12
def forward(x_data, y_data, train=True):
    """ 順伝搬の処理を定義 """
    # train ... 学習フラグ(Falseにすると学習しない)
    # 訓練時は,dropoutを実行
    # テスト時は,dropoutを無効

    # 入力と教師データ
    # データ配列は,Chainer.Variable型にしないといけない
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    # 隠れ層1の出力
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    # 隠れ層2の出力
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    # 出力層の出力
    y = model.l3(h2)

    # 訓練時とテスト時で返す値を変える
    # y ... ネットワークの出力(仮説)
    # t ... 教師データ
    if train:   # 訓練
        # 誤差を返す
        # 多クラス分類なので,誤差関数としてソフトマックス関数の
        # クロスエントロピー関数を使う
        loss = F.softmax_cross_entropy(y, t)
        return loss
    else:   # テスト
        # 精度を返す
        acc = F.accuracy(y, t)
        return acc
Example #13
    def forward(self, x_data, y_data):
        x = Variable(x_data)
        t = Variable(y_data)
        y = F.sigmoid(self.transform(x))
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
        return loss, accuracy
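A caution about Example #13: `F.softmax_cross_entropy` applies softmax to its first argument internally, so it expects raw logits; squashing them through `F.sigmoid` first is almost certainly unintended. The usual pattern, as a standalone sketch:

import numpy as np
import chainer.functions as F

logits = np.array([[2.0, -1.0, 0.5]], dtype=np.float32)
t = np.array([0], dtype=np.int32)
loss = F.softmax_cross_entropy(logits, t)  # no sigmoid/softmax beforehand
acc = F.accuracy(logits, t)                # accuracy works on raw scores too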
Example #14
File: experiments.py Project: kzky/works
    def test(self, x_l, y_l):
        y = self.mlp_enc(x_l, test=True)
        acc = F.accuracy(y, y_l)
        losses = self.forward_for_losses(x_l, y_l, None, test=True)  # only measure x_l
        supervised_loss = losses[0]
        recon_loss = losses[1]
        return acc, supervised_loss, recon_loss
Example #15
def forward(x_data, y_data, train=True):
    x_0 = chainer.Variable(cuda.to_gpu(x_data, 0), volatile=not train)
    x_1 = chainer.Variable(cuda.to_gpu(x_data, 1), volatile=not train)
    t = chainer.Variable(cuda.to_gpu(y_data, 0), volatile=not train)

    h1_0 = F.dropout(F.relu(model.gpu0.l1(x_0)),  train=train)
    h1_1 = F.dropout(F.relu(model.gpu1.l1(x_1)),  train=train)

    h2_0 = F.dropout(F.relu(model.gpu0.l2(h1_0)), train=train)
    h2_1 = F.dropout(F.relu(model.gpu1.l2(h1_1)), train=train)

    h3_0 = F.dropout(F.relu(model.gpu0.l3(h2_0)), train=train)
    h3_1 = F.dropout(F.relu(model.gpu1.l3(h2_1)), train=train)

    # Synchronize
    h3_0 += F.copy(h3_1, 0)
    h3_1 = F.copy(h3_0, 1)

    h4_0 = F.dropout(F.relu(model.gpu0.l4(h3_0)), train=train)
    h4_1 = F.dropout(F.relu(model.gpu1.l4(h3_1)), train=train)

    h5_0 = F.dropout(F.relu(model.gpu0.l5(h4_0)),  train=train)
    h5_1 = F.dropout(F.relu(model.gpu1.l5(h4_1)),  train=train)

    h6_0 = F.relu(model.gpu0.l6(h5_0))
    h6_1 = F.relu(model.gpu1.l6(h5_1))

    # Synchronize
    y = h6_0 + F.copy(h6_1, 0)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #16
    def __call__(self, x, t=None):
        h = F.relu(self.bn1_1(self.conv1_1(x), test=not self.train))
        h = F.dropout(h, ratio=0.3, train=self.train)
        h = F.relu(self.bn1_2(self.conv1_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 3, stride=3)

        h = F.relu(self.bn2_1(self.conv2_1(h), test=not self.train))
        h = F.dropout(h, ratio=0.4, train=self.train)
        h = F.relu(self.bn2_2(self.conv2_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 3, stride=3)

        h = F.relu(self.bn3_1(self.conv3_1(h), test=not self.train))
        h = F.dropout(h, ratio=0.4, train=self.train)
        h = F.relu(self.bn3_2(self.conv3_2(h), test=not self.train))
        h = F.dropout(h, ratio=0.4, train=self.train)
        h = F.max_pooling_2d(h, 3, stride=3)

        h = F.dropout(h, ratio=0.5, train=self.train)
        h = F.relu(self.bn4(self.fc4(h), test=not self.train))
        h = F.dropout(h, ratio=0.5, train=self.train)
        h = F.relu(self.bn5(self.fc5(h), test=not self.train))
        h = F.dropout(h, ratio=0.5, train=self.train)
        h = self.fc6(h)
        self.y = h

        if t is not None:
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss

        self.pred = F.softmax(self.y)
        return self.pred
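Example #16 threads `test=not self.train` into every BatchNormalization call, which is the Chainer v1 interface; in later Chainer versions `L.BatchNormalization` reads `chainer.config.train` itself. A minimal standalone sketch of the v2+ form:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

bn = L.BatchNormalization(3)
x = np.random.randn(4, 3).astype(np.float32)
with chainer.using_config('train', False):
    h = F.relu(bn(x))  # uses the tracked statistics, as test=True did in v1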
Example #17
    def _train_linear_classifier(self, model, optimizer, gpu):
        def _make_label(x):
            a = (numpy.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
            t = numpy.empty_like(a).astype(numpy.int32)
            t[a >= 0] = 0
            t[a < 0] = 1
            return t

        def _make_dataset(batch_size, unit_num, gpu, dtype):
            x_data = numpy.random.uniform(
                -1, 1, (batch_size, unit_num)).astype(dtype)
            t_data = _make_label(x_data)
            if gpu:
                x_data = cuda.to_gpu(x_data)
                t_data = cuda.to_gpu(t_data)
            x = chainer.Variable(x_data)
            t = chainer.Variable(t_data)
            return x, t

        for _ in six.moves.range(self.EPOCH):
            x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu,
                                 self.dtype)
            model.cleargrads()
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            loss.backward()
            optimizer.update()

        x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu,
                                       self.dtype)
        y_test = model(x_test)
        return F.accuracy(y_test, t_test)
Example #18
def forward(x_data, y_data, train=True):
    # Neural net architecture
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #19
    def forward(self, x_data, y_data, train=True, gpu=-1):

        if gpu >= 0:
            x_data = cuda.to_gpu(x_data)
            y_data = cuda.to_gpu(y_data)

        x, t = Variable(x_data), Variable(y_data)
        h = F.max_pooling_2d(F.relu(self.conv1(x)), ksize=2, stride=2) # padding=0
        h = F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=3)
        # h = F.spatial_pyramid_pooling_2d(F.relu(self.conv2(h)), 3, F.MaxPooling2D)
        h = F.dropout(F.relu(self.l3(h)), train=train)
        y = self.l4(h)


        if not train:  # run the following only at evaluation time
            cnt = 0
            missid = []

            for ydata in y.data:
                # To check by writing the results to a file:
                # fp_.write(str(np.argmax(ydata)))
                # fp_.write(' ')

                if y_data[cnt] != np.argmax(ydata):
                    # Record the samples that were misclassified.
                    missid.append(glob_z_test[z_batch[cnt]])

                cnt += 1

            # Store the ids misclassified across all batches
            glob_all_missid.extend(missid)

        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #20
def forward(x_data, y_data, train=True):
    # Neural net architecture
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    h = F.max_pooling_2d(F.dropout(F.relu(model.bn2(model.cv1(x))), train=train), 2)
    h = F.dropout(F.relu(model.ln3(h)), train=train)
    y = model.ln4(h)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #21
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x))), 3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a)))
        a = F.relu(self.norma2(self.lina(a)))
        a = self.outa(a)
        a = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b)))
        b = F.relu(self.normb2(self.linb(b)))
        b = self.outb(b)
        b = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        return 0.3 * (a + b) + F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #22
def forward(x_data, y_data):
  x = Variable(x_data)
  t = Variable(y_data)
  h1 = F.relu(model.l1(x))
  h2 = F.relu(model.l2(h1))
  y = model.l3(h2)
  return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #23
File: experiments.py Project: kzky/works
    def test(self, x_l, y_l):
        y = F.softmax(self.mlp_enc(x_l, test=True))
        y_argmax = F.argmax(y, axis=1)
        acc = F.accuracy(y, y_l)
        y_l_cpu = cuda.to_cpu(y_l.data)
        y_argmax_cpu = cuda.to_cpu(y_argmax.data)

        # Confusion Matrix
        cm = confusion_matrix(y_l_cpu, y_argmax_cpu)
        print(cm)

        # Wrong samples
        idx = np.where(y_l_cpu != y_argmax_cpu)[0]
        #print(idx.tolist())

        # Generate and Save
        x_rec = self.mlp_dec(y, test=True)
        save_incorrect_info(x_rec.data[idx, ], x_l.data[idx, ],
                            y.data[idx, ], y_l.data[idx, ])

        # Save model
        serializers.save_hdf5("./model/mlp_encdec.h5py", self.model)

        loss = self.forward_for_losses(x_l, y_l, None, test=True)  # only measure x_l
        supervised_loss = loss
        return acc, supervised_loss
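The `np.where(y_l_cpu != y_argmax_cpu)[0]` line in Example #23 is a compact way to collect the indices of misclassified samples alongside the confusion matrix. A standalone sketch of the pattern (sklearn assumed for `confusion_matrix`):

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 1, 2, 1])
y_pred = np.array([0, 2, 2, 1])
print(confusion_matrix(y_true, y_pred))
wrong = np.where(y_true != y_pred)[0]  # indices of misclassified samples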
Example #24
    def __call__(self, x, t, train=True):
        y, initial_V_concat = self.predictor(x, train=train)
        self.loss = F.softmax_cross_entropy(y, t)
        #self.loss = F.hinge(y, t, norm='L2')
        self.accuracy = F.accuracy(y, t)
        self.initial_V_concat = initial_V_concat
        return self.loss
Example #25
    def __call__(self, x, y, t):
        self.clear()
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR1(x))), 3, stride=2)
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR2(hR))), 3, stride=2)
        hR = F.relu(self.convR3(hR))
        hR = F.relu(self.convR4(hR))
        hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
        hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
        hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD1(y))), 3, stride=2)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD2(hD))), 3, stride=2)
        hD = F.relu(self.convD3(hD))
        hD = F.relu(self.convD4(hD))
        hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
        hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
        hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)
        h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
        h = self.fc9(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Example #26
File: main.py Project: sweetrabh/testeeg
    def forward_eye_states(self, x_batch_curr, y_batch_curr, volatile):

        current_sample = Variable(x_batch_curr, volatile=volatile)

        y_batch_curr = np.asarray(y_batch_curr).reshape(32, -1)
        current_output = Variable(y_batch_curr, volatile=volatile)

        h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))

        h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))

        h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))

        h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))

        h4 = h4_current
        y = self.model_to_use.h4_y(h4)

        y.data = y.data.reshape(32, -1)
        loss = F.sigmoid_cross_entropy(y, current_output)
        current_output.data = np.squeeze(current_output.data)

        accuracy = F.accuracy(y, current_output)

        return accuracy, loss, y
Example #27
File: seq2seq.py Project: re53min/TOHO_AI
    def __call__(self, x, y):
        """

        :param x: ミニバッチの入力データ
        :param y: 入力データに対応するミニバッチの出力
        :return: 誤差
        """

        batch_size = len(x)
        eos = self.xp.array([EOS], dtype='int32')

        # Attach the EOS token
        y_in = [F.concat((eos, tmp), axis=0) for tmp in y]
        y_out = [F.concat((tmp, eos), axis=0) for tmp in y]

        # Embedding Layer
        emb_x = [self.x_embed(tmp) for tmp in x]
        emb_y = [self.y_embed(tmp) for tmp in y_in]

        # Inputs to the encoder and decoder
        h, c, a = self.encoder(None, None, emb_x)  # h => hidden, c => cell, a => output(Attention)
        _, _, dec_hs = self.decoder(h, c, emb_y)  # dec_hs=> output

        # Compute the output layer
        loss = 0
        accuracy = 0
        for dec_h, t, attention in zip(dec_hs, y_out, a):
            # o = self.y(dec_h)
            o = self.global_attention_layer(dec_h, attention)  # attention layer
            loss += F.softmax_cross_entropy(o, t)  # loss
            accuracy += F.accuracy(o, t)  # accuracy
        loss /= batch_size
        accuracy /= batch_size

        return loss, accuracy
Example #28
    def __call__(self, x, t):
        h = self.predict_proba(x)
        self.loss = F.softmax_cross_entropy(h, t)
        #self.accuracy = self.calculate_accuracy(h, t)
        self.accuracy = F.accuracy(h, t, ignore_label=-1)
        self.IoU = self.calculate_intersection_of_union(h, t)
        return self.loss
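Example #28 passes `ignore_label=-1` so that padded or unlabeled positions do not count toward the accuracy. A minimal standalone demonstration:

import numpy as np
import chainer.functions as F

y = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]], dtype=np.float32)
t = np.array([1, 0, -1], dtype=np.int32)   # the -1 entry is excluded
print(F.accuracy(y, t, ignore_label=-1))   # 1.0, computed over the two valid labels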
Example #29
    def __call__(self, x, t):
        h = F.relu(self.bn1_1(self.conv1_1(x), test=not self.train))
        h = F.relu(self.bn1_2(self.conv1_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn2_1(self.conv2_1(h), test=not self.train))
        h = F.relu(self.bn2_2(self.conv2_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn3_1(self.conv3_1(h), test=not self.train))
        h = F.relu(self.bn3_2(self.conv3_2(h), test=not self.train))
        h = F.relu(self.bn3_3(self.conv3_3(h), test=not self.train))
        h = F.relu(self.bn3_4(self.conv3_4(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.dropout(F.relu(self.fc4(h)), ratio=0.5, train=self.train)
        h = F.dropout(F.relu(self.fc5(h)), ratio=0.5, train=self.train)
        h = self.fc6(h)

        self.pred = F.softmax(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(self.pred, t)

        if self.train:
            return self.loss
        else:
            return self.pred
Example #30
	def forward(self, x_data, y_data, train=True, gpu=-1):
		x, t = Variable(x_data), Variable(y_data)
		h = F.max_pooling_2d(F.relu(self.conv1(x)), ksize=2, stride=2)
		h = F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=3)
		h = F.dropout(F.relu(self.l3(h)), train=train)
		y = self.l4(h)
		return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #31
    def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Example #32
def test(net,
         val_iterator,
         val_dataset_len,
         num_gpus,
         calc_weight_count=False,
         extended_log=False):
    tic = time.time()

    predictor = CIFARPredictor(base_model=net)

    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict,
        val_iterator,
        hook=ProgressHook(val_dataset_len))
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    y = np.array(list(pred_probs))
    t = np.array(list(gt_labels))

    acc_val_value = F.accuracy(y=y, t=t).data
    err_val = 1.0 - acc_val_value

    if extended_log:
        logging.info('Test: err={err:.4f} ({err})'.format(err=err_val))
    else:
        logging.info('Test: err={err:.4f}'.format(err=err_val))
    logging.info('Time cost: {:.4f} sec'.format(time.time() - tic))
Example #33
File: test_loss.py Project: weiwchu/espnet
def test_train_acc():
    pytest.importorskip("torch")
    import torch

    from e2e_asr_attctc_th import pad_list
    from e2e_asr_attctc_th import th_accuracy

    n_out = 7
    _eos = n_out - 1
    n_batch = 3
    label_length = numpy.array([4, 2, 3], dtype=numpy.int32)
    np_pred = numpy.random.rand(n_batch,
                                max(label_length) + 1,
                                n_out).astype(numpy.float32)
    # NOTE: 0 is only used for CTC, never appeared in attn target
    np_target = [
        numpy.random.randint(1, n_out - 1, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]

    eos = numpy.array([_eos], 'i')
    ys_out = [F.concat([y, eos], axis=0) for y in np_target]

    # padding for ys with -1
    # pys: utt x olen
    # NOTE: -1 is default ignore index for chainer
    pad_ys_out = F.pad_sequence(ys_out, padding=-1)
    y_all = F.reshape(np_pred, (n_batch * (max(label_length) + 1), n_out))
    ch_acc = F.accuracy(y_all, F.concat(pad_ys_out, axis=0), ignore_label=-1)

    # NOTE: this index 0 is only for CTC not attn. so it can be ignored
    # unfortunately, torch cross_entropy does not accept out-of-bound ids
    th_ignore = 0
    th_pred = torch.autograd.Variable(torch.from_numpy(y_all.data))
    th_ys = [
        torch.autograd.Variable(torch.from_numpy(numpy.append(t, eos))).long()
        for t in np_target
    ]
    th_target = pad_list(th_ys, th_ignore)
    th_acc = th_accuracy(th_pred, th_target, th_ignore)

    numpy.testing.assert_allclose(ch_acc.data, th_acc)
Example #34
def inception_accuracy(model,
                       ims,
                       labels,
                       batch_size=100,
                       splits=10,
                       lab_range=None):
    """Compute the inception score for given images.

     Default batch_size is 100 and split size is 10. Please refer to the
     official implementation. It is recommended to to use at least 50000
     images to obtain a reliable score.

     Reference:
     https://github.com/openai/improved-gan/blob/master/inception_score/model.py

     """
    if isinstance(ims, (list, tuple)):
        ims_list = ims
        ys_list = []
        for ims in ims_list:
            n, c, w, h = ims.shape
            n_batches = int(math.ceil(float(n) / float(batch_size)))
            xp = model.xp

            print('batch_size:{}, n_ims:{}, n_batches:{}'.format(
                batch_size, n, n_batches))
            print('Calculating inception accuracy...')
            ys = inception_forward(model, ims, batch_size)
            ys_list.append(ys)
        ys = sum(ys_list) / len(ys_list)
    else:
        n, c, w, h = ims.shape
        n_batches = int(math.ceil(float(n) / float(batch_size)))
        xp = model.xp

        print('batch_size:{}, n_ims:{}, n_batches:{}'.format(
            batch_size, n, n_batches))
        print('Calculating inception accuracy...')
        ys = inception_forward(model, ims, batch_size)
    if lab_range is not None:
        ys = ys[np.arange(len(ys)), lab_range]
    return F.accuracy(ys, labels).data
Example #35
    def __call__(self, x, t):
        test = not self.train

        h = F.max_pooling_2d(F.relu(self.norm1(self.conv1(x), test=test)),
                             3, stride=2, pad=1)
        h = F.max_pooling_2d(F.relu(self.norm2(self.conv2(h), test=test)),
                             3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a), test=test))
        a = F.relu(self.norma2(self.lina(a), test=test))
        a = self.outa(a)
        self.loss1 = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b), test=test))
        b = F.relu(self.normb2(self.linb(b), test=test))
        b = self.outb(b)
        self.loss2 = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        self.loss3 = F.softmax_cross_entropy(h, t)

        self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        self.accuracy = F.accuracy(h, t)
        return self.loss
Example #36
def evaluate(net, dataset, batch_size, device=None):
    # Use repeat=False so each sample is used exactly once;
    # otherwise the `for batch in iterator` loop never terminates
    iterator = chainer.iterators.SerialIterator(dataset, batch_size, repeat=False, shuffle=False)
    loss_sum = 0
    acc_sum = 0
    num = 0
    for batch in iterator:
        raw_x, raw_t = convert.concat_examples(batch, device)
        # Backpropagation is not needed, so set volatile=True
        x = chainer.Variable(raw_x, volatile=True)
        t = chainer.Variable(raw_t, volatile=True)
        y = net(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        n = len(raw_x)
        loss_sum += float(loss.data) * n
        acc_sum += float(acc.data) * n
        num += n
    return loss_sum / num, acc_sum / num
Example #37
    def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Example #38
    def __call__(self, x, t):
        #h = F.relu(self.conv1(x))
        #h = F.relu(self.conv2(h))
        #h = F.relu(self.conv3(h))
        #h = F.relu(self.conv4(h))
        #h = F.dropout(F.leaky_relu(self.fc1(x),slope=0.1), train=self.train)
        h = F.dropout(F.sigmoid(self.fc1(x)), train=self.train, ratio=.7)
        #h = F.dropout(F.relu(self.fc2(h)), train=self.train)

        #h = F.dropout(F.relu(self.fc2(h)), train=self.train)
        #h = F.dropout(F.sigmoid(self.fc2(h)), train=self.train)
        #h = F.dropout(F.leaky_relu(self.fc3(h),0.3), train=self.train,ratio=.7)
        h = F.dropout(F.sigmoid(self.fc3(h)), train=self.train, ratio=.7)
        #h = F.dropout(F.relu(self.fc4(h)), train=self.train,ratio=.7)
        h = F.dropout(F.leaky_relu(self.fc4(h)), train=self.train, ratio=.7)
        h = self.fc_last(h)
        self.pre = F.softmax(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Example #39
def validation_classifer(_net, _iter, logger):
    """ 分類問題の validation
    """
    import chainer
    import chainer.functions as F
    import chainer.links as L
    from chainer.dataset import concat_examples

    logger.log(_tag="Report", _msg="Validation of training model.")

    _iter.reset()
    _batch = _iter.next()
    x, t = concat_examples(_batch)

    _model = L.Classifier(_net)

    accuracy = F.accuracy(_net(x), t).data
    loss = _model(x, t).data
    logging(_tag="Result",
            _msg="Loss: %1.2f, Accuracy: %1.2f" % (loss, accuracy))
Example #40
File: MLP.py Project: shizuo-kaji/LC_QSPR
    def __call__(self, x, t=0):
#        h = self['norm{}'.format(0)](x)
        h = self['layer{}'.format(0)](x)
        h = F.dropout(self.activ(h), ratio=self.dropout_ratio)
        for i in range(1, self.layers):
            h = self['norm{}'.format(i)](h)
            h = F.dropout(self.activ(self['layer{}'.format(i)](h)), ratio=self.dropout_ratio)
            #h = F.dropout(self.activ(self.bn(self.l2(h))))
        h = self['fin_layer'](h)
        if chainer.config.train:
            if self.out_ch > 1:    # classification
                loss = F.softmax_cross_entropy(h, t)
                chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
            else:   # regression
                loss = F.mean_squared_error(t, h)
                MAE = self.std * F.mean_absolute_error(t, h)
                chainer.report({'loss': loss}, self)
                chainer.report({'MAE': MAE}, self)
            return loss
        return h
Example #41
    def __call__(self, ws, cs, ls, ts):
        h_w = self.emb_word(ws)  # (batchsize, windowsize, word_dim)
        h_c = self.emb_char(cs)  # (batchsize, windowsize, max_char_len, char_dim)
        batchsize, windowsize, _, _ = h_c.data.shape
        # (batchsize, windowsize, char_dim)
        h_c = F.sum(h_c, 2)
        h_c, ls = F.broadcast(h_c, F.reshape(ls, (batchsize, windowsize, 1)))
        h_c = h_c / ls
        h = F.concat([h_w, h_c], 2)
        h = F.reshape(h, (batchsize, -1))
        # ys = self.linear1(h)
        h = F.relu(self.linear1(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        ys = self.linear2(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)
        chainer.report({"loss": loss, "accuracy": acc}, self)
        return loss
Example #42
File: alexnet.py Project: ankahira/paraDL
    def forward(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)

        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)

        h = F.relu(self.fc6(h))
        h = F.dropout(h, ratio=0.5)
        h = F.relu(self.fc7(h))
        h = F.dropout(h, ratio=0.5)
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Example #43
    def __call__(self, x, t=None):
        h = F.local_response_normalization(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.local_response_normalization(self.conv2(h))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        self.pred = F.softmax(h)
        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Example #44
def TrainBatch(train_data_size, batchsize, train_iter, device, model, opt):
    train_loss = 0
    train_acc = 0
    cnt = 0
    for i in range(0, train_data_size, batchsize):
        train_batch = train_iter.next()
        x, t = chainer.dataset.concat_examples(train_batch, device)
        model.cleargrads()
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        train_loss += loss.array
        acc = F.accuracy(y, t)
        train_acc += acc.array
        loss.backward()
        opt.update()
        cnt += 1
    avg_train_loss = train_loss / cnt  # train_loss sums per-batch mean losses, so average over batch count
    avg_train_acc = train_acc / cnt
    return chainer.cuda.to_cpu(avg_train_loss), chainer.cuda.to_cpu(
        avg_train_acc)
Example #45
    def __call__(self, x, t, train=True, finetune=False):
        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        n, nc, ns, nx, ny = h.data.shape
        h = F.reshape(h, (n, nc * ns, nx, ny))
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #46
    def __call__(self, ws, ss, ps, ts):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize, length = ts.shape
        ys = self.forward(ws, ss, ps)[1:-1]
        ts = [F.squeeze(x, 0) for x in F.split_axis(F.transpose(ts), length, 0)]
        loss = reduce(lambda x, y: x + y,
                      [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(
            lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= length
        chainer.report({"loss": loss, "accuracy": acc}, self)
        return loss
Example #47
    def update_core(self):
        optimizer = self.get_optimizer("main")

        label_batch = self.get_iterator('main').next()
        x_labeled, true_label = zip(*label_batch)
        x_labeled = self.xp.asarray(x_labeled).astype("f")

        y_predict = self.net(x_labeled)

        true_label = self.xp.array(true_label).astype("int8")
        loss = F.softmax_cross_entropy(y_predict, true_label)

        accuracy = F.accuracy(y_predict.data, true_label)
        chainer.reporter.report({'train/acc': accuracy})

        self.net.cleargrads()
        loss.backward()
        optimizer.update()

        chainer.reporter.report({'train/loss': loss})
Example #48
def forward_data(model, x_data, y_data, train=True):
    """
    @param model: NNの構造モデルオブジェクト
    @param x_data: 特徴量
    @param y_data: 教師信号
    """
    #データをnumpy配列からChainerのVariableという型(クラス)のオブジェクトに変換して使わないといけない
    x, t = Variable(x_data), Variable(y_data)

    #ドロップアウトでオーバーフィッティングを防止
    h1 = F.dropout(F.relu(model.l1(x)), ratio=0.4, train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), ratio=0.5, train=train)

    y = model.l3(h2)

    # 多クラス分類なので誤差関数としてソフトマックス関数の
    # 交差エントロピー関数を用いて、誤差を導出

    #F.accuracy()は識別率を算出
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t), F.softmax(y)
Example #49
    def __call__(self, x_input, query, answer, train=True):
        m = self.encode_input(x_input)  # memory for input
        u = self.encode_query(query)  # memory for query
        c = self.encode_output(x_input)  # memory for output
        # print "m.data.shape", m.data.shape  # (50,20)
        # print "u.data.shape", u.data.shape  # (1,20)
        # Inner product of m and u (m is transposed)
        mu = F.matmul(m, u, transb=True)
        # Sentence importance p (attention)
        p = F.softmax(mu)
        # print p.data.shape  # (50,1)
        # print c.data.shape  # (50,20)
        o = F.matmul(p, c, transa=True)  # weighted sum of c and p
        # print o.data.shape  # (1,20)
        predict = self.W(u + o)
        # print "answer.shape,predict.shape:", answer.shape, predict.data.shape
        if train:
            return F.softmax_cross_entropy(predict, answer)
        else:
            return F.accuracy(predict, answer)
Example #50
    def val(self):
        with chainer.using_config('train', False):
            val_acc = 0
            for batch in self.val_iter:
                x_array, t_array = chainer.dataset.concat_examples(batch)
                if self.opt.nCrops > 1:
                    x_array = x_array.reshape((x_array.shape[0] * self.opt.nCrops, x_array.shape[2]))
                x = chainer.Variable(cuda.to_gpu(x_array[:, None, None, :]))
                t = chainer.Variable(cuda.to_gpu(t_array))
                with chainer.no_backprop_mode():
                    y = F.softmax(self.model(x))
                    y = F.reshape(y, (y.shape[0] // self.opt.nCrops, self.opt.nCrops, y.shape[1]))
                    y = F.mean(y, axis=1)
                    acc = F.accuracy(y, t)
                    val_acc += float(acc.data) * len(t.data)

            self.val_iter.reset()
        val_top1 = 100 * (1 - val_acc / len(self.val_iter.dataset))

        return val_top1
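Example #50 evaluates with multiple crops per clip: the batch axis holds clips × crops, and the softmax outputs are folded back to (clips, crops, classes) and averaged over the crop axis before `F.accuracy`. The same reduction in plain numpy, as a sketch:

import numpy as np

n_clips, n_crops, n_classes = 10, 5, 3
y = np.random.rand(n_clips * n_crops, n_classes)          # one row per crop
y = y.reshape(n_clips, n_crops, n_classes).mean(axis=1)   # per-clip average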
Example #51
    def __call__(self, x, t):
        h = F.relu(self.bn1_1(self.conv1_1(x), test=not self.train))
        h = F.relu(self.bn1_2(self.conv1_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn2_1(self.conv2_1(h), test=not self.train))
        h = F.relu(self.bn2_2(self.conv2_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn3_1(self.conv3_1(h), test=not self.train))
        h = F.relu(self.bn3_2(self.conv3_2(h), test=not self.train))
        h = F.relu(self.bn3_3(self.conv3_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn4_1(self.conv4_1(h), test=not self.train))
        h = F.relu(self.bn4_2(self.conv4_2(h), test=not self.train))
        h = F.relu(self.bn4_3(self.conv4_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn5_1(self.conv5_1(h), test=not self.train))
        h = F.relu(self.bn5_2(self.conv5_2(h), test=not self.train))
        h = F.relu(self.bn5_3(self.conv5_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.dropout(F.relu(self.fc4(h)), ratio=0.5, train=self.train)
        h = F.dropout(F.relu(self.fc5(h)), ratio=0.5, train=self.train)
        h = self.fc6(h)

        self.pred = F.softmax(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(self.pred, t)

        if self.train:
            return self.loss
        else:
            return self.pred
Example #52
    def __call__(self, x, t):
        h = F.max_pooling_2d(F.leaky_relu(self.conv1(x)), 2, 2)
        h = F.max_pooling_2d(F.leaky_relu(self.conv2(h)), 2, 2)
        h = F.leaky_relu(self.conv3(h))
        h = F.leaky_relu(self.conv4(h))
        h = F.leaky_relu(self.conv5(h))
        h = F.max_pooling_2d(F.leaky_relu(self.conv6(h)), 2, 2)

        h = F.leaky_relu(self.conv7(h))
        h = F.leaky_relu(self.conv8(h))
        h = F.leaky_relu(self.conv9(h))
        h = F.leaky_relu(self.conv10(h))
        h = F.leaky_relu(self.conv11(h))
        h = F.leaky_relu(self.conv12(h))
        h = F.leaky_relu(self.conv13(h))
        h = F.leaky_relu(self.conv14(h))
        h = F.leaky_relu(self.conv15(h))
        h = F.max_pooling_2d(F.leaky_relu(self.conv16(h)), 2, 2)

        h = F.leaky_relu(self.conv17(h))
        h = F.leaky_relu(self.conv18(h))
        h = F.leaky_relu(self.conv19(h))

        if self.pre_train:
            h = F.average_pooling_2d(h, 2, 2)
            h = self.fc_pre(h)
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            h = F.leaky_relu(self.conv20(h))
            h = F.leaky_relu(self.conv21(h))
            h = F.leaky_relu(self.conv22(h))
            h = F.leaky_relu(self.conv23(h))
            h = F.leaky_relu(self.conv24(h))
            self.h = h
            h = F.leaky_relu(self.fc25(h))
            h = F.relu(self.fc26(h))
            #self.loss = self.loss_func(h, t)
            #self.accuracy = self.loss
            self.img = (x, h)
Example #53
    def __call__(self, x, t):

        x1 = x[:, 0, :, :, :]
        x2 = x[:, 1, :, :, :]
        activations1 = self.cnn(x1, layers=self.layers)
        activations2 = self.cnn(x2, layers=self.layers)

        if len(self.logit) == 3:
            # for googlenet
            loss1_1, loss1_2, loss1_3 = [
                F.softmax_cross_entropy(activations1[layer], t)
                for layer in self.logit
            ]
            loss2_1, loss2_2, loss2_3 = [
                F.softmax_cross_entropy(activations2[layer], t)
                for layer in self.logit
            ]
            loss1 = 0.3 * (loss1_1 + loss1_2) + loss1_3
            loss2 = 0.3 * (loss2_1 + loss2_2) + loss2_3
        else:
            loss1 = F.softmax_cross_entropy(activations1[self.logit[0]], t)
            loss2 = F.softmax_cross_entropy(activations2[self.logit[0]], t)

        h = (activations1[self.logit[0]] + activations2[self.logit[0]]) / 2

        texture_feat1 = compact_bilinear_pooling(
            activations1[self.texture_layer], {
                'W1': self.W1,
                'W2': self.W2
            })
        texture_feat2 = compact_bilinear_pooling(
            activations2[self.texture_layer], {
                'W1': self.W1,
                'W2': self.W2
            })
        texture_loss = F.mean_squared_error(texture_feat1, texture_feat2)

        self.loss = loss1 + loss2 + texture_loss
        self.accuracy = F.accuracy(h, t)
        chainer.report({'loss': self.loss, 'acc': self.accuracy}, self)
        return self.loss
Example #54
    def output_and_loss(self, h_block, t_block):
        batch, units, length = h_block.shape

        # Output (all together at once for efficiency)
        concat_logit_block = seq_func(self.output, h_block,
                                      reconstruct_shape=False)
        rebatch, _ = concat_logit_block.shape
        # Make target
        concat_t_block = t_block.reshape((rebatch))
        ignore_mask = (concat_t_block >= 0)
        n_token = ignore_mask.sum()
        normalizer = n_token  # n_token or batch or 1
        # normalizer = 1

        if not self.use_label_smoothing:
            loss = F.softmax_cross_entropy(concat_logit_block, concat_t_block)
            loss = loss * n_token / normalizer
        else:
            log_prob = F.log_softmax(concat_logit_block)
            broad_ignore_mask = self.xp.broadcast_to(
                ignore_mask[:, None],
                concat_logit_block.shape)
            pre_loss = ignore_mask * \
                log_prob[self.xp.arange(rebatch), concat_t_block]
            loss = - F.sum(pre_loss) / normalizer

        accuracy = F.accuracy(
            concat_logit_block, concat_t_block, ignore_label=-1)
        perp = self.xp.exp(loss.data * normalizer / n_token)

        # Report the Values
        reporter.report({'loss': loss.data * normalizer / n_token,
                         'acc': accuracy.data,
                         'perp': perp}, self)

        if self.use_label_smoothing:
            label_smoothing = broad_ignore_mask * \
                - 1. / self.n_target_vocab * log_prob
            label_smoothing = F.sum(label_smoothing) / normalizer
            loss = 0.9 * loss + 0.1 * label_smoothing
        return loss
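The smoothing branch of Example #54 mixes the ordinary cross-entropy with the cross-entropy against a uniform distribution, i.e. loss = 0.9 * CE + 0.1 * CE_uniform, which is label smoothing with epsilon = 0.1. A standalone sketch of the same computation for a single prediction:

import numpy as np
import chainer.functions as F

logits = np.array([[2.0, 0.1, -1.0]], dtype=np.float32)
t = np.array([0], dtype=np.int32)
log_p = F.log_softmax(logits)
ce = F.softmax_cross_entropy(logits, t)
ce_uniform = -F.mean(F.sum(log_p, axis=1) / logits.shape[1])
loss = 0.9 * ce + 0.1 * ce_uniform  # label-smoothed loss, epsilon = 0.1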
Example #55
def train_myLinear(n_epoch):
    # create save model dir
    save_dir = './myModel_Linear'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # setup model
    my_model = myLinear(k_num=10)  # k_num is 10, for the digits 0-9
    optimizer = optimizers.Adam()
    optimizer.setup(my_model)
    # STEP 1---------------------------------------------------
    # load MNIST
    train, test = chainer.datasets.get_mnist()  #it takes time...
    train_num = len(train)
    test_num = len(test)
    # ---------------------------------------------------------
    for epoch in range(0, n_epoch):
        for i in range(0, train_num):
            # STEP 2 ----------------------------------------------
            # image data (np.float32) as input, label (int) as target
            input, target = train[i]
            # reshape each datum
            input = input.reshape(1, input.shape[0])
            target = np.array([target], np.int32)
            # input to model
            output = my_model(input)
            # -----------------------------------------------------
            # STEP 3 ----------------------------------------------
            loss = F.softmax_cross_entropy(output, target)
            accuracy = F.accuracy(output, target)
            # backward model
            my_model.cleargrads()
            loss.backward()
            optimizer.update()
            # -----------------------------------------------------
            print("epoch:{} {}/{}".format(epoch + 1, i + 1, train_num))
            print("\t loss:{} accuracy:{}".format(loss.data, accuracy.data))

    # save trained model
    serializers.save_npz(
        '{}/epoch{}_myLinearmodel.npz'.format(save_dir, n_epoch), my_model)
Example #56
    def __call__(self, x, t=None):
        h = x
        h = F.relu(self.conv1_1(h))
        h = F.relu(self.conv1_2(h))
        h = _max_pooling(h)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = _max_pooling(h)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = _max_pooling(h)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = _max_pooling(h)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = _max_pooling(h)

        h = self.fc6(h)
        h = self.bn_fc6(h)
        h = F.relu(h)
        h = F.dropout(h, .5)
        h = self.fc7(h)
        h = self.bn_fc7(h)
        h = F.relu(h)
        h = F.dropout(h, .5)
        h = self.fc8(h)

        if t is None:
            return h
        else:
            loss = F.softmax_cross_entropy(h, t)
            chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
            return loss
Example #57
    def __call__(self, x, t=None):
        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn4_1(self.conv4_1(h)))
        h = F.relu(self.bn4_2(self.conv4_2(h)))
        h = F.relu(self.bn4_3(self.conv4_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn5_1(self.conv5_1(h)))
        h = F.relu(self.bn5_2(self.conv5_2(h)))
        h = F.relu(self.bn5_3(self.conv5_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
        h = self.fc8(h)
        fc8 = h

        self.pred = F.softmax(h)

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(fc8, t)
        self.acc = F.accuracy(self.pred, t)

        chainer.report({'loss': self.loss, 'accuracy': self.acc}, self)

        return self.loss
Example #58
File: main.py Project: AhmedYounes94/WON
def forward(
        model, batch_sents, batch_labels,
        lmd, identity_penalty,
        train):
    ys, _ = model.forward(batch_sents, train=train) # T x (N,T)    
    ys = F.concat(ys, axis=0) # => (T*N, T)

    ts, M = utils.padding(batch_labels, head=True, with_mask=True) # => (N, T), (N, T)
    ts = ts.T # => (T, N)
    ts = ts.reshape(-1,) # => (T*N,)
    M = M[:,None,:] * M[:,:,None] # => (N, T, T)
    ts = utils.convert_ndarray_to_variable(ts, seq=False, train=train) # => (T*N,)
    M = utils.convert_ndarray_to_variable(M, seq=False, train=train) # => (N, T, T)

    loss = F.softmax_cross_entropy(ys, ts)
    acc = F.accuracy(ys, ts, ignore_label=-1)

    if identity_penalty:
        loss_id = loss_identity_penalty(ys, M, train=train)
        loss = loss + lmd * loss_id
    return loss, acc
Example #59
    def _forward(self,
                 image_vec_batch: cupy.ndarray,
                 label_batch: np.ndarray,
                 train=True):
        """
        順方向計算実行。
        :param cupy.ndarray image_vec_batch:
        :param np.ndarray label_batch:
        :param bool train:
        :return:
        """
        x = self.xp.array(image_vec_batch).astype(np.float32)
        t = self.xp.array(label_batch).astype(np.int32)

        # Converted to cupy arrays when running on a GPU
        with chainer.using_config('train', train):
            with chainer.using_config('enable_backprop', train):
                y = self.model(x)
                loss = F.softmax_cross_entropy(y, t)
                accuracy = F.accuracy(y, t)
        return loss, accuracy
Example #60
def forward(x_data, y_data, train=True):
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    # Pooling layer after the first convolution
    h1 = F.max_pooling_2d(F.relu(model.conv1(x)), 2)
    # Pooling layer after the second convolution
    h2 = F.max_pooling_2d(F.relu(model.conv2(h1)), 2)
    # Output of the third layer
    h3 = F.dropout(F.relu(model.l1(h2)), train=train)
    # Output y
    y = model.l2(h3)

    # Return different values for training and test
    if train:
        # Training: return the loss
        # Multi-class classification, so use cross-entropy
        loss = F.softmax_cross_entropy(y, t)
        return loss
    else:
        # Test: return the accuracy
        acc = F.accuracy(y, t)
        return acc