Code example #1
import chainer.functions as F


def nlogn_loss(prediction, label):
    # Rescale the [0, 1] tensors to the 0-255 pixel range.
    residual = prediction * 255 - label * 255
    # Shift by 1 so that a zero residual gives log2(1) = 0.
    diff_abs = F.absolute(residual) + 1
    # x*log2(x) grows super-linearly, so large residuals are penalized
    # more heavily than in an L1 loss; the /256 factor rescales the result.
    loss = F.mean(diff_abs * F.log2(diff_abs) / 256)

    return loss
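A quick smoke test with random [0, 1] tensors (the shapes here are arbitrary, not taken from the original project):

import numpy as np

pred = np.random.rand(2, 3, 16, 16).astype(np.float32)
label = np.random.rand(2, 3, 16, 16).astype(np.float32)

loss = nlogn_loss(pred, label)  # 0-dim chainer.Variable
print(loss.data)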
Code example #2
import chainer
import chainer.functions as F
import cupy as cp
import numpy as np


def compute_entro_X(x):
    # Compute an entropy-like score for a feature map
    delta = chainer.Variable(1e-6 * cp.ones((x.shape[0], 1, x.shape[2], x.shape[3]),
                                            dtype=np.float32))

    # Channel-wise mean
    x_m = F.relu(F.mean(x, axis=1, keepdims=True))
    # Normalize
    x_n = x_m / (F.sum(x_m) / x_m.shape[0] + delta) * 100
    # Clip to [0, 1]
    x_n = F.clip(x_n, 0., 1.)
    # Per-pixel binary entropy (delta keeps log2 away from zero)
    entro = x_n * F.log2(1 / (x_n + delta)) + (1 - x_n) * F.log2(1 / (1 - x_n + delta))
    # Sum the per-pixel entropies
    entro = F.sum(entro)

    return cp.asnumpy(entro.data)  # , cp.asnumpy(x_n.data) * 255
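A hypothetical call, assuming a CuPy-enabled GPU and a (batch, channels, H, W) feature map:

import chainer
import cupy as cp

x = chainer.Variable(cp.random.rand(1, 64, 32, 32).astype(cp.float32))
score = compute_entro_X(x)  # plain numpy scalar
print(score)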
Code example #3
def final(self, x):
    # Method of a model class: self.d_pred holds the predicted symbol
    # logits, self.q_num the number of symbols, and self.xp is numpy or
    # cupy (assumes `import numpy as np; import chainer.functions as F`).
    xp = self.xp
    x = x.astype(np.int32)

    # Predicted symbol distribution
    p = F.softmax(self.d_pred, axis=1)

    # One-hot encode the integer symbols and count occurrences
    h = xp.eye(self.q_num)[x]
    d = xp.sum(h, axis=(0, 2, 3))

    # Cross-entropy-style code length in bits, averaged over the batch
    info = -F.sum(d * F.log2(p)) / x.shape[0]
    return info
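Since the method depends on class state, here is a self-contained sketch of the same computation; the shapes and the per-channel logit table d_pred are assumptions, not taken from the original project:

import numpy as np
import chainer.functions as F

q_num = 8                                              # assumed symbol count
x = np.random.randint(0, q_num, size=(4, 2, 16, 16))   # (B, C, H, W) integer symbols
d_pred = np.random.randn(2, q_num).astype(np.float32)  # hypothetical per-channel logits

p = F.softmax(d_pred, axis=1)             # per-channel symbol probabilities
h = np.eye(q_num, dtype=np.float32)[x]    # one-hot: (B, C, H, W, q_num)
d = h.sum(axis=(0, 2, 3))                 # symbol counts per channel: (C, q_num)

info = -F.sum(d * F.log2(p)) / x.shape[0]  # bits per batch element
print(info.data)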
Code example #4
import chainer.functions as F
from chainer import Variable


def forward(x_data, y_data, train=True):
    # `model` is a Chainer chain defined elsewhere (l1-l3 are Linear links);
    # the `train=` kwarg of F.dropout is the old Chainer v1 API (newer
    # versions switch dropout on and off via chainer.config.train instead).
    x, t = Variable(x_data), Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)

    # Shannon entropy (in bits) of the softmax predictions, summed over the batch
    y_prob = F.softmax(y)
    entropy = -F.sum(F.log2(y_prob) * y_prob)
    if train:
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
    else:
        return entropy
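The eval-time branch returns the total Shannon entropy, -Σ p·log2 p, of the softmax output in bits; a minimal standalone check on made-up logits:

import numpy as np
import chainer.functions as F

logits = np.array([[4.0, 0.0, 0.0],   # confident row: low entropy
                   [0.0, 0.0, 0.0]],  # uniform row: log2(3) ≈ 1.58 bits
                  dtype=np.float32)
p = F.softmax(logits)
entropy = -F.sum(F.log2(p) * p)
print(entropy.data)  # sum over both rows, dominated by the uniform one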
Code example #5
import chainer.functions as F


def center_corner_nlogn_loss(prediction, label):
    # Same n*log2(n) loss as code example #1, but the four 4x4 corner
    # blocks and the central 8x8 block are weighted twice as heavily.
    residual = prediction * 255 - label * 255
    diff_abs = F.absolute(residual) + 1

    h, w = diff_abs.shape[2], diff_abs.shape[3]
    # Double the forward values in the weighted regions (note that
    # mutating .array in place bypasses the autograd graph for this scaling).
    diff_abs.array[:, :, 0:4, 0:4] *= 2
    diff_abs.array[:, :, 0:4, -4:] *= 2
    diff_abs.array[:, :, -4:, 0:4] *= 2
    diff_abs.array[:, :, -4:, -4:] *= 2
    diff_abs.array[:, :, h // 2 - 4:h // 2 + 4, w // 2 - 4:w // 2 + 4] *= 2

    loss = F.mean(diff_abs * F.log2(diff_abs) / 256)

    return loss
Code example #6
import chainer.functions as F


def _predict(samples, mode='variance',
             reduce_mean=None, reduce_var=None,
             eps=1e-8):
    # Reduce a stack of stochastic forward passes (axis 0) to a
    # predictive mean and an uncertainty estimate.
    mean = F.mean(samples, axis=0)

    if mode == 'variance':
        # Element-wise variance across the samples
        var = samples - mean
        var = F.mean(F.square(var), axis=0)
    elif mode == 'entropy':
        # Element-wise -p * log2(p); eps avoids log2(0)
        var = -mean * F.log2(mean + eps)
    else:
        raise NotImplementedError('unsupported mode: {}'.format(mode))

    if reduce_mean is not None:
        mean = reduce_mean(mean)

    if reduce_var is not None:
        var = reduce_var(var)

    return mean, var
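A hypothetical call with 10 stochastic forward passes (e.g., MC dropout) over a batch of 4 three-class softmax outputs; the Dirichlet sampling just fabricates plausible probability vectors:

import numpy as np

samples = np.random.dirichlet(np.ones(3), size=(10, 4)).astype(np.float32)  # (T, B, classes)

mean, var = _predict(samples, mode='entropy')
print(mean.shape, var.shape)  # both (4, 3); pass reduce_var=lambda v: F.sum(v, axis=1)
                              # to get one entropy value per batch element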
Code example #7
File: gail.py  Project: zxtsuper/minerl
def _get_entropy(self, values):
    # Method of a discriminator class (assumes `import chainer.functions as F`).
    # Binary-entropy-style bonus averaged over the batch; the small offset
    # keeps log2 away from zero.
    s = F.sigmoid(values)
    return F.average(-values * F.log2(s + self.discriminator_value_offset)
                     - (1 - values) * F.log2(1 - s
                                             + self.discriminator_value_offset))
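A standalone version of the same expression, with the offset made an explicit parameter (1e-8 is a stand-in for self.discriminator_value_offset, whose real value is not shown here):

import numpy as np
import chainer.functions as F

def entropy_bonus(values, offset=1e-8):
    s = F.sigmoid(values)
    return F.average(-values * F.log2(s + offset)
                     - (1 - values) * F.log2(1 - s + offset))

v = np.random.randn(16, 1).astype(np.float32)  # hypothetical discriminator logits
print(entropy_bonus(v).data)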
Code example #8
def log2(self, x):
    # Thin wrapper delegating to chainer.functions.log2
    return F.log2(x)