Code example #1
from keras import backend as K


def reweight(y_true,
             y_pred,
             tp_weight=.3,
             tn_weight=.3,
             fp_weight=4,
             fn_weight=0.7):
    # Get predictions
    y_pred_classes = K.greater_equal(y_pred, 0.5)
    y_pred_classes_float = K.cast(y_pred_classes, K.floatx())

    # Get misclassified examples
    wrongly_classified = K.not_equal(y_true, y_pred_classes_float)
    wrongly_classified_float = K.cast(wrongly_classified, K.floatx())

    # Get correctly classified examples
    correctly_classified = K.equal(y_true, y_pred_classes_float)
    correctly_classified_float = K.cast(correctly_classified, K.floatx())

    # Get tp, tn, fp, fn
    # (fp: predicted positive but label is 0; fn: predicted negative but label is 1)
    tp = correctly_classified_float * y_true
    tn = correctly_classified_float * (1 - y_true)
    fp = wrongly_classified_float * (1 - y_true)
    fn = wrongly_classified_float * y_true

    # Get weights
    weight_tensor = tp_weight * tp + fp_weight * fp + tn_weight * tn + fn_weight * fn

    loss = K.binary_crossentropy(y_true, y_pred)
    weighted_loss = loss * weight_tensor
    return weighted_loss
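A minimal usage sketch (the two-layer model below is a hypothetical placeholder, not part of the original): in Keras, a custom loss like `reweight` is passed to `compile()` by reference, the same way a built-in loss name would be.

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical toy model; any binary classifier with a sigmoid output works.
model = Sequential([Dense(16, activation='relu', input_dim=10),
                    Dense(1, activation='sigmoid')])

# The custom weighted loss is used exactly like a built-in loss.
model.compile(optimizer='adam', loss=reweight)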
Code example #2
def binary_crossentropy_after_split(y_true, y_pred):
    # `splitter` (defined elsewhere) is assumed to separate the actual labels
    # from auxiliary payoff values packed into y_true; the payoffs are unused here.
    y_true, payoffs = splitter(y_true)
    return K.binary_crossentropy(y_true, y_pred)
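The `splitter` helper is not defined in the snippet above. Purely as an illustration, here is one plausible shape for it, assuming the labels and per-sample payoffs were concatenated column-wise into y_true before training:

# Hypothetical splitter: assumes y_true was packed as
# np.concatenate([labels, payoffs], axis=-1), one column each.
def splitter(packed_y_true):
    labels = packed_y_true[:, 0:1]    # first column: true labels
    payoffs = packed_y_true[:, 1:2]   # second column: per-sample payoffs
    return labels, payoffs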
Code example #3
def loss_function(y_true, y_pred):
    # Keras 1.x backend signature is binary_crossentropy(output, target),
    # so (y_pred, y_true) is the correct order there; Keras 2 reverses it
    # to (target, output), i.e. (y_true, y_pred).
    return K.mean(K.binary_crossentropy(y_pred, y_true))
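A quick sanity check, as a sketch assuming Keras 1.x on the TensorFlow backend (where the `(y_pred, y_true)` order above is correct): both samples below assign probability 0.9 to the true class, so the mean loss should come out to about -log(0.9) ≈ 0.105.

import numpy as np
from keras import backend as K

y_true = K.variable(np.array([[1.0], [0.0]]))
y_pred = K.variable(np.array([[0.9], [0.1]]))

print(K.eval(loss_function(y_true, y_pred)))  # ~0.105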
Code example #4
    def build_variational_architecture(self):
        # Keras 1.x API (Convolution2D/Deconvolution2D with subsample/border_mode).
        # Assumes module-level imports of the layers used here, keras.backend as K,
        # tensorflow as tf, and a batch-flatten helper `flatten` defined elsewhere.

        # Encoder: three strided convolutions
        e1 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='valid',
                           name='e1')(self.autoencoder_input)
        e3 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='same',
                           name='e3')(e1)
        e4 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='same',
                           name='e4')(e3)

        # Latent distribution parameters: mean and log-variance of q(z|x)
        e5 = Dense(512, activation='relu')(flatten(e4))
        self.z_mean = Dense(self.latent_shape, activation='linear')(e5)
        self.z_log_sigma = Dense(self.latent_shape, activation='linear')(e5)

        batch_size = tf.shape(self.autoencoder_input)[0]

        def sample_z(args):
            # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1).
            # (`std=` is the Keras 1.x keyword; Keras 2 renamed it to `stddev=`.)
            z_m, z_l_s = args
            eps = K.random_normal(shape=(batch_size, self.latent_shape),
                                  mean=0.,
                                  std=1.)
            return z_m + K.exp(z_l_s / 2) * eps

        # Sample z
        z = Lambda(sample_z)([self.z_mean, self.z_log_sigma])

        # Decoder layers
        d1 = Dense(6400, activation='relu', name='d1')
        d2 = Reshape((10, 10, 64), name='d2')
        d3 = Deconvolution2D(64,
                             6,
                             6,
                             output_shape=(None, 20, 20, 64),
                             subsample=(2, 2),
                             activation='relu',
                             border_mode='same',
                             name='d3')
        d4 = Deconvolution2D(64,
                             6,
                             6,
                             output_shape=(None, 40, 40, 64),
                             subsample=(2, 2),
                             activation='relu',
                             border_mode='same',
                             name='d4')
        d5 = Deconvolution2D(1,
                             6,
                             6,
                             output_shape=(None, 84, 84, 1),
                             subsample=(2, 2),
                             activation='sigmoid',
                             border_mode='valid',
                             name='d5')

        # Full autoencoder
        d1_full = d1(z)
        d2_full = d2(d1_full)
        d3_full = d3(d2_full)
        d4_full = d4(d3_full)
        d5_full = d5(d4_full)
        d7_full = Reshape((7056, ))(d5_full)

        # Only decoding
        d1_decoder = d1(self.decoder_input)
        d2_decoder = d2(d1_decoder)
        d3_decoder = d3(d2_decoder)
        d4_decoder = d4(d3_decoder)
        d5_decoder = d5(d4_decoder)
        d7_decoder = Reshape((7056, ))(d5_decoder)

        self.decoder_output = d7_decoder
        self.autoencoder_output = d7_full
        self.encoder_output = self.z_mean

        # Reconstruction loss: per-pixel binary cross-entropy summed over pixels
        # (Keras 1.x backend order: binary_crossentropy(output, target))
        self.emulator_reconstruction_loss = K.sum(K.binary_crossentropy(
            self.autoencoder_output, flatten(self.autoencoder_input)),
                                                  axis=1)
        # KL divergence between the approximate posterior and a unit Gaussian prior
        kl_loss = -0.5 * K.sum(1 + self.z_log_sigma - K.square(self.z_mean) -
                               K.exp(self.z_log_sigma),
                               axis=-1)
        self.autoencoder_loss = tf.add(self.emulator_reconstruction_loss,
                                       kl_loss)
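A sketch (not part of the original class) of how the tensors built by this method could be wrapped into Keras models and trained; the attribute and method names below are hypothetical, and the precomputed loss is exposed through a closure that ignores its arguments, which is one common workaround when the loss tensor is assembled by hand.

    def build_models(self):
        # Hypothetical helper on the same class, continuing the sketch above.
        from keras.models import Model

        autoencoder = Model(self.autoencoder_input, self.autoencoder_output)
        encoder = Model(self.autoencoder_input, self.encoder_output)
        decoder = Model(self.decoder_input, self.decoder_output)

        def vae_loss(y_true, y_pred):
            # The full VAE loss was already assembled, so both arguments are ignored.
            return self.autoencoder_loss

        autoencoder.compile(optimizer='adam', loss=vae_loss)
        return autoencoder, encoder, decoder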