Example #1
    >>> model.compile(
    ...     optimizer,
    ...     loss=MaskedPenalizedSparseCategoricalCrossentropy(0.1))
    """
    def __init__(self, penalty_weight: float):
        self.penalty_weight = penalty_weight

    def __call__(self, y_true, y_pred):
        y_true_val = y_true[:, :, 0]
        mask = y_true[:, :, 1]

        # masked per-sample means of each loss
        num_items_masked = K.sum(mask, axis=-1) + 1e-6
        masked_cross_entropy = (
            K.sum(mask * K.sparse_categorical_crossentropy(y_true_val, y_pred),
                  axis=-1) / num_items_masked)
        masked_entropy = (
            K.sum(mask * -K.sum(y_pred * K.log(y_pred), axis=-1), axis=-1) /
            num_items_masked)
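        # Subtracting the mean prediction entropy below acts as a confidence
        # penalty (Pereyra et al., 2017): minimizing the loss then favors
        # less over-confident output distributions.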
        return masked_cross_entropy - self.penalty_weight * masked_entropy

    def get_config(self):
        return {'penalty_weight': self.penalty_weight}


get_custom_objects().update({
    'MaskedPenalizedSparseCategoricalCrossentropy':
    MaskedPenalizedSparseCategoricalCrossentropy,
    'masked_perplexity': masked_perplexity,
})
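
# With the loss registered in get_custom_objects(), a saved model that
# references it by name can be reloaded without passing custom_objects.
# A minimal sketch; 'model.h5' is a hypothetical path:
from tensorflow.keras.models import load_model

restored = load_model('model.h5')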
Example #2
    def build(self):
        get_custom_objects().update({'mish': Mish(mish)})

        input_sig = Input(shape=self.input_shape)
        x = self._make_stem(input_sig,
                            stem_width=self.stem_width,
                            deep_stem=self.deep_stem)

        if not self.preact:
            x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
            x = Activation(self.active)(x)
        if self.verbose:
            print("stem_out", x.shape)

        x = MaxPool2D(pool_size=3,
                      strides=2,
                      padding="same",
                      data_format="channels_last")(x)
        if self.verbose:
            print("MaxPool2D out", x.shape)

        if self.preact:
            x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
            x = Activation(self.active)(x)

        if self.using_cb:
            second_x = x
            second_x = self._make_layer(x,
                                        blocks=self.blocks_set[0],
                                        filters=64,
                                        stride=1,
                                        is_first=False)
            second_x_tmp = self._make_Composite_layer(second_x,
                                                      filters=x.shape[-1],
                                                      upsample=False)
            if self.verbose:
                print('layer 0 db_com', second_x_tmp.shape)
            x = Add()([second_x_tmp, x])
        x = self._make_layer(x,
                             blocks=self.blocks_set[0],
                             filters=64,
                             stride=1,
                             is_first=False)
        if self.verbose:
            print("-" * 5, "layer 0 out", x.shape, "-" * 5)

        b1_b3_filters = [64, 128, 256, 512]
        for i in range(3):
            idx = i + 1
            if self.using_cb:
                second_x = self._make_layer(x,
                                            blocks=self.blocks_set[idx],
                                            filters=b1_b3_filters[idx],
                                            stride=2)
                second_x_tmp = self._make_Composite_layer(second_x,
                                                          filters=x.shape[-1])
                if self.verbose:
                    print('layer {} db_com out {}'.format(
                        idx, second_x_tmp.shape))
                x = Add()([second_x_tmp, x])
            x = self._make_layer(x,
                                 blocks=self.blocks_set[idx],
                                 filters=b1_b3_filters[idx],
                                 stride=2)
            if self.verbose:
                print('----- layer {} out {} -----'.format(idx, x.shape))

        x = GlobalAveragePooling2D(name='avg_pool')(x)
        if self.verbose:
            print("pool_out:", x.shape)  # remove the concats var

        if self.dropout_rate > 0:
            x = Dropout(self.dropout_rate, noise_shape=None)(x)

        fc_out = Dense(self.nb_classes,
                       kernel_initializer="he_normal",
                       use_bias=False,
                       name="fc_NObias")(x)  # replace concats to x
        if self.verbose:
            print("fc_out:", fc_out.shape)

        if self.fc_activation:
            fc_out = Activation(self.fc_activation)(fc_out)

        model = models.Model(inputs=input_sig, outputs=fc_out)

        if self.verbose:
            print("Resnest builded with input {}, output{}".format(
                input_sig.shape, fc_out.shape))
            print("-------------------------------------------")
            print("")

        return model
Example #3
    s2 = tf.stack([tf.sin(2 * np.pi * i * t) for i in range(p2)], axis=0)
    if p == 1:
        s = s2
    else:
        s = tf.keras.backend.concatenate([s1, s2], axis=0)
    s = tf.cast(s, np.float32)
    return tf.keras.backend.dot(thetas, s)


def trend_model(thetas, backcast_length, forecast_length, is_forecast):
    p = thetas.shape[-1]
    t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
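    # Stack powers of t into a Vandermonde-style basis, so each theta acts
    # as the coefficient of one polynomial trend term.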
    t = tf.transpose(tf.stack([t**i for i in range(p)], axis=0))
    t = tf.cast(t, np.float32)
    return tf.keras.backend.dot(thetas, tf.transpose(t))


class Mish(tf.keras.layers.Activation):
    def __init__(self, activation, **kwargs):
        super(Mish, self).__init__(activation, **kwargs)
        self.__name__ = 'Mish'


def mish(x):
    return x * tf.keras.backend.tanh(tf.keras.backend.softplus(x))


get_custom_objects().update({'mish': Mish(mish)})

# In functional modeling:
#   x = Activation('mish')(x)
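
# A minimal sketch of using the registered name as a string activation,
# assuming the registration above has run (layer sizes are illustrative):
from tensorflow.keras import layers, models

m = models.Sequential([
    layers.Dense(32, activation='mish', input_shape=(16,)),
    layers.Dense(1),
])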
Example #4
custom_config = {
    'TokenEmbedding': TokenEmbedding,
    'PositionEmbedding': PositionEmbedding,
    'MultiHeadAttention': MultiHeadAttention,
    'EmbeddingSimilarity': EmbeddingSimilarity,
    'LayerNormalization': LayerNormalization,
    'FeedForward': FeedForward,
    'Masked': Masked,
    'Extract': Extract,
    'gelu': gelu,
    'CRF': CRF,
    # 'ScaledDotProductAttention': ScaledDotProductAttention
}

get_custom_objects().update(custom_config)


class BERT:

    def __init__(self,
                 bert_file_path: str = config.bert_path,
                 base: Union[bool, int] = False,  # assumes: from typing import Union
                 max_len: int = 512,
                 load_pre: bool = True):
        self.bert_file_path = bert_file_path
        self.conf_file = self.bert_file_path + '/bert_config.json'
        self.ckpt_file = self.bert_file_path + '/bert_model.ckpt'
        self.config = self.get_config(self.conf_file)
        self.base = base
Example #5
        kernel = K.constant(kernel)
        kernel = K.expand_dims(kernel, axis=-1)
        kernel = K.repeat_elements(kernel, 3, axis=-1)
        kernel = K.expand_dims(kernel)
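        # kernel now has shape (k, k, 3, 1): one copy of the blur filter per
        # RGB channel, the layout tf.nn.depthwise_conv2d expects.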

        blurred = tf.nn.depthwise_conv2d(input_batch,
                                         kernel, (1, 1, 1, 1),
                                         padding='SAME')

        D = (input_batch - blurred)
        lambdas_batch = self.reshape_function(lambdas_batch)
        Z = lambdas_batch * D
        V = (input_batch + Z)
        V = K.clip(V, 0, 255)
        return V

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        # include the base Layer config so the layer round-trips cleanly
        config = super(UnsharpMaskFixedLambda, self).get_config()
        config.update({
            'kernel_size': self.kernel_size,
            'found_sigma': self.found_sigma,
            'sigma': self.sigma
        })
        return config


get_custom_objects().update({'UnsharpMaskFixedLambda': UnsharpMaskFixedLambda})
Example #6
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_docs.modeling as tfmod
import tensorflow.keras as keras
import tensorflow.keras.backend as kback
import tensorflow.keras.layers as klays
import tensorflow.keras.utils as kutils
import tensorflow.keras.initializers as kinit
import sklearn.preprocessing as procs
import pickle
import scipy.stats as stats

def swish(x, beta=1):
    return x * kback.sigmoid(beta * x)


kutils.get_custom_objects().update({'swish': klays.Activation(swish)})


def build_model():
    model = keras.Sequential([
        klays.Dense(5, activation='relu', input_shape=[len(raw_dataset.keys())]),
        klays.Dense(256, activation='relu'),
        klays.Dense(256, activation='relu'),
        klays.Dense(128, activation='sigmoid'),
        klays.Dense(1)])
    learn_decay = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=1e-3,
        decay_steps=10000,
        decay_rate=0.9)
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learn_decay, centered=True)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mse', 'mae'])
    return model
Example #7
        5D tensor with shape:
        `(samples, channels + 2, conv_dim1, conv_dim2, conv_dim3)`
        if `data_format` is `"channels_first"`
        or 5D tensor with shape:
        `(samples, conv_dim1, conv_dim2, conv_dim3, channels + 2)`
        if `data_format` is `"channels_last"`.

    # References:
        - [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
    """

    def __init__(self, data_format=None,
                 **kwargs):
        super(CoordinateChannel3D, self).__init__(
            rank=3,
            use_radius=False,
            data_format=data_format,
            **kwargs
        )

    def get_config(self):
        config = super(CoordinateChannel3D, self).get_config()
        config.pop('rank')
        config.pop('use_radius')
        return config


get_custom_objects().update({'CoordinateChannel1D': CoordinateChannel1D,
                             'CoordinateChannel2D': CoordinateChannel2D,
                             'CoordinateChannel3D': CoordinateChannel3D})
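
# Registration makes these layers resolvable by name during deserialization.
# An alternative is a scoped registration; a minimal sketch where 'model.h5'
# is a hypothetical path:
import tensorflow as tf

with tf.keras.utils.custom_object_scope(
        {'CoordinateChannel2D': CoordinateChannel2D}):
    model = tf.keras.models.load_model('model.h5')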
Example #8
def get_model(hyperparameter_file_name: str):
    opt = tf.keras.optimizers.Adam(learning_rate=0.0001,
                                   beta_1=0.7,
                                   beta_2=0.9)
    get_custom_objects().update(
        {'custom_activation': Activation(custom_activation)})

    time_in = Input(shape=(2016, 1), name='time_in')
    time0 = BatchNormalization()(time_in)
    time0 = Conv1D(
        64,
        32,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time0')(time0)
    time0 = BatchNormalization()(time0)
    time0 = Activation(custom_activation, name='act1-time0')(time0)

    time0X = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv-time0x')(time0)
    time0X = BatchNormalization()(time0X)

    time1 = Conv1D(
        32,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time1')(time0)
    time1 = BatchNormalization()(time1)
    time1 = Activation(custom_activation, name='act1-time1')(time1)
    time1 = Conv1D(
        32,
        16,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time1')(time1)
    time1 = BatchNormalization()(time1)
    time1 = Activation(custom_activation, name='act2-time1')(time1)
    time1 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time1')(time1)
    time1 = BatchNormalization()(time1)
    time2 = add([time0X, time1])
    time2 = Activation(custom_activation, name='act1-time2')(time2)

    time2 = AveragePooling1D(pool_size=2)(time2)
    time2 = BatchNormalization()(time2)
    time2 = Activation(custom_activation, name='POOL1-act')(time2)

    time3 = Conv1D(
        32,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time3')(time2)
    time3 = BatchNormalization()(time3)
    time3 = Activation(custom_activation, name='act1-time3')(time3)
    time3 = Conv1D(
        32,
        8,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time3')(time3)
    time3 = BatchNormalization()(time3)
    time3 = Activation(custom_activation, name='act2-time3')(time3)
    time3 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time3')(time3)
    time3 = BatchNormalization()(time3)
    time4 = add([time2, time3])
    time4 = Activation(custom_activation, name='act1-time4')(time4)

    time5 = Conv1D(
        32,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time5')(time4)
    time5 = BatchNormalization()(time5)
    time5 = Activation(custom_activation, name='act1-time5')(time5)
    time5 = Conv1D(
        32,
        8,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time5')(time5)
    time5 = BatchNormalization()(time5)
    time5 = Activation(custom_activation, name='act2-time5')(time5)
    time5 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time5')(time5)
    time5 = BatchNormalization()(time5)
    time6 = add([time4, time5])
    time6 = Activation(custom_activation, name='act1-time6')(time6)

    time6 = AveragePooling1D(pool_size=4)(time6)
    time6 = BatchNormalization()(time6)
    time6 = Activation(custom_activation, name='POOL2-act')(time6)

    time6X = Conv1D(
        128,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv-time6x')(time6)
    time6X = BatchNormalization()(time6X)

    time7 = Conv1D(
        64,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time7')(time6)
    time7 = BatchNormalization()(time7)
    time7 = Activation(custom_activation, name='act1-time7')(time7)
    time7 = Conv1D(
        64,
        4,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time7')(time7)
    time7 = BatchNormalization()(time7)
    time7 = Activation(custom_activation, name='act2-time7')(time7)
    time7 = Conv1D(
        128,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time7')(time7)
    time7 = BatchNormalization()(time7)
    time8 = add([time6X, time7])
    time8 = Activation(custom_activation, name='act1-time8')(time8)

    time9 = Conv1D(
        64,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time9')(time8)
    time9 = BatchNormalization()(time9)
    time9 = Activation(custom_activation, name='act1-time9')(time9)
    time9 = Conv1D(
        64,
        4,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time9')(time9)
    time9 = BatchNormalization()(time9)
    time9 = Activation(custom_activation, name='act2-time9')(time9)
    time9 = Conv1D(
        128,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time9')(time9)
    time9 = BatchNormalization()(time9)
    time10 = add([time8, time9])
    time10 = Activation(custom_activation, name='act1-time10')(time10)

    time11 = Conv1D(
        64,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time11')(time10)
    time11 = BatchNormalization()(time11)
    time11 = Activation(custom_activation, name='act1-time11')(time11)
    time11 = Conv1D(
        64,
        4,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time11')(time11)
    time11 = BatchNormalization()(time11)
    time11 = Activation(custom_activation, name='act2-time11')(time11)
    time11 = Conv1D(
        128,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time11')(time11)
    time11 = BatchNormalization()(time11)
    time12 = add([time10, time11])
    time12 = Activation(custom_activation, name='act1-time12')(time12)

    time12 = AveragePooling1D(pool_size=8)(time12)
    time12 = BatchNormalization()(time12)
    time12 = Activation(custom_activation, name='POOL3-act')(time12)

    time12X = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv-time12x')(time12)
    time12X = BatchNormalization()(time12X)

    time13 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time13')(time12)
    time13 = BatchNormalization()(time13)
    time13 = Activation(custom_activation, name='act1-time13')(time13)
    time13 = Conv1D(
        96,
        2,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time13')(time13)
    time13 = BatchNormalization()(time13)
    time13 = Activation(custom_activation, name='act2-time13')(time13)
    time13 = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time13')(time13)
    time13 = BatchNormalization()(time13)
    time14 = add([time12X, time13])
    time14 = Activation(custom_activation, name='act1-time14')(time14)

    time15 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time15')(time14)
    time15 = BatchNormalization()(time15)
    time15 = Activation(custom_activation, name='act1-time15')(time15)
    time15 = Conv1D(
        96,
        2,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time15')(time15)
    time15 = BatchNormalization()(time15)
    time15 = Activation(custom_activation, name='act2-time15')(time15)
    time15 = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time15')(time15)
    time15 = BatchNormalization()(time15)
    time16 = add([time14, time15])
    time16 = Activation(custom_activation, name='act1-time16')(time16)

    time17 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time17')(time16)
    time17 = BatchNormalization()(time17)
    time17 = Activation(custom_activation, name='act1-time17')(time17)
    time17 = Conv1D(
        96,
        2,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time17')(time17)
    time17 = BatchNormalization()(time17)
    time17 = Activation(custom_activation, name='act2-time17')(time17)
    time17 = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time17')(time17)
    time17 = BatchNormalization()(time17)
    time18 = add([time16, time17])
    time18 = Activation(custom_activation, name='act1-time18')(time18)

    time19 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time19')(time18)
    time19 = BatchNormalization()(time19)
    time19 = Activation(custom_activation, name='act1-time19')(time19)
    time19 = Conv1D(
        96,
        2,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time19')(time19)
    time19 = BatchNormalization()(time19)
    time19 = Activation(custom_activation, name='act2-time19')(time19)
    time19 = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time19')(time19)
    time19 = BatchNormalization()(time19)
    time20 = add([time18, time19])
    time20 = Activation(custom_activation, name='act1-time20')(time20)

    time21 = Conv1D(
        96,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv1-time21')(time20)
    time21 = BatchNormalization()(time21)
    time21 = Activation(custom_activation, name='act1-time21')(time21)
    time21 = Conv1D(
        96,
        2,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv2-time21')(time21)
    time21 = BatchNormalization()(time21)
    time21 = Activation(custom_activation, name='act2-time21')(time21)
    time21 = Conv1D(
        196,
        1,
        strides=1,
        padding="same",
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer='zeros',
        name='conv3-time21')(time21)
    time21 = BatchNormalization()(time21)
    time22 = add([time20, time21])
    time22 = Activation(custom_activation, name='act1-time22')(time22)

    time22X = AveragePooling1D(pool_size=2)(time22)
    time22X = BatchNormalization()(time22X)
    time22X = Activation(custom_activation, name='POOL4-act')(time22X)

    cnn1d_feature = Flatten()(time22X)
    DTX_out = Dense(1225,
                    kernel_initializer=tf.random_normal_initializer(
                        stddev=0.001))(cnn1d_feature)
    DTX_out = Activation(custom_activation, name='act1-dtx_out')(DTX_out)
    DTX_out = Dense(
        1225,
        activation='linear',
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))(DTX_out)

    DTX_out = tf.keras.layers.Lambda(Nor_L2, name="lambda_layer")(DTX_out)

    model = Model([time_in], [DTX_out])

    model.compile(optimizer=opt,
                  loss=custom_loss,
                  metrics=['accuracy', 'mae', 'mape', 'mse'])
    model.load_weights(
        os.path.join(CURRENT_PATH, "../model_args", hyperparameter_file_name))

    return model
Example #9
    def build_generator(self, model_yaml, is_training=True):
        img_input = Input(shape=(self.data_X.shape[1], self.data_X.shape[2],
                                 self.data_X.shape[3]),
                          name="g_input_data")

        def build_resnet_block(block_input, block_id=0):
            """Define the ResNet block"""
            x = Conv2D(model_yaml["dim_resnet"],
                       model_yaml["k_resnet"],
                       padding=model_yaml["padding"],
                       strides=tuple(model_yaml["stride"]),
                       name="g_block_{}_conv1".format(block_id))(block_input)
            x = BatchNormalization(momentum=model_yaml["bn_momentum"],
                                   trainable=is_training,
                                   name="g_block_{}_bn1".format(block_id))(x)
            x = ReLU(name="g_block_{}_relu1".format(block_id))(x)
            x = Dropout(rate=model_yaml["do_rate"],
                        name="g_block_{}_do".format(block_id))(x)
            x = Conv2D(model_yaml["dim_resnet"],
                       model_yaml["k_resnet"],
                       padding=model_yaml["padding"],
                       strides=tuple(model_yaml["stride"]),
                       name="g_block_{}_conv2".format(block_id))(x)
            x = BatchNormalization(momentum=model_yaml["bn_momentum"],
                                   trainable=is_training,
                                   name="g_block_{}_bn2".format(block_id))(x)
            x = Add(name="g_block_{}_add".format(block_id))([x, block_input])
            x = ReLU(name="g_block_{}_relu2".format(block_id))(x)
            return x

        if model_yaml["last_activation"] == "tanh":
            print("use tanh keras")
            get_custom_objects().update({'tanh': tanh})
            last_activ = 'tanh'
        else:
            last_activ = model_yaml["last_activation"]
        x = img_input

        for i, param_lay in enumerate(
                model_yaml["param_before_resnet"]
        ):  # build the blocks before the Resnet Blocks
            x = Conv2D(param_lay[0],
                       param_lay[1],
                       strides=tuple(model_yaml["stride"]),
                       padding=model_yaml["padding"],
                       name="g_conv{}".format(i))(x)
            x = BatchNormalization(momentum=model_yaml["bn_momentum"],
                                   trainable=is_training,
                                   name="g_{}_bn".format(i))(x)
            x = ReLU(name="g_{}_lay_relu".format(i))(x)

        for j in range(model_yaml["nb_resnet_blocs"]):  # add the Resnet blocks
            x = build_resnet_block(x, block_id=j)

        for i, param_lay in enumerate(model_yaml["param_after_resnet"]):
            x = Conv2D(param_lay[0],
                       param_lay[1],
                       strides=tuple(model_yaml["stride"]),
                       padding=model_yaml["padding"],
                       name="g_conv_after_resnetblock{}".format(i))(x)
            x = BatchNormalization(
                momentum=model_yaml["bn_momentum"],
                trainable=is_training,
                name="g_after_resnetblock{}_bn2".format(i))(x)
            x = ReLU(name="g_after_resnetblock_relu_{}".format(i))(x)
        # The last layer
        x = Conv2D(model_yaml["last_layer"][0],
                   model_yaml["last_layer"][1],
                   strides=tuple(model_yaml["stride"]),
                   padding=model_yaml["padding"],
                   name="g_final_conv",
                   activation=last_activ)(x)
        model_gene = Model(img_input, x, name="Generator")
        model_gene.summary()
        return model_gene
Example #10
"""TF lite converter for larq models."""
import tensorflow as tf
import numpy as np
import larq as lq
from larq_compute_engine import bsign, bconv2d64
from larq_compute_engine.tf.python.utils import tf_2_or_newer
from tensorflow.keras.utils import get_custom_objects

get_custom_objects()["bsign"] = bsign

quantizer_replacements = {
    "SteSign": bsign,
    "ste_sign": bsign,
    "approx_sign": bsign,
    "MagnitudeAwareSign": None,
    "magnitude_aware_sign": None,
    "swish_sign": bsign,
    "SwishSign": bsign,
    "SteTern": None,
    "ste_tern": None,
    "SteHeaviside": None,
    "ste_heaviside": None,
    "DoReFaQuantizer": None,
    "dorefa_quantizer": None,
}


def create_bconv_layer(weights,
                       strides,
                       padding,
                       transpose=True,
Example #11
Copyright 2019, SangJae Kang, All rights reserved.
Mail : [email protected]
"""
from tensorflow.keras.utils import get_custom_objects
from .losses import *
from .layers import *
from .backbone import *
from .backbone.ResNext import *
from .metrics import *
from .normalization import *
from .optimizers import *
from .prior import *
from .callbacks import *

# Base Model 전처리에 필요한 Custom Keras Object
get_custom_objects().update({'BackBonePreProcess': BackBonePreProcess})

# ResNext 구성에 필요한 Custom Keras Object
get_custom_objects().update({
    'MergeGroups': MergeGroups,
    'ReduceGroups': ReduceGroups,
    'SplitGroups': SplitGroups
})

# RetinaMask & DeepLabV3+ Modeling에 필요한 Custom Keras Object
get_custom_objects().update({
    'RestoreBoxes': RestoreBoxes,
    'PriorLayer': PriorLayer,
    "Identity": Identity,
    "FeaturePyramid": FeaturePyramid,
    "ClassificationSubNet": ClassificationSubNet,
Example #12
        kernel = self.LoG_np_new(self.sigma, self.kernel_size)

        kernel = K.constant(kernel)
        kernel = K.expand_dims(kernel, axis=-1)
        kernel = K.repeat_elements(kernel, 3, axis=-1)
        kernel = K.expand_dims(kernel)
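        # kernel now has shape (k, k, 3, 1): one LoG filter per RGB channel,
        # as required by tf.nn.depthwise_conv2d.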

        B = tf.nn.depthwise_conv2d(input_batch,
                                   kernel, (1, 1, 1, 1),
                                   padding='SAME')

        U = (input_batch + (self.amount[0] * B))

        U = K.clip(U, 0, 255)

        return U

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        # include the base Layer config so the layer round-trips cleanly
        config = super(UnsharpMaskLoG, self).get_config()
        config.update({
            'kernel_size': self.kernel_size,
            'found_sigma': self.found_sigma,
            'sigma': self.sigma
        })
        return config


get_custom_objects().update({'UnsharpMaskLoG': UnsharpMaskLoG})
Example #13
# import sklearn
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC, OneClassSVM
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier, IsolationForest
from sklearn.neural_network import MLPClassifier
# from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
import tsa
from data import load_data

def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))


get_custom_objects().clear()
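# NOTE: clear() empties the whole global registry, so anything registered
# earlier must be re-registered after this call.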
get_custom_objects()["rmse"] = rmse

m = 50
t0 = time()
xN_1, xF_1, yN_1, yF_1 = load_data(m=m, d=1)
t1 = time()
print('data processing time: ', t1 - t0, '(s)')

forecaster = load_model('forecaster_4_m50_3cnn_rmse1.4%.h5')

yhN_1 = forecaster.predict(xN_1, batch_size=np.power(2, 16), verbose=0, steps=None)
yhF_1 = forecaster.predict(xF_1, batch_size=np.power(2, 16), verbose=0, steps=None)

print('yh:', yhN_1.shape, yhF_1.shape)
Example #14
def register_keras_custom_object(cls):
    get_custom_objects()[cls.__name__] = cls
    return cls
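
# A minimal sketch of the decorator in use (the layer below is hypothetical):
import tensorflow as tf

@register_keras_custom_object
class ScaleByTwo(tf.keras.layers.Layer):
    def call(self, inputs):
        return 2.0 * inputs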
Example #15
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(
                inputs, name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(
            K.sum(K.cast(step_is_active, 'int32')), 0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs,
            self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]

    def compute_output_shape(self, input_shape):
        return [input_shape, input_shape]

    def finalize(self):
        self.add_loss(self.ponder_cost)


get_custom_objects().update({
    'LayerNormalization': LayerNormalization,
    'TransformerTransition': TransformerTransition,
    'TransformerACT': TransformerACT,
    'gelu': gelu,
})
Example #16
            "groups": self.groups,
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer":
            initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer":
            regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint)
        }
        base_config = super(GroupNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


get_custom_objects().update({"GroupNormalization": GroupNormalization})

if __name__ == "__main__":
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    ip = Input(shape=(None, None, 4))
    x = GroupNormalization(groups=2, axis=-1, epsilon=0.1)(ip)
    model = Model(inputs=ip, outputs=x)
    model.summary()
Example #17
def register_permanent_dropout():
    get_custom_objects()['PermanentDropout'] = PermanentDropout
Example #18

    numerator = tf.reduce_sum(onehots_true * probabilities, axis=0)
    denominator = tf.reduce_sum(onehots_true + probabilities, axis=0)
    loss = 1.0 - 2.0 * (numerator + 1) / (denominator + 1)
    return loss

def main():
    with open(args.config, 'r') as f:
        yam = yaml.safe_load(f)
    img_path = yam['img_path']
    mask_path = yam['mask_path']
    epochs = yam['epochs']
    image_size = yam['image_size']
    start_neurons = yam['start_neurons']
    batch_size = yam['batch_size']
    get_custom_objects().update(
        {'lrelu': Activation(tf.keras.layers.LeakyReLU(alpha=0.3))})
    train_generator = directory_to_generator(img_path, mask_path, image_size)
    steps_per_epoch = int(np.ceil(train_generator.shape[0] / batch_size))
    input_layer = Input((image_size, image_size, 3))
    output_layer = build_model(input_layer, start_neurons)
    model = Model(input_layer, output_layer)
    model.compile(loss=dice_loss, optimizer='adam', metrics=["accuracy"])
    model.fit(train_generator, epochs=epochs,
              steps_per_epoch=steps_per_epoch, batch_size=batch_size)

if __name__ == "__main__":
    main()


Example #19
    def __init__(self, save_all_models=False):
        Callback.__init__(self)
        self.save_all_models = save_all_models
        get_custom_objects()['PermanentDropout'] = PermanentDropout
Example #20
import tensorflow
from tensorflow import keras
from tensorflow.keras.utils import get_custom_objects
import numpy as np


def gelu(x):
    # tanh approximation of GELU; use np.pi rather than the rounded 3.14
    return 0.5 * x * (1 + keras.activations.tanh(
        ((2 / np.pi)**0.5) * (x + 0.044715 * x**3)))


def softplus(x):
    return tensorflow.math.log(1 + tensorflow.math.exp(x))
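
# NOTE: log(1 + exp(x)) overflows for large x; tf.math.softplus computes
# the same function in a numerically stable way.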


get_custom_objects().update({'gelu': keras.layers.Activation(gelu)})
get_custom_objects().update(
    {'custom_softplus': keras.layers.Activation(softplus)})
model = keras.Sequential([
    keras.layers.Reshape([28, 28, 1]),
    keras.layers.Conv2D(filters=512,
                        activation=tensorflow.nn.elu,
                        padding='same',
                        kernel_size=[17, 17]),
    keras.layers.Conv2D(filters=256,
                        activation=tensorflow.nn.elu,
                        padding='same',
                        kernel_size=[11, 11]),
    keras.layers.Conv2D(filters=128,
                        activation=tensorflow.nn.elu,
                        padding='same',
Example #21
    def __init__(self, drop_connect_rate=0., **kwargs):
        super().__init__(**kwargs)
        self.drop_connect_rate = drop_connect_rate

    def call(self, inputs, training=None):

        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute drop_connect tensor
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
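            # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, so
            # each sample either survives (rescaled by 1/keep_prob) or is zeroed.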
            output = tf.math.truediv(inputs, keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training)

    def get_config(self):
        config = super().get_config()
        config['drop_connect_rate'] = self.drop_connect_rate
        return config


get_custom_objects().update({
    'DropConnect': DropConnect,
    'Swish': Swish,
})
Example #22
    Xt[column + '_counts'] = test_values
categorical_variables = [col for col in X.columns if '_counts' not in col]
numeric_variables = [col for col in X.columns if '_counts' in col]
print(X.head())

from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.layers import Activation, LeakyReLU


# Add the GELU function to Keras
def gelu(x):
    return 0.5 * x * (
        1 + tf.tanh(tf.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))


get_custom_objects().update({'gelu': Activation(gelu)})

# Add leaky-relu so we can use it as a string
get_custom_objects().update({'leaky-relu': Activation(LeakyReLU(alpha=0.2))})
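
# With both names registered they can be passed as activation strings.
# A minimal sketch (layer sizes are illustrative):
from tensorflow.keras.layers import Dense

hidden = Dense(64, activation='gelu')
out = Dense(1, activation='leaky-relu')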
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD, Adam, Nadam
from tensorflow.keras.layers import Dense, Input, Embedding, Reshape, GlobalAveragePooling1D
from tensorflow.keras.layers import Flatten, concatenate, Concatenate, Lambda, Dropout, SpatialDropout1D
from tensorflow.keras.layers import MaxPooling1D, BatchNormalization, AveragePooling1D, Conv1D
from tensorflow.keras.layers import Activation, LeakyReLU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
Example #23
#choose gpu on processing
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # second gpu

import sys
sys.path.append(
    '/media/tohn/SSD/Nor_ABnor_Network_24/content/efficientnet_keras_transfer_learning'
)

##load model
from efficientnet.layers import Swish, DropConnect
from efficientnet.model import ConvKernalInitializer
from tensorflow.keras.utils import get_custom_objects

get_custom_objects().update({
    'ConvKernalInitializer': ConvKernalInitializer,
    'Swish': Swish,
    'DropConnect': DropConnect
})

from tensorflow.keras.models import load_model
model = load_model(
    '/media/tohn/SSD/Nor_ABnor_Network_24/content/efficientnet_keras_transfer_learning/models/Nor_ABnor_b3_R2.h5'
)
model.summary()
model.pop()
model.summary()

## Build a new network
model2 = models.Sequential()
model2.add(model)
model2.add(layers.Dense(1, activation='sigmoid', name="fc_out"))  # binary head (2 classes)
Example #24
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import *
from tensorflow.keras.utils import get_custom_objects

from interlacer import utils


def piecewise_relu(x):
    """Custom nonlinearity for freq-space convolutions."""
    return x + keras.activations.relu(1 / 2 * (x - 1)) + \
        keras.activations.relu(1 / 2 * (-1 - x))


get_custom_objects().update({'piecewise_relu': Activation(piecewise_relu)})


def get_nonlinear_layer(nonlinearity):
    """Selects and returns an appropriate nonlinearity."""
    if nonlinearity == 'relu':
        return tf.keras.layers.Lambda(keras.activations.relu)
    elif nonlinearity == '3-piece':
        # Activation is already a layer; wrapping it in Lambda is redundant
        return Activation(piecewise_relu)
    else:
        raise ValueError('Unknown nonlinearity: {}'.format(nonlinearity))


class BatchNormConv(Layer):
    """Custom layer that combines BN and a convolution."""
    def __init__(self, features, kernel_size, **kwargs):
        self.features = features
        self.kernel_size = kernel_size
Example #25
if __name__ == '__main__':
    args = setup_parser()
    logger.setLevel(logger.ERROR)

    weights, misc = setup_files(args)
    actor_weights, encoder_weights, value_weights = weights
    postfix, fileid, outdir, has_value_model = misc

    env = gym.make(args.env_id)
    env = wrappers.Monitor(env, directory=outdir, force=True)
    env.seed(0)

    # register softplusk activation. just in case the reader wants
    # to use this activation
    get_custom_objects().update({'softplusk': Activation(softplusk)})

    agent, train = setup_agent(env, args)

    if args.train or train:
        train = True
        csvfile, writer = setup_writer(fileid, postfix)

    # number of episodes we run the training
    episode_count = 1000
    state_dim = env.observation_space.shape[0]
    n_solved = 0
    start_time = datetime.datetime.now()
    # sampling and fitting
    for episode in range(episode_count):
        state = env.reset()
Example #26
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer":
            initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer":
            regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# Register so tf.keras can deserialize models that reference
# InstanceNormalization by name (e.g. via load_model) without an explicit
# custom_objects argument.
get_custom_objects().update({"InstanceNormalization": InstanceNormalization})


def distortPoints(points, intrinsicMatrix, radialDistortion,
                  tangentialDistortion):
    """Distort points according to camera parameters.

    Ported from Matlab 2018a
    """
    # unpack the intrinsic matrix
    cx = intrinsicMatrix[2, 0]
    cy = intrinsicMatrix[2, 1]
    fx = intrinsicMatrix[0, 0]
    fy = intrinsicMatrix[1, 1]
    skew = intrinsicMatrix[1, 0]
Example #27
get_custom_objects().update({
    "ssp": shifted_softplus,
    "shifted_softplus": shifted_softplus,
    "tfn_mae": tfn_mae,
    RadialFactory.__name__: RadialFactory,
    DenseRadialFactory.__name__: DenseRadialFactory,
    Radial.__name__: Radial,
    EquivariantLayer.__name__: EquivariantLayer,
    Convolution.__name__: Convolution,
    MolecularConvolution.__name__: MolecularConvolution,
    HarmonicFilter.__name__: HarmonicFilter,
    SelfInteraction.__name__: SelfInteraction,
    MolecularSelfInteraction.__name__: MolecularSelfInteraction,
    EquivariantActivation.__name__: EquivariantActivation,
    MolecularActivation.__name__: MolecularActivation,
    Preprocessing.__name__: Preprocessing,
    UnitVectors.__name__: UnitVectors,
    MaskedDistanceMatrix.__name__: MaskedDistanceMatrix,
    OneHot.__name__: OneHot,
    DistanceMatrix.__name__: DistanceMatrix,
    KernelBasis.__name__: KernelBasis,
    GaussianBasis.__name__: GaussianBasis,
    AtomicNumberBasis.__name__: AtomicNumberBasis,
    Unstandardization.__name__: Unstandardization,
    DummyAtomMasking.__name__: DummyAtomMasking,
    CutoffLayer.__name__: CutoffLayer,
    CosineCutoff.__name__: CosineCutoff,
    TanhCutoff.__name__: TanhCutoff,
    LongTanhCutoff.__name__: LongTanhCutoff,
})
Example #28
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model, Sequential
from tensorflow.keras.utils import get_custom_objects


def gelu(x):
    return 0.5 * x * (
        1 + tf.tanh(tf.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))


get_custom_objects().update({'gelu': layers.Activation(gelu)})

act = 'relu'


def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * angle_rates


def spatial_posenc(position_r, position_c, d_model):
    angle_rads_r = get_angles(position_r,
                              np.arange(d_model)[np.newaxis, :], d_model)

    angle_rads_c = get_angles(position_c,
                              np.arange(d_model)[np.newaxis, :], d_model)

    pos_encoding = np.zeros(angle_rads_r.shape, dtype=np.float32)
Example #29
class ReLU6(Activation):
    def __init__(self, **kwargs):
        super(ReLU6, self).__init__(lambda x: K.relu(x, max_value=6.0), **kwargs)
        self.__name__ = 'ReLU6'


class Swish(Activation):
    def __init__(self, **kwargs):
        super(Swish, self).__init__(lambda x: x * K.sigmoid(x), **kwargs)
        self.__name__ = "Switch"


class LeakyReLU(Activation):
    def __init__(self, alpha=0.2, **kwargs):

        super(LeakyReLU,
              self).__init__(lambda x: tf.nn.leaky_relu(x, alpha=alpha),
                             **kwargs)
        self.__name__ = "LeakyReLU"


custom_objects = {
    'relu6': ReLU6(),
    'mish': Mish(),
    'swish': Swish(),
    'leaky_relu': LeakyReLU()
}
get_custom_objects().update(custom_objects)

__all__ = ['Mish', "ReLU6", "Swish"]
Example #30
            tf.keras.constraints.serialize(self.beta_constraint),
            'gamma_constraint':
            tf.keras.constraints.serialize(self.gamma_constraint),
            'mean_weights_constraints':
            tf.keras.constraints.serialize(self.mean_weights_constraints),
            'variance_weights_constraints':
            tf.keras.constraints.serialize(self.variance_weights_constraints),
        }
        base_config = super(SwitchNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


KU.get_custom_objects().update({'SwitchNormalization': SwitchNormalization})

if __name__ == '__main__':

    ip = KL.Input(shape=(None, None, 4))
    #ip = Input(batch_shape=(100, None, None, 2))
    x = SwitchNormalization(axis=-1)(ip)
    model = KM.Model(ip, x)
    model.compile('adam', 'mse')
    model.summary()

    import numpy as np
    x = np.random.normal(0.0, 1.0, size=(10, 8, 8, 4))
    K.get_session().run(tf.global_variables_initializer())
    model.fit(
        x,