Code example #1
def init_keras_custom_objects():
    custom_objects = {
        '_relu6': inject_keras_modules(get_relu6)(),
        '_hard_swish': inject_keras_modules(get_hard_swish)()
    }

    get_custom_objects().update(custom_objects)
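
# Sketch (added for illustration, not from the source): after the registration
# runs, a model saved with '_relu6' or '_hard_swish' activations can be
# reloaded without an explicit custom_objects argument. The file path is
# hypothetical.
from keras.models import load_model

init_keras_custom_objects()
model = load_model('mobilenet_v3.h5')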
Code example #2
    # Output shape
        2D tensor with shape:
        `(batch_size, channels)`
    """
    def call(self, inputs):
        if self.data_format == 'channels_last':
            pooled = K.std(inputs, axis=[1, 2])
        else:
            pooled = K.std(inputs, axis=[2, 3])
        return pooled


class L2_normalize(Layer):
    def __init__(self, axis, **kwargs):
        self.axis = axis
        super(L2_normalize, self).__init__(**kwargs)

    def call(self, x):
        return K.l2_normalize(x, self.axis)

    def get_config(self):
        config = super(L2_normalize, self).get_config()
        config["axis"] = self.axis
        return config


# Register this module's layer classes in the Keras custom objects
for name, obj in inspect.getmembers(sys.modules[__name__]):
    if inspect.isclass(obj) and obj.__module__ == __name__:
        get_custom_objects().update({name: obj})
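
# Sketch (added for illustration, not from the source): registration plus
# get_config() makes the layer serializable; a quick round trip through its
# config shows the custom 'axis' argument surviving.
layer = L2_normalize(axis=-1)
clone = L2_normalize.from_config(layer.get_config())
assert clone.axis == -1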
Code example #3
      an initialization for the variable
    """
    def __call__(self, shape, dtype=K.floatx(), **kwargs):
        """Initialization for dense kernels.

        This initialization is equal to
          tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
                                          distribution='uniform').
        It is written out explicitly here for clarity.

        Args:
          shape: shape of variable
          dtype: dtype of variable

        Returns:
          an initialization for the variable
        """
        init_range = 1.0 / np.sqrt(shape[1])
        return tf.random_uniform(shape, -init_range, init_range, dtype=dtype)


conv_kernel_initializer = EfficientConv2DKernelInitializer()
dense_kernel_initializer = EfficientDenseKernelInitializer()

get_custom_objects().update({
    'EfficientDenseKernelInitializer':
    EfficientDenseKernelInitializer,
    'EfficientConv2DKernelInitializer':
    EfficientConv2DKernelInitializer,
})
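
# Hypothetical usage sketch (added for illustration): the instances are passed
# directly to layer constructors, while the registry entries let saved models
# that reference the initializers by class name be restored.
import tensorflow.keras.layers as KL

conv = KL.Conv2D(32, 3, kernel_initializer=conv_kernel_initializer)
fc = KL.Dense(10, kernel_initializer=dense_kernel_initializer)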
Code example #4
            d_val = self.d_max_value / (1 + (
                (self.d_max_value / 1e-3) - 1) * np.exp(-(2 * t_val)))
            t_val += float(self.t_delta)

            self.add_update([
                K.update(self.r_max, r_val),
                K.update(self.d_max, d_val),
                K.update(self.t, t_val)
            ], x)

        return x_normed

    def get_config(self):
        config = {
            'epsilon': self.epsilon,
            'mode': self.mode,
            'axis': self.axis,
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'momentum': self.momentum,
            'r_max_value': self.r_max_value,
            'd_max_value': self.d_max_value,
            't_delta': self.t_delta
        }
        base_config = super(BatchRenormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'BatchRenormalization': BatchRenormalization})
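
# Sketch (added for illustration, not from the source): the registry entry is
# what lets a saved model containing BatchRenormalization layers be reloaded
# by name; the path below is hypothetical.
from keras.models import load_model

model = load_model('renorm_model.h5')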
Code example #5
File: agent.py  Project: adamklein/CS221
infinity = float('inf')

fit_estim = None
tf_model = None


def r_squared(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res / (SS_tot + K.epsilon()))


from tensorflow.python.keras.utils.generic_utils import get_custom_objects
get_custom_objects().update({"r_squared": r_squared})


def utility1(board, print_board=False):
    global node_count
    node_count += 1

    factor = (1 if (board.turn == chess.WHITE) else -1)

    if board.is_variant_win():
        return 1000 * factor
    elif board.is_variant_loss():
        return -1000 * factor

    fen = Counter(board.board_fen())
    wk_rank = chess.square_rank(board.pieces(chess.KING, chess.WHITE).pop())
Code example #6
File: clstm.py  Project: ganguagua/asr
import time
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, LSTM, Dense, MaxPooling2D, BatchNormalization
from tensorflow.python.keras.utils.generic_utils import get_custom_objects
import numpy as np


def gelu(x):
    cdf = 0.5 * (1.0 + tf.tanh(
        (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
    return x * cdf


get_custom_objects().update({'gelu': tf.keras.layers.Activation(gelu)})
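
# Sketch (added for illustration, not from the source): registering an
# Activation layer under the name 'gelu' lets later layers request the
# activation as a plain string.
gelu_dense = Dense(64, activation='gelu')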


class CLSTM(tf.keras.Model):
    def __init__(self, frames, nfilt, nlabel, batch):
        super(CLSTM, self).__init__()
        self.nfilt = nfilt
        self.nlabel = nlabel
        self.frames = frames
        self.batch_size = batch
        self.hidden_size_m = 128
        self.hidden_size_s = 256
        self.hidden_size_l = 512
        self.hidden_size_xl = 1024

        self.cnnLayer1 = Conv2D(32, (3, 3),
                                strides=(1, 1),
Code example #7
def get_activation_function():
    get_custom_objects().update({'comb-H-sine': Activation(comb_h_sine)})
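
# Sketch (added for illustration, not from the source): after the helper runs,
# the hyphenated name resolves anywhere Keras accepts an activation
# identifier. comb_h_sine itself (and the Activation import) are defined
# elsewhere in the source project.
get_activation_function()
act = Activation('comb-H-sine')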
Code example #8
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed

    def get_config(self):
        config = {
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'InstanceNormalization': InstanceNormalization})
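
# Round-trip sketch (added for illustration, not from the source): serialize()
# records the class name, and deserialize() resolves it via the registry.
from keras.layers import serialize, deserialize

layer = InstanceNormalization(axis=-1)
clone = deserialize(serialize(layer))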
Code example #9
            'gamma_initializer':
            initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(GroupNorm, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


get_custom_objects().update({'GroupNorm': GroupNorm})


class SwitchNorm(Layer):
    """Switchable Normalization layer
    Switch Normalization performs Instance Normalization, Layer Normalization and Batch
    Normalization using its parameters, and then weighs them using learned parameters to
    allow different levels of interaction of the 3 normalization schemes for each layer.
    Only supports the moving average variant from the paper, since the `batch average`
    scheme requires dynamic graph execution to compute the mean and variance of several
    batches at runtime.
    # Arguments
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
Code example #10
    def score(gt, pr):
        return iou_score(gt,
                         pr,
                         class_weights=class_weights,
                         smooth=smooth,
                         per_image=per_image)

    return score


jaccard_score = iou_score
get_jaccard_score = get_iou_score

# Update custom objects
get_custom_objects().update({
    'iou_score': iou_score,
    'jaccard_score': jaccard_score,
})
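
# Sketch (added for illustration, not from the source): once registered, the
# metric can be named as a plain string at compile time; the toy model below
# is hypothetical.
from keras.models import Sequential
from keras.layers import Conv2D

toy = Sequential([Conv2D(1, 1, activation='sigmoid', input_shape=(None, None, 3))])
toy.compile('adam', loss='binary_crossentropy', metrics=['iou_score'])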

# ============================== F/Dice - score ==============================


def f_score(gt, pr, class_weights=1, beta=1, smooth=SMOOTH, per_image=True):
    r"""The F-score (Dice coefficient) can be interpreted as a weighted average of the precision and recall,
    where an F-score reaches its best value at 1 and worst score at 0.
    The relative contribution of ``precision`` and ``recall`` to the F1-score are equal.
    The formula for the F score is:

    .. math:: F_\beta(precision, recall) = (1 + \beta^2) \frac{precision \cdot recall}
        {\beta^2 \cdot precision + recall}

    The formula in terms of *Type I* and *Type II* errors:
Code example #11
from flask_restful import Api
import tensorflow as tf
from tensorflow.python.keras.backend import set_session
from tensorflow.python.keras.models import load_model

import src.config as global_config
from models.icnet import ICNet
from src.app.resources import ObjectSegmentation, ColorfulObjectSegmentation
from src.utils.api import compose_relative_resource_url
from src.app.config import SERVICE_NAME, API_VERSION, \
    WEIGHTS_PATH, CONFIDENCE_THRESHOLD, SEGMENTATION_ENDPOINT, COLORFUL_SEGMENTATION_ENDPOINT

from tensorflow.python.keras.utils.generic_utils import get_custom_objects
from src.evaluation.losses.dice import dice_loss, ce_dice_loss

get_custom_objects().update({"bce_dice_loss": ce_dice_loss})
get_custom_objects().update({"dice_loss": dice_loss})


def build_icnet_model():
    ic_net = ICNet(num_classes=len(global_config.CLASS_MAPPINGS) + 1)
    input_shape = global_config.MODEL_INPUT_SIZE.to_compact_form() + (3, )
    model = ic_net.build_model(input_shape=input_shape)
    model.load_weights(WEIGHTS_PATH)
    return model


INTER_SERVICES_TOKEN = None
app = Flask(__name__)
GRAPH = None
TF_SESSION = None
Code example #12
        self.interpolation = interpolation

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_first':
            height = self.factor[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.factor[1] * input_shape[3] if input_shape[3] is not None else None
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.factor[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.factor[1] * input_shape[2] if input_shape[2] is not None else None
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs):
        return resize_images(inputs, self.factor[0], self.factor[1],
                             self.data_format, self.interpolation)

    def get_config(self):
        # serialize 'interpolation' as well, since call() depends on it;
        # without it the layer would not restore correctly from a saved model
        config = {'factor': self.factor,
                  'data_format': self.data_format,
                  'interpolation': self.interpolation}
        base_config = super(ResizeImage, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'ResizeImage': ResizeImage})
Code example #13
File: layers.py  Project: rmohashi/efficientnet
class DropConnect(KL.Layer):
    def __init__(self, drop_connect_rate=0., **kwargs):
        super().__init__(**kwargs)
        self.drop_connect_rate = drop_connect_rate

    def call(self, inputs, training=None):
        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute drop_connect tensor
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += tf.random_uniform([batch_size, 1, 1, 1],
                                               dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
            output = tf.div(inputs, keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training)

    def get_config(self):
        config = super().get_config()
        config['drop_connect_rate'] = self.drop_connect_rate
        return config


get_custom_objects().update({
    'DropConnect': DropConnect,
    'Swish': Swish,
})
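
# Sketch (added for illustration, not from the source): at inference time the
# layer is an identity, because K.in_train_phase falls back to the raw inputs.
layer = DropConnect(drop_connect_rate=0.2)
outputs = layer(tf.ones([4, 8, 8, 16]), training=False)  # == inputs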
Code example #14
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[
                2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[
                3] is not None else None
            return tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], height, width])
        else:
            height = self.size[0] * input_shape[1] if input_shape[
                1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[
                2] is not None else None
            return tensor_shape.TensorShape(
                [input_shape[0], height, width, input_shape[3]])

    def call(self, inputs):
        return resize_images_bilinear(inputs, self.size[0], self.size[1],
                                      self.data_format)

    def get_config(self):
        config = {'size': self.size, 'data_format': self.data_format}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# add this to custom objects for restoring model save files
get_custom_objects().update({
    'SeparableConv2DKeras': SeparableConv2DKeras,
    'BilinearUpSampling2D': BilinearUpSampling2D
})
Code example #15
                     class_weights=1.,
                     smooth=SMOOTH,
                     per_image=True):
    cce = categorical_crossentropy(gt, pr) * class_weights
    cce = K.mean(cce)
    return cce_weight * cce + jaccard_loss(gt,
                                           pr,
                                           smooth=smooth,
                                           class_weights=class_weights,
                                           per_image=per_image)


# Update custom objects
get_custom_objects().update({
    'jaccard_loss': jaccard_loss,
    'bce_jaccard_loss': bce_jaccard_loss,
    'cce_jaccard_loss': cce_jaccard_loss,
})
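
# Sketch (added for illustration): as with the metrics module, the registered
# loss names now resolve from plain strings, e.g.
# model.compile('adam', loss='bce_jaccard_loss') on a hypothetical model.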

# ============================== Dice Losses ================================


def dice_loss(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True):
    r"""Dice loss function for imbalanced datasets:

    .. math:: L(precision, recall) = 1 - (1 + \beta^2) \frac{precision \cdot recall}
        {\beta^2 \cdot precision + recall}

    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
Code example #16
    model.add(BatchNormalization(axis=-1, momentum=0.99)) 
    model.add(Activation('relu'))
   
    model.add(TimeDistributed(Dense(1, activation=None)))
    model.add(BatchNormalization(axis=-1, momentum=0.99)) 
    model.add(Activation('sigmoid'))
    
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[rec_acc])
    return model

ZOO = {
    'version_1': version_1,
    'version_2': version_2,
    'version_3': version_3,
    'version_4': version_4,
    'version_5': version_5,
    'version_6': version_6,
    'version_7': version_7,
}

def get_model_fn(model_ver):
    return ZOO[model_ver]


_CUSTOM_OBJECTS = [
    rec_acc,
]
for each in _CUSTOM_OBJECTS:
    # 'func_name' exists only on Python 2 functions; '__name__' is portable
    key = each.__name__
    get_custom_objects()[key] = each
Code example #17
            self.units,
            'nac_only':
            self.nac_only,
            'kernel_W_initializer':
            initializers.serialize(self.kernel_W_initializer),
            'kernel_M_initializer':
            initializers.serialize(self.kernel_M_initializer),
            'gate_initializer':
            initializers.serialize(self.gate_initializer),
            'kernel_W_regularizer':
            regularizers.serialize(self.kernel_W_regularizer),
            'kernel_M_regularizer':
            regularizers.serialize(self.kernel_M_regularizer),
            'gate_regularizer':
            regularizers.serialize(self.gate_regularizer),
            'kernel_W_constraint':
            constraints.serialize(self.kernel_W_constraint),
            'kernel_M_constraint':
            constraints.serialize(self.kernel_M_constraint),
            'gate_constraint':
            constraints.serialize(self.gate_constraint),
            'epsilon':
            self.epsilon
        }

        base_config = super(NALU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'NALU': NALU})
Code example #18
File: base_ESPNet.py  Project: tanmaysingha/ESPNet
        Args:
          shape: shape of variable
          dtype: dtype of variable
        Returns:
          an initialization for the variable
        """
        init_range = 1.0 / np.sqrt(shape[1])
        return tf.random.uniform(shape, -init_range, init_range, dtype=dtype)


conv_kernel_initializer = EfficientConv2DKernelInitializer()
dense_kernel_initializer = EfficientDenseKernelInitializer()

get_custom_objects().update({
    "EfficientDenseKernelInitializer":
    EfficientDenseKernelInitializer,
    "EfficientConv2DKernelInitializer":
    EfficientConv2DKernelInitializer,
})


class Swish(KL.Layer):
    def call(self, inputs):
        return tf.nn.swish(inputs)


class DropConnect(KL.Layer):
    def __init__(self, drop_connect_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.drop_connect_rate = drop_connect_rate

    def call(self, inputs, training=None):
Code example #19
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras.utils.generic_utils import get_custom_objects

from keras4hep.layers.gather import Gather
from keras4hep.layers.multi_head_attention import MultiHeadAttention
from keras4hep.layers.multi_head_attention import MultiHeadSelfAttention
from keras4hep.layers.layer_normalization import LayerNormalization
from keras4hep.layers.pos_wise_ffn import PosWiseFFN

_LOCAL_CUSTOM_OBJECTS = [
    Gather,
    MultiHeadAttention,
    MultiHeadSelfAttention,
    LayerNormalization,
    PosWiseFFN,
]

# get_custom_objects() returns the global dictionary mapping names to classes (_GLOBAL_CUSTOM_OBJECTS).
for each in _LOCAL_CUSTOM_OBJECTS:
    name = each.get_class_name()
    get_custom_objects()[name] = each
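
# Sketch (added for illustration, not from the source): with each class
# registered under its own name, a saved keras4hep model reloads without an
# explicit custom_objects mapping; the path below is hypothetical.
from tensorflow.python.keras.models import load_model

model = load_model('keras4hep_model.h5')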