# --- Example 1 (scraped snippet separator) ---
 def load(self, fullpath=None):
     """ Load a saved Keras model from disk.

     Parameters
     ----------
     fullpath: str, optional
         Path to the saved model file. Falls back to ``self.filename``
         when not given.

     Returns
     -------
     bool
         ``True`` when the model was loaded (or legacy weights were
         converted), ``False`` when loading failed and new models should
         be generated.
     """
     fullpath = fullpath if fullpath else self.filename
     logger.debug("Loading model: '%s'", fullpath)
     try:
         # Bug fix: previously this ignored ``fullpath`` and always loaded
         # ``self.filename``.
         network = load_model(fullpath,
                              custom_objects=get_custom_objects())
     except ValueError as err:
         # Keras raises this specific ValueError for weights-only files;
         # convert them instead of failing.
         if str(err).lower().startswith(
                 "cannot create group in read only mode"):
             self.convert_legacy_weights()
             return True
         logger.warning(
             "Failed loading existing training data. Generating new models")
         logger.debug("Exception: %s", str(err))
         return False
     except OSError as err:
         logger.warning(
             "Failed loading existing training data. Generating new models")
         logger.debug("Exception: %s", str(err))
         return False
     self.config = network.get_config()
     self.network = network  # Update network with saved model
     self.network.name = self.name
     return True
    def test_relation_layer():
        """Verify the Relation layer against a hand-computed expected output."""
        backend.set_session(None)
        input_data = np.array(
            [[[3, 2, 4], [1, 5, 2]], [[30, 20, 40], [10, 50, 20]]],
            dtype=np.float32)
        weights = np.array([[1, 0], [5, 6], [7, 8]], dtype=np.float32)

        bias = np.array([4, 7], dtype=np.float32)

        expected_output = np.array([[[6926, 8642], [6845, 8822]],
                                    [[663440, 807500], [655340, 825500]]],
                                   dtype=np.float32)
        tf.reset_default_graph()
        get_custom_objects()['Relation'] = Relation
        kwargs = {
            'relations': 2,
            # Constant initializers pin the layer weights so the output is
            # fully deterministic.
            'kernel_initializer': tf.constant_initializer(weights),
            'bias_initializer': tf.constant_initializer(bias)
        }
        # Removed dead code: an unused ``a = tf.get_collection(...)`` local.
        output = testing_utils.layer_test(Relation,
                                          kwargs=kwargs,
                                          input_data=input_data,
                                          expected_output=expected_output)
        if not np.array_equal(output, expected_output):
            raise AssertionError(
                'The output is not equal to our expected output')
# --- Example 3 (scraped snippet separator) ---
        self.scale_factor = scale_factor
        self.data_format = "channels_last"

    def build(self, input_shape):
        """No trainable weights to create; upscaling is a pure rearrangement."""

    def call(self, x, mask=None):
        """Rearrange channel blocks of ``x`` into spatial positions."""
        return depth_to_space(x, self.scale_factor, self.data_format)

    def compute_output_shape(self, input_shape):
        """Spatial dims grow by ``scale_factor``; channels shrink by its square."""
        factor = self.scale_factor
        if self.data_format == 'channels_first':
            batch, channels, rows, cols = input_shape
            return (batch, channels // (factor * factor),
                    rows * factor, cols * factor)
        batch, rows, cols, channels = input_shape
        return (batch, rows * factor, cols * factor,
                channels // (factor * factor))

    def get_config(self):
        """Serialize constructor arguments so the layer can be re-created."""
        base_config = super(SubPixelUpscaling, self).get_config()
        merged = dict(base_config)
        merged['scale_factor'] = self.scale_factor
        merged['data_format'] = self.data_format
        return merged


# Register the layer so saved models referencing it by name can be reloaded.
get_custom_objects()['SubPixelUpscaling'] = SubPixelUpscaling
# --- Example 4 (scraped snippet separator) ---
        loc = os.path.join(self.path(), path)
        print("Loading weights", loc)
        self.probabilityNetwork.load_weights(loc)
        return self

    def save_model(self, path):
        """Persist the underlying probability network to ``path``."""
        network = self.probabilityNetwork
        network.save(path)

    def load_model(self, path):
        """Replace the current network with a model loaded from ``path``.

        Returns
        -------
        self, to allow call chaining.
        """
        self.probabilityNetwork = load_model(path)
        return self

    def path(self):
        """Return the directory containing this source file."""
        here = os.path.realpath(__file__)
        return os.path.dirname(here)


def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    Similar pairs (y_true == 1) are penalised by squared distance;
    dissimilar pairs by the squared hinge on ``margin - distance``.
    '''
    margin = 1
    similar_term = y_true * K.square(y_pred)
    dissimilar_term = (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(similar_term + dissimilar_term)


from tensorflow.python.keras.utils import get_custom_objects

# Expose the loss by name so load_model can resolve it from config strings.
get_custom_objects()["contrastive_loss"] = contrastive_loss
        base_lr_params = [p for p in params if self._get_multiplier(p) is None]

        updates = []
        base_lr = self._optimizer.lr
        for param, multiplier in mult_lr_params.items():
            self._optimizer.lr = base_lr * multiplier
            updates.extend(self._optimizer.get_updates(loss, [param]))

        self._optimizer.lr = base_lr
        updates.extend(self._optimizer.get_updates(loss, base_lr_params))

        return updates

    def get_config(self):
        """Return the wrapped optimizer's config plus the multiplier settings."""
        merged = dict(self._optimizer.get_config())
        merged['optimizer'] = self._class
        merged['lr_multipliers'] = self._lr_multipliers
        return merged

    def __getattr__(self, name):
        return getattr(self._optimizer, name)

    def __setattr__(self, name, value):
        if name.startswith('_'):
            super(LearningRateMultiplier, self).__setattr__(name, value)
        else:
            self._optimizer.__setattr__(name, value)


# Register the wrapper so optimizers serialized by name can be restored.
get_custom_objects()['LearningRateMultiplier'] = LearningRateMultiplier
class DropConnect(KL.Layer):
    """Drop-connect / stochastic-depth layer.

    During training each sample in the batch survives with probability
    ``1 - drop_connect_rate`` and is rescaled by the keep probability; at
    inference the input passes through unchanged.
    """

    def __init__(self, drop_connect_rate=0., **kwargs):
        super().__init__(**kwargs)
        self.drop_connect_rate = drop_connect_rate

    def call(self, inputs, training=None):
        def _dropped_inputs():
            keep_prob = 1.0 - self.drop_connect_rate
            # Per-sample Bernoulli mask, broadcast over the remaining axes:
            # floor(keep_prob + U[0,1)) is 1 with probability keep_prob.
            batch_size = tf.shape(inputs)[0]
            noise = keep_prob + tf.random.uniform([batch_size, 1, 1, 1],
                                                  dtype=inputs.dtype)
            mask = tf.floor(noise)
            return tf.divide(inputs, keep_prob) * mask

        # Only apply the mask in the training phase.
        return K.in_train_phase(_dropped_inputs, inputs, training=training)

    def get_config(self):
        config = super().get_config()
        config['drop_connect_rate'] = self.drop_connect_rate
        return config


# Register custom layers so keras can deserialize them by name.
get_custom_objects()['DropConnect'] = DropConnect
get_custom_objects()['Swish'] = Swish
# --- Example 7 (scraped snippet separator) ---
from tensorflow.python.keras.engine.base_layer import AddMetric, AddLoss
from tensorflow.python.keras.utils import get_custom_objects
from models.layers import *
from models.optimizer import *
from models.losses import *

# Register custom layers.
get_custom_objects().update({
    "ConvFeatureExtractor": ConvFeatureExtractor,
    "ResidualConvFeatureExtractor": ResidualConvFeatureExtractor,
    "Map2Sequence": Map2Sequence,
    "BGRUEncoder": BGRUEncoder,
    "CTCDecoder": CTCDecoder,
    "DotAttention": DotAttention,
    "JamoCompose": JamoCompose,
    "JamoDeCompose": JamoDeCompose,
    "JamoEmbedding": JamoEmbedding,
    "JamoClassifier": JamoClassifier,
    "TeacherForcing": TeacherForcing,
})

# Register custom optimizers.
get_custom_objects().update({"AdamW": AdamW, "RectifiedAdam": RectifiedAdam})

# Register custom losses.
get_custom_objects().update({
    "ctc_loss": ctc_loss,
    "JamoCategoricalCrossEntropy": JamoCategoricalCrossEntropy,
})
# --- Example 8 (scraped snippet separator) ---
        return super().build(input_shape)

    def call(self, inputs, **kwargs):
        """Compute multi-head self-attention over ``inputs``.

        A single dense projection (``self.qkv_weights``) produces the
        queries, keys and values in one matmul; the result is then split
        into three tensors of shape
        ``(batch, seq_len, num_heads, d_model // num_heads)`` and handed
        to ``self.attention``.
        """
        #if not K.is_tensor(inputs):
        #    raise ValueError(
        #        'The layer can be called only with one tensor as an argument')
        _, seq_len, d_model = K.int_shape(inputs)
        # The first thing we need to do is to perform affine transformations
        # of the inputs to get the Queries, the Keys and the Values.
        qkv = K.dot(K.reshape(inputs, [-1, d_model]), self.qkv_weights)
        # splitting the keys, the values and the queries before further
        # processing
        pre_q, pre_k, pre_v = [
            K.reshape(
                # K.slice(qkv, (0, i * d_model), (-1, d_model)),
                qkv[:, i * d_model:(i + 1) * d_model],
                (-1, seq_len, self.num_heads, d_model // self.num_heads))
            for i in range(3)]
        # NOTE(review): arguments are passed in (q, v, k) order — confirm
        # that ``self.attention`` expects this ordering.
        attention_out = self.attention(pre_q, pre_v, pre_k, seq_len, d_model,
                                       training=kwargs.get('training'))
        return attention_out

    def compute_output_shape(self, input_shape):
        """Self-attention preserves the input shape."""
        return input_shape


# Make both attention layers resolvable by name during deserialization.
get_custom_objects()['MultiHeadSelfAttention'] = MultiHeadSelfAttention
get_custom_objects()['MultiHeadAttention'] = MultiHeadAttention
# --- Example 9 (scraped snippet separator) ---
        print('{} - {}'.format(type(sequence_length), type(d_model)))
        print('{}'.format(type(self.max_depth)))
        self.word_position_embeddings = self.add_weight(
            shape=(sequence_length, d_model),
            initializer='uniform',
            name='word_position_embeddings',
            trainable=True)
        self.depth_embeddings = self.add_weight(
            shape=(self.max_depth, d_model),
            initializer='uniform',
            name='depth_position_embeddings',
            trainable=True)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        """Add word-position and depth embeddings to ``inputs``.

        The current Transformer step must be supplied via the ``step``
        keyword argument; a ``ValueError`` is raised when it is missing.
        """
        depth = kwargs.get('step')
        if depth is None:
            # Fixed: the two adjacent string literals were missing a
            # separating space ("stepusing").
            raise ValueError("Please, provide current Transformer's step "
                             "using 'step' keyword argument.")
        # ``depth`` is guaranteed non-None past the raise, so the old
        # redundant ``if depth is not None`` check is gone.
        result = inputs + self.word_position_embeddings
        return result + self.depth_embeddings[depth]


# Register coordinate/positional embedding layers; 'AddPositionalEncoding'
# is kept as a backward-compatible alias for AddCoordinateEncoding.
get_custom_objects()['TransformerCoordinateEmbedding'] = TransformerCoordinateEmbedding
get_custom_objects()['AddCoordinateEncoding'] = AddCoordinateEncoding
get_custom_objects()['AddPositionalEncoding'] = AddCoordinateEncoding
# --- Example 10 (scraped snippet separator) ---
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(
                inputs, name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(
            K.sum(K.cast(step_is_active, 'int32')), 0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs,
            self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]

    def compute_output_shape(self, input_shape):
        """Both outputs (pass-through and weighted sum) match the input shape."""
        return [input_shape, input_shape]

    def finalize(self):
        """Register the accumulated ponder cost as a layer loss."""
        cost = self.ponder_cost
        self.add_loss(cost)


# Register transformer building blocks for model deserialization by name.
get_custom_objects()['LayerNormalization'] = LayerNormalization
get_custom_objects()['TransformerTransition'] = TransformerTransition
get_custom_objects()['TransformerACT'] = TransformerACT
get_custom_objects()['gelu'] = gelu
# --- Example 11 (scraped snippet separator) ---
import pdb
import netCDF4 as nc
import xarray as xr
import h5py
from glob import glob
import sys, os
import seaborn as sns
from tf_losses import *
#from .models import PartialReLU, QLayer, ELayer
from tf_models import PartialReLU, QLayer, ELayer

#from tensorflow.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.utils import get_custom_objects

# Make every custom metric and layer resolvable by name when loading models.
# Idiom fix: dict comprehension instead of dict([(k, v) for ...]).
metrics_dict = {func.__name__: func for func in all_metrics}
get_custom_objects().update(metrics_dict)
get_custom_objects().update({
    'PartialReLU': PartialReLU,
    'QLayer': QLayer,
    'ELayer': ELayer,
})
from configargparse import ArgParser

from ipykernel.kernelapp import IPKernelApp


def in_notebook():
    """Return True when running inside an IPython kernel (e.g. Jupyter)."""
    return IPKernelApp.initialized()


if in_notebook():
# --- Example 12 (scraped snippet separator) ---
        if self.add_biases:
            projected = K.bias_add(projected,
                                   self.embedding_weights['biases'],
                                   data_format='channels_last')
        if 0 < self.projection_dropout < 1:
            projected = K.in_train_phase(
                lambda: K.dropout(projected, self.projection_dropout),
                projected,
                training=kwargs.get('training'))
        attention = K.dot(projected, K.transpose(embedding_matrix))
        if self.scaled_attention:
            # scaled dot-product attention, described in
            # "Attention is all you need" (https://arxiv.org/abs/1706.03762)
            sqrt_d = K.constant(math.sqrt(emb_output_dim), dtype=K.floatx())
            attention = attention / sqrt_d
        result = K.reshape(
            self.activation(attention),
            (input_shape_tensor[0], input_shape_tensor[1], emb_input_dim))
        return result

    def compute_output_shape(self, input_shape):
        """Output is (batch, timesteps, vocabulary_size).

        ``input_shape`` is a pair: the main input's shape and the tied
        embedding matrix's shape (vocab, embedding_dim).
        """
        main_shape, matrix_shape = input_shape
        vocab_size, _ = matrix_shape
        return main_shape[0], main_shape[1], vocab_size


# Register the tied-embedding layers for name-based deserialization.
get_custom_objects()['ReusableEmbedding'] = ReusableEmbedding
get_custom_objects()['TiedOutputEmbedding'] = TiedOutputEmbedding
# --- Example 13 (scraped snippet separator) ---
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed

    def get_config(self):
        """Serialize layer hyper-parameters for model saving/reloading."""
        base_config = super(InstanceNormalization, self).get_config()
        merged = dict(base_config)
        merged.update(
            axis=self.axis,
            epsilon=self.epsilon,
            center=self.center,
            scale=self.scale,
            beta_initializer=initializers.serialize(self.beta_initializer),
            gamma_initializer=initializers.serialize(self.gamma_initializer),
            beta_regularizer=regularizers.serialize(self.beta_regularizer),
            gamma_regularizer=regularizers.serialize(self.gamma_regularizer),
            beta_constraint=constraints.serialize(self.beta_constraint),
            gamma_constraint=constraints.serialize(self.gamma_constraint),
        )
        return merged


# Register InstanceNormalization for name-based model loading.
get_custom_objects()['InstanceNormalization'] = InstanceNormalization
# --- Example 14 (scraped snippet separator) ---
    else:
      # When softmax activation function is used for output operation, we
      # use logits from the softmax function directly to compute loss in order
      # to prevent collapsing zero when training.
      # See b/117284466
      assert len(output.op.inputs) == 1
      output = output.op.inputs[0]

  rank = len(output.shape)
  axis = axis % rank
  if axis != rank - 1:
    permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
    output = array_ops.transpose(output, perm=permutation)

  output_shape = output.shape
  targets = cast(flatten(target), 'int64')
  logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
  res = nn.sparse_softmax_cross_entropy_with_logits(
      labels=targets, logits=logits)
  if len(output_shape) >= 3:
    # If our output includes timesteps or spatial dimensions we need to reshape
    return array_ops.reshape(res, array_ops.shape(output)[:-1])
  else:
    return res



# Registering these lets models refer to the custom objects via string names.
get_custom_objects().update({
    'cast': cast,
    'sparse_categorical_crossentropy': sparse_categorical_crossentropy,
})
# --- Example 15 (scraped snippet separator) ---
    def __init__(self, scale_factor=2, data_format=None, **kwargs):
        """Configure the sub-pixel upscaling factor.

        NOTE(review): the ``data_format`` argument is accepted but ignored;
        the layer always operates in 'channels_last'. Confirm whether this
        override is intentional.
        """
        super(SubPixelUpscaling, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.data_format = "channels_last"

    def build(self, input_shape):
        """Nothing to build: the layer holds no trainable weights."""

    def call(self, x, mask=None):
        """Move channel blocks of ``x`` into higher-resolution spatial dims."""
        return depth_to_space(x, self.scale_factor, self.data_format)

    def compute_output_shape(self, input_shape):
        """Spatial dims are multiplied, channels divided, by the scale factor."""
        scale = self.scale_factor
        if self.data_format == 'channels_first':
            batch, depth, rows, cols = input_shape
            return (batch, depth // (scale ** 2), rows * scale, cols * scale)
        batch, rows, cols, depth = input_shape
        return (batch, rows * scale, cols * scale, depth // (scale ** 2))

    def get_config(self):
        """Serialize the constructor arguments for layer re-instantiation."""
        own_config = {'scale_factor': self.scale_factor,
                      'data_format': self.data_format}
        base_config = super(SubPixelUpscaling, self).get_config()
        merged = dict(base_config)
        merged.update(own_config)
        return merged


# Register SubPixelUpscaling so saved models can resolve it by name.
get_custom_objects()['SubPixelUpscaling'] = SubPixelUpscaling
# --- Example 16 (scraped snippet separator) ---
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer':
            initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# Register InstanceNormalization for name-based model loading.
get_custom_objects()['InstanceNormalization'] = InstanceNormalization


def _moments(x, axes, shift=None, keep_dims=False):
    ''' Compute mean and variance of ``x`` along ``axes``.

    Dispatches on the active Keras backend; only the TensorFlow and Theano
    backends are supported.
    '''
    backend_name = K.backend()
    if backend_name == 'tensorflow':
        import tensorflow as tf
        return tf.nn.moments(x, axes, shift=shift, keep_dims=keep_dims)
    if backend_name == 'theano':
        import theano.tensor as T
        mean_batch = T.mean(x, axis=axes, keepdims=keep_dims)
        var_batch = T.var(x, axis=axes, keepdims=keep_dims)
        return mean_batch, var_batch
    raise RuntimeError("Currently does not support CNTK backend")
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Scale spatial dimensions by ``self.size``; unknown dims stay None."""
    dims = tensor_shape.TensorShape(input_shape).as_list()

    def _scaled(dim, factor):
      # Preserve unknown (None) dimensions instead of multiplying them.
      return dim * factor if dim is not None else None

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [dims[0], dims[1],
           _scaled(dims[2], self.size[0]),
           _scaled(dims[3], self.size[1])])
    return tensor_shape.TensorShape(
        [dims[0],
         _scaled(dims[1], self.size[0]),
         _scaled(dims[2], self.size[1]),
         dims[3]])

  def call(self, inputs):
    """Resize ``inputs`` bilinearly by the configured height/width factors."""
    return resize_images_bilinear(inputs, self.size[0], self.size[1],
                                  self.data_format)

  def get_config(self):
    """Serialize the layer's constructor arguments."""
    base_config = super(BilinearUpSampling2D, self).get_config()
    merged = dict(base_config)
    merged.update({'size': self.size, 'data_format': self.data_format})
    return merged


# Register both layers so model save files referencing them can be restored.
get_custom_objects()['SeparableConv2DKeras'] = SeparableConv2DKeras
get_custom_objects()['BilinearUpSampling2D'] = BilinearUpSampling2D