Example no. 1
 def from_config(cls, config, custom_objects=None):
     linear_config = config.pop('linear_model')
     linear_model = layer_module.deserialize(linear_config, custom_objects)
     dnn_config = config.pop('dnn_model')
     dnn_model = layer_module.deserialize(dnn_config, custom_objects)
     activation = activations.deserialize(config.pop('activation', None),
                                          custom_objects=custom_objects)
     return cls(linear_model=linear_model,
                dnn_model=dnn_model,
                activation=activation,
                **config)
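For context, a minimal sketch of the get_config()/deserialize() round trip this from_config relies on, using a plain Dense layer as a stand-in (TensorFlow 2.x assumed; not part of the original snippet):

from tensorflow.keras import layers

layer = layers.Dense(4, activation='relu')
config = {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
clone = layers.deserialize(config)  # same architecture, fresh (unbuilt) weights
assert clone.get_config()['units'] == 4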
Example no. 2
def assemble_narx(params, final_reshape=True):
    """Construct a NARX model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are Dense and optional, i.e., depend on whether they are
    specified in the params dictionary. Here, X is a sequence.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)

    # Flatten the time dimension
    target_shape = (np.prod(input_shape), )
    previous = layers.Reshape(target_shape)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize({
            'class_name': layer['name'],
            'config': layer['config']
        })
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(
                **layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs)
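A hedged sketch of the params dictionary assemble_narx expects; the key names come from the function body above, the values are purely illustrative:

# Illustrative only; assumes the same names as above are in scope
# (numpy as np, keras layers, and KerasModel).
params = {
    'input_shape': (10, 3),            # 10 time steps, 3 features
    'output_shape': (1,),
    'hidden_layers': [
        {'name': 'Dense',
         'config': {'units': 64, 'activation': 'relu'},
         'dropout': 0.25,
         'batch_norm': None},
    ],
}
model = assemble_narx(params)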
Example no. 3
def assemble_rnn(params, final_reshape=True):
    """Construct an RNN/LSTM/GRU model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are optional recurrent layers and depend on whether they
    are specified in the params dictionary.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)
    # inputs = layers.Input(batch_shape=[20] + list(input_shape))

    # Masking layer
    previous = layers.Masking(mask_value=0.0)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize({
            'class_name': layer['name'],
            'config': layer['config']
        })
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(
                **layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs)
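The matching params for assemble_rnn would carry recurrent layer configs instead; a hedged sketch with illustrative values:

params = {
    'input_shape': (20, 8),            # 20 time steps, 8 features
    'output_shape': (1,),
    'hidden_layers': [
        {'name': 'LSTM',
         'config': {'units': 32, 'return_sequences': False},
         'dropout': None,
         'batch_norm': None},
    ],
}
model = assemble_rnn(params)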
Example no. 4
def get_copy_of_layer(layer):
    from keras.applications.mobilenet import relu6
    from keras.layers.core import Activation
    from keras import layers
    config = layer.get_config()

    # Non-standard relu6 layer (from MobileNet)
    if layer.__class__.__name__ == 'Activation':
        if config['activation'] == 'relu6':
            layer_copy = Activation(relu6, name=layer.name)
            return layer_copy

    # DeepLabV3+ non-standard layer
    if layer.__class__.__name__ == 'BilinearUpsampling':
        from neural_nets.deeplab_v3_plus_model import BilinearUpsampling
        layer_copy = BilinearUpsampling(upsampling=config['upsampling'],
                                        output_size=config['output_size'],
                                        name=layer.name)
        return layer_copy

    layer_copy = layers.deserialize({
        'class_name': layer.__class__.__name__,
        'config': config
    })
    layer_copy.name = layer.name
    return layer_copy
Example no. 5
def model_from_config(config, custom_objects=None):
    """Instantiates a Keras model from its config.
 
  Usage:
  ```
  # for a Functional API model
  tf.keras.Model().from_config(model.get_config())

  # for a Sequential model
  tf.keras.Sequential().from_config(model.get_config())
  ```

  Args:
      config: Configuration dictionary.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).

  Raises:
      TypeError: if `config` is not a dictionary.
  """
    if isinstance(config, list):
        raise TypeError(
            '`model_from_config` expects a dictionary, not a list. '
            'Maybe you meant to use '
            '`Sequential.from_config(config)`?')
    from keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(config, custom_objects=custom_objects)
Example no. 6
    def from_config(cls, config, custom_objects=None):
        wrapper = deserialize(config.pop('wrapper'),
                              custom_objects=custom_objects)
        num_steps = config.get('num_steps', 3)
        mode = config.get('mode', 'lr_per_layer')

        return cls(wrapper, num_steps, mode)
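This from_config implies a get_config that serializes the wrapped layer under the 'wrapper' key; a hedged sketch of what that counterpart might look like (an assumption, not shown in the source):

def get_config(self):
    # Hypothetical counterpart to the from_config above; assumes a
    # wrapper-style class exposing .wrapper, .num_steps and .mode.
    config = super().get_config()
    config.update({
        'wrapper': {'class_name': self.wrapper.__class__.__name__,
                    'config': self.wrapper.get_config()},
        'num_steps': self.num_steps,
        'mode': self.mode,
    })
    return config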
Example no. 7
def optimize_conv2d_batchnorm_block(m, initial_model, input_layers, conv, bn):
    from keras import layers
    from keras.models import Model

    conv_layer_type = conv.__class__.__name__
    conv_config = conv.get_config()
    conv_config['use_bias'] = True
    bn_config = bn.get_config()
    if conv_config['activation'] != 'linear':
        print('Only linear activation supported for conv + bn optimization!')
        exit()

    # Copy Conv2D layer
    layer_copy = layers.deserialize({'class_name': conv.__class__.__name__, 'config': conv_config})
    layer_copy.name = bn.name # We use batch norm name here to find it later

    # Create new model to initialize layer. We need to store other output tensors as well
    output_tensor, output_names = get_layers_without_output(m)
    input_layer_name = initial_model.layers[input_layers[0]].name
    prev_layer = m.get_layer(name=input_layer_name)
    x = layer_copy(prev_layer.output)

    output_tensor_to_use = [x]
    for i in range(len(output_names)):
        if output_names[i] != input_layer_name:
            output_tensor_to_use.append(output_tensor[i])

    if len(output_tensor_to_use) == 1:
        output_tensor_to_use = output_tensor_to_use[0]

    tmp_model = Model(inputs=m.input, outputs=output_tensor_to_use)

    if conv.get_config()['use_bias']:
        (conv_weights, conv_bias) = conv.get_weights()
    else:
        (conv_weights,) = conv.get_weights()

    if bn_config['scale']:
        gamma, beta, run_mean, run_std = bn.get_weights()
    else:
        gamma = 1.0
        beta, run_mean, run_std = bn.get_weights()

    eps = bn_config['epsilon']
    A = gamma / np.sqrt(run_std + eps)

    if conv.get_config()['use_bias']:
        B = conv_bias + beta - ((gamma * run_mean) / np.sqrt(run_std + eps))
    else:
        B = beta - ((gamma * run_mean) / np.sqrt(run_std + eps))

    if conv_layer_type == 'Conv2D':
        for i in range(conv_weights.shape[-1]):
            conv_weights[:, :, :, i] *= A[i]
    elif conv_layer_type == 'DepthwiseConv2D':
        for i in range(conv_weights.shape[-2]):
            conv_weights[:, :, i, :] *= A[i]

    tmp_model.get_layer(layer_copy.name).set_weights((conv_weights, B))
    return tmp_model
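The folding above works because inference-time batch norm is affine: y = gamma * (x - mean) / sqrt(var + eps) + beta, so scaling the kernel by A = gamma / sqrt(var + eps) and setting the bias to B absorbs the whole normalization into the convolution (note that run_std above actually holds the moving variance, which is why it sits under the square root). A self-contained NumPy check with illustrative values:

import numpy as np

gamma, beta, mean, var, eps = 1.5, 0.2, 0.8, 4.0, 1e-3
x = np.linspace(-1.0, 1.0, 5)            # stand-in for a conv output channel
bn = gamma * (x - mean) / np.sqrt(var + eps) + beta
A = gamma / np.sqrt(var + eps)
B = beta - gamma * mean / np.sqrt(var + eps)
assert np.allclose(bn, A * x + B)        # folded kernel+bias matches batch norm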
Example no. 8
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i,layer in enumerate(model.layers):
        if i==index: res.add(new_layer)
        copied = deserialize(wrap_config(layer))  # Keras2
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
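wrap_config is not defined in this snippet; a plausible stand-in (an assumption, matching the class_name/config dicts fed to deserialize elsewhere on this page):

def wrap_config(layer):
    # Hypothetical helper: package a layer the way layers.deserialize expects.
    return {'class_name': layer.__class__.__name__,
            'config': layer.get_config()}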
Example no. 9
def crossover(parent1, parent2):  #different parents
    p1break = random.choice(range(len(parent1.layers) - 4))
    p2break = random.choice(range(len(parent2.layers) - 4))
    #make child1
    child1 = Sequential()
    i = 0
    while i <= p1break:
        layer = parent1.layers[i]
        config = parent1.layers[i].get_config()
        layer = layers.deserialize({
            'class_name': layer.__class__.__name__,
            'config': config
        })
        child1.add(layer)
        i += 1
    for j in range(len(parent2.layers)):
        if j > p2break:
            layer = parent2.layers[j]
            config = parent2.layers[j].get_config()
            layer = layers.deserialize({
                'class_name': layer.__class__.__name__,
                'config': config
            })
            child1.add(layer)
    #make child2
    child2 = Sequential()
    i = 0
    while i <= p2break:
        layer = parent2.layers[i]
        config = parent2.layers[i].get_config()
        layer = layers.deserialize({
            'class_name': layer.__class__.__name__,
            'config': config
        })
        child2.add(layer)
        i += 1
    for j in range(len(parent1.layers)):
        if j > p1break:
            layer = parent1.layers[j]
            config = parent1.layers[j].get_config()
            layer = layers.deserialize({
                'class_name': layer.__class__.__name__,
                'config': config
            })
            child2.add(layer)
    return child1, child2
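The four copy loops above repeat the same deserialize pattern; a hedged helper that would collapse them (an assumption, not in the original — note the copies start with fresh weights, nothing is transferred from the parents):

from keras import layers

def copy_layer(layer):
    # Fresh, unbuilt copy of `layer`; weights are NOT carried over.
    return layers.deserialize({'class_name': layer.__class__.__name__,
                               'config': layer.get_config()})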
Example no. 10
  def from_config(cls, config, custom_objects=None):
    wrapper = deserialize(config.pop('wrapper'), custom_objects=custom_objects)
    use_second_order_derivatives = config.get('use_second_order_derivatives', False)
    use_lr_per_step = config.get('use_lr_per_step', False)
    use_kld_regularization = config.get('use_kld_regularization', False)
    train_params = config.get('train_params', True)
    correctly_serialized = config.get('correctly_serialized', False)

    return cls(wrapper, config.get('num_steps', 3),
               use_second_order_derivatives, use_lr_per_step,
               use_kld_regularization, train_params,
               correctly_serialized=correctly_serialized)
Example no. 11
    def build_model():
        from keras.layers import deserialize

        # Set input layer shape
        serialized_model["config"][0]["config"]["batch_input_shape"] \
            = (None, df_train.shape[1])

        model = deserialize(serialized_model)

        model.compile(**compile_params)

        return model
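The serialized_model indexing above ("config"[0]) follows the old Sequential format, where 'config' is a plain list of layer dicts; a hedged sketch of the structure being patched (field values are illustrative):

serialized_model = {
    'class_name': 'Sequential',
    'config': [                              # list of layers, oldest format
        {'class_name': 'Dense',
         'config': {'units': 1,
                    'batch_input_shape': (None, 8)}},  # patched by build_model
    ],
}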
Example no. 12
	def from_config(cls, config):
		model_config = config['model']
		del config['model']
		rc = cls(**config)
		from . import cells
		rc.model = Sequential()
		for layer_config in model_config:
			if 'config' in layer_config and 'name' in layer_config['config']:
				del layer_config['config']['name']
			layer = deserialize(layer_config, cells.__dict__)
			rc.add(layer)
		return rc
Example no. 13
def load_layer(path, custom_layers={}):

    fp = path + '.json'
    config = {}
    layer = None

    with open(fp) as json_file:
        config = json.load(json_file)

    with CustomObjectScope(custom_layers):
        layer = deserialize(config)

    return layer
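A hedged round trip for load_layer: serialize a layer to <path>.json first, then read it back (assumes keras.layers.serialize is available and that no custom layers are involved):

import json
from keras.layers import Dense, serialize

cfg = serialize(Dense(8))               # {'class_name': 'Dense', 'config': {...}}
with open('dense_layer.json', 'w') as f:
    json.dump(cfg, f)

restored = load_layer('dense_layer')    # reads dense_layer.json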
Example no. 14
def model_from_json(json_string, custom_objects=None):
    """Parses a JSON model configuration file and returns a model instance.

    # Arguments
        json_string: JSON string encoding a model configuration.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    # Returns
        A Keras model instance (uncompiled).
    """
    config = json.loads(json_string)
    from keras.layers import deserialize
    return deserialize(config, custom_objects=custom_objects)
Example no. 15
def clone_model(src_model, trainable=False, prefix="", output_names=None):
    outputs = []
    for layer in src_model.layers:
        if type(layer) == layers.InputLayer:
            x = input_layer = Input(batch_shape=layer.input_shape, name=prefix + layer.name)
        else:
            new_layer = layers.deserialize({'class_name': layer.__class__.__name__, 'config': layer.get_config()})
            new_layer.name = prefix + layer.name
            new_layer.trainable = trainable
            x = new_layer(x)
            if output_names is not None and layer.name in output_names:
                outputs.append(x)
            new_layer.set_weights(layer.get_weights())

    return input_layer, (x if output_names is None else outputs)
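clone_model walks model.layers in order and chains each copy onto the previous output, so it assumes a single-input, strictly sequential graph; hedged usage, assuming an older Keras 2.x where layer.name is assignable as done above:

from keras.layers import Input, Dense
from keras.models import Model

x_in = Input(shape=(8,))
src = Model(x_in, Dense(4, name='fc')(x_in))

inp, out = clone_model(src, trainable=False, prefix='frozen_')
frozen = Model(inp, out)   # same weights, copied layers marked non-trainable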
Example no. 16
 def from_config(cls, config, custom_objects=None):
   if 'name' in config:
     name = config['name']
     build_input_shape = config.get('build_input_shape')
     layer_configs = config['layers']
   else:
     name = None
     build_input_shape = None
     layer_configs = config
   model = cls(name=name)
   for layer_config in layer_configs:
     layer = layer_module.deserialize(layer_config,
                                      custom_objects=custom_objects)
     model.add(layer)
   if (not model.inputs and build_input_shape and
       isinstance(build_input_shape, (tuple, list))):
     model.build(build_input_shape)
   return model
Example no. 17
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        #kernel_shape = self.kernel_size + (input_dim, self.filters)

        #self.kernel = self.tied_to.kernel

        if self.layer_inner:
            print('BUILDING!!')
            classconfig = self.layer_inner.pop('config')
            classname = self.layer_inner.pop('class_name')
            li_weights = np.asarray(self.layer_inner.pop('weights').pop('value'))
            li_bias = np.asarray(self.layer_inner.pop('bias').pop('value'))

            print(str((li_weights.shape)))
            print(str((li_bias.shape)))

            self.tied_to = layers.deserialize({'class_name': classname,
                                'config': classconfig})
            self.tied_to.build(input_shape)
            self.tied_to.set_weights([ li_weights, li_bias ])

        self.regularizers = []
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
Example no. 18
def model_from_config(config, custom_objects=None):
    """Instantiates a Keras model from its config.

    # Arguments
        config: Configuration dictionary.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    # Returns
        A Keras model instance (uncompiled).

    # Raises
        TypeError: if `config` is not a dictionary.
    """
    if isinstance(config, list):
        raise TypeError('`model_from_config` expects a dictionary, '
                        'not a list. Maybe you meant to use '
                        '`Sequential.from_config(config)`?')
    from keras.layers import deserialize
    return deserialize(config, custom_objects=custom_objects)
Example no. 19
def model_from_yaml(yaml_string, custom_objects=None):
    """Parses a yaml model configuration file and returns a model instance.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> try:
  ...   import yaml
  ...   config = model.to_yaml()
  ...   loaded_model = tf.keras.models.model_from_yaml(config)
  ... except ImportError:
  ...   pass

  Args:
      yaml_string: YAML string or open file encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).

  Raises:
      ImportError: if yaml module is not found.
  """
    if yaml is None:
        raise ImportError(
            'Requires yaml module installed (`pip install pyyaml`).')
    # The method unsafe_load only exists in PyYAML 5.x+, so which branch of the
    # try block is covered by tests depends on the installed version of PyYAML.
    try:
        # PyYAML 5.x+
        config = yaml.unsafe_load(yaml_string)
    except AttributeError:
        config = yaml.load(yaml_string)
    from keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(config, custom_objects=custom_objects)
Example no. 20
def model_from_json(json_string, custom_objects=None):
    """Parses a JSON model configuration string and returns a model instance.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> config = model.to_json()
  >>> loaded_model = tf.keras.models.model_from_json(config)

  Args:
      json_string: JSON string encoding a model configuration.
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.

  Returns:
      A Keras model instance (uncompiled).
  """
    config = json_utils.decode(json_string)
    from keras.layers import deserialize  # pylint: disable=g-import-not-at-top
    return deserialize(config, custom_objects=custom_objects)
Example no. 21
"""
    layer.set_weights(weights): sets the weights of the layer from a list of Numpy arrays
                                (with the same shapes as the output of get_weights).
    layer.get_config():         returns a dictionary containing the configuration of the layer.
                                The layer can be reinstantiated from its config via:

"""
layer = Dense(32)
config = layer.get_config()
reconstructed_layer = Dense.from_config(config)

# Or:

from keras import layers

config = layer.get_config()
layer = layers.deserialize({
    'class_name': layer.__class__.__name__,
    'config': config
})
"""
If a layer has a single node (i.e. if it isn't a shared layer), you can get its input tensor,
output tensor, input shape and output shape via:

    layer.input
    layer.output
    layer.input_shape
    layer.output_shape

If the layer has multiple nodes
(see: the concept of layer node and shared layers
https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
), you can use the following methods:
Example no. 22
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 15:29:28 2020

@author: s1430
"""

layer = Dense(32)
config = layer.get_config()
reconstructed_layer = Dense.from_config(config)

from keras import layers

config = layer.get_config()
layer = layers.deserialize({
    "class_name": layer.__class__.__name__,
    "config": config
})

layer.input
layer.output
layer.input_shape
layer.output_shape

layer.get_input_at(node_index)
layer.get_output_at(node_index)
layer.get_input_shape_at(node_index)
layer.get_output_shape_at(node_index)
Example no. 23
for model_idx in range(len(all_models)):
    for i, layer in enumerate(all_layers[model_idx]):
        config = layer.get_config()
        # print(config)
        if "batch_input_shape" in config and one_input_length in config[
                "batch_input_shape"]:
            # first specification of batch_input_shape
            config["batch_input_shape"] = (1, 1, one_input_length)

        if "stateful" in config:
            # if it's a recurrent layer, make it stateful
            config["stateful"] = True

        all_layers[model_idx][i] = deserialize({
            "class_name": layer.__class__.__name__,
            "config": config
        })

all_models = [Sequential(layers) for layers in all_layers]
for model_idx, model in enumerate(all_models):
    model.set_weights(all_weights[model_idx])
    # print(all_models[model_idx].summary())

year_count, day_count = 0, -1
days_in_stk_yr = 252
total_cash = 0
traders = [Trader(init_cash=TEST_CASH, ticker=tick) for tick in test_secs]
prev_trader_values = [0] * len(traders)

# split into trading years
for year in [
Example no. 24
def copy_layer(layer):
    return deserialize(wrap_config(layer))  # Keras2
Example no. 25
def copy_layer(layer): return deserialize(wrap_config(layer))  # Keras2


def copy_layers(layers): return [copy_layer(layer) for layer in layers]
Example no. 26
def copy_layer(layer): return layers.deserialize(wrap_config(layer))


def copy_layers(layers): return [copy_layer(layer) for layer in layers]
Example no. 27
def add_model_output(modelIn, mode=None, num_add=None, activation=None):
    """ This function modifies the last dense layer in the passed keras model. The modification includes adding units and optionally changing the activation function.

    Parameters
    ----------
    modelIn : keras model
        Keras model to be modified.
    mode : string
        Mode to modify the layer. It could be:
        'abstain' for adding an arbitrary number of units for the abstention optimization strategy.
        'qtl' for quantile regression which needs the outputs to be tripled.
        'het' for heteroscedastic regression which needs the outputs to be doubled.
    num_add : integer
        Number of units to add. This only applies to the 'abstain' mode.
    activation : string
        String with keras specification of activation function (e.g. 'relu', 'sigmoid', 'softmax', etc.)

    Returns
    -------
    modelOut : keras model
        Keras model with the last dense layer modified as specified. If no mode is specified, the same model is returned. If the mode is not one of 'abstain', 'qtl' or 'het', an exception is raised.
    """

    if mode is None:
        return modelIn

    numlayers = len(modelIn.layers)
    # Find last dense layer
    i = -1
    while 'dense' not in (modelIn.layers[i].name) and ((i + numlayers) > 0):
        i -= 1
    # Minimal verification about the validity of the layer found
    assert ((i + numlayers) >= 0)
    assert ('dense' in modelIn.layers[i].name)

    # Compute new output size
    if mode == 'abstain':
        assert num_add is not None
        new_output_size = modelIn.layers[i].output_shape[-1] + num_add
    elif mode == 'qtl':  # for quantile UQ
        new_output_size = 3 * modelIn.layers[i].output_shape[-1]
    elif mode == 'het':  # for heteroscedastic UQ
        new_output_size = 2 * modelIn.layers[i].output_shape[-1]
    else:
        raise Exception(
            'ERROR ! Type of mode specified for adding outputs to the model: '
            + mode + ' not implemented... Exiting')

    # Recover current layer options
    config = modelIn.layers[i].get_config()
    # Update number of units
    config['units'] = new_output_size
    # Update activation function if requested
    if activation is not None:
        config['activation'] = activation
    # Bias initialization seems to help het and qtl
    if mode == 'het' or mode == 'qtl':
        config['bias_initializer'] = 'ones'
    # Create new Dense layer
    reconstructed_layer = Dense.from_config(config)
    # Connect new Dense last layer to previous one-before-last layer
    additional = reconstructed_layer(modelIn.layers[i - 1].output)
    # If the layer to replace is not the last layer, add the remainder layers
    if i < -1:
        for j in range(i + 1, 0):
            config_j = modelIn.layers[j].get_config()
            # deserialize already constructs the layer; no second
            # from_config() call is needed.
            reconstructed_layer = layers.deserialize({
                'class_name': modelIn.layers[j].__class__.__name__,
                'config': config_j
            })
            additional = reconstructed_layer(additional)

    modelOut = Model(modelIn.input, additional)

    return modelOut
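Hedged usage of add_model_output, assuming the Keras imports used above (Dense, Model, layers) are in scope; the layer names are illustrative, chosen so the 'dense' name search in the function finds the head:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(16,))
hid = Dense(32, activation='relu', name='dense_hidden')(inp)
out = Dense(4, name='dense_out')(hid)
base = Model(inp, out)

het = add_model_output(base, mode='het')        # output doubles: 4 -> 8
abst = add_model_output(base, mode='abstain', num_add=1,
                        activation='softmax')   # 4 -> 5 units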
Example no. 28
def import_json(request):
    loadFromText = False
    if request.method == 'POST':
        if ('file' in request.FILES):
            f = request.FILES['file']
        elif 'sample_id' in request.POST:
            try:
                f = open(os.path.join(settings.BASE_DIR,
                                      'example', 'keras',
                                      request.POST['sample_id'] + '.json'), 'r')
            except Exception:
                return JsonResponse({'result': 'error',
                                     'error': 'No JSON model file found'})
        elif 'config' in request.POST:
            loadFromText = True
        elif 'url' in request.POST:
            try:
                url = urlparse(request.POST['url'])
                if url.netloc == 'github.com':
                    url = url._replace(netloc='raw.githubusercontent.com')
                    url = url._replace(path=url.path.replace('blob/', ''))
                f = urllib2.urlopen(url.geturl())
            except Exception as ex:
                return JsonResponse({'result': 'error', 'error': 'Invalid URL\n' + str(ex)})
        try:
            if loadFromText is True:
                model = json.loads(request.POST['config'])
            else:
                model = json.load(f)
        except Exception:
            return JsonResponse({'result': 'error', 'error': 'Invalid JSON'})

    model = model_from_json(json.dumps(model), custom_objects={'LRN': LRN})
    layer_map = {
        'InputLayer': Input,
        'Dense': Dense,
        'Activation': Activation,
        'softmax': Activation,
        'selu': Activation,
        'softplus': Activation,
        'softsign': Activation,
        'relu': Activation,
        'tanh': Activation,
        'sigmoid': Activation,
        'hard_sigmoid': Activation,
        'linear': Activation,
        'Dropout': Dropout,
        'Flatten': Flatten,
        'Reshape': Reshape,
        'Permute': Permute,
        'RepeatVector': RepeatVector,
        'ActivityRegularization': ActivityRegularization,
        'Masking': Masking,
        'Conv1D': Convolution,
        'Conv2D': Convolution,
        'Conv2DTranspose': Deconvolution,
        'Conv3D': Convolution,
        'SeparableConv2D': DepthwiseConv,
        'UpSampling1D': Upsample,
        'UpSampling2D': Upsample,
        'UpSampling3D': Upsample,
        'ZeroPadding1D': Padding,
        'ZeroPadding2D': Padding,
        'ZeroPadding3D': Padding,
        'MaxPooling1D': Pooling,
        'MaxPooling2D': Pooling,
        'MaxPooling3D': Pooling,
        'AveragePooling1D': Pooling,
        'AveragePooling2D': Pooling,
        'AveragePooling3D': Pooling,
        'GlobalMaxPooling1D': Pooling,
        'GlobalAveragePooling1D': Pooling,
        'GlobalMaxPooling2D': Pooling,
        'GlobalAveragePooling2D': Pooling,
        'LocallyConnected1D': LocallyConnected,
        'LocallyConnected2D': LocallyConnected,
        'SimpleRNN': Recurrent,
        'GRU': Recurrent,
        'LSTM': Recurrent,
        'Embedding': Embed,
        'Add': Eltwise,
        'Multiply': Eltwise,
        'Average': Eltwise,
        'Maximum': Eltwise,
        'Concatenate': Concat,
        'Dot': Eltwise,
        'LeakyReLU': LeakyReLU,
        'PReLU': PReLU,
        'elu': ELU,
        'ELU': ELU,
        'ThresholdedReLU': ThresholdedReLU,
        'BatchNormalization': BatchNorm,
        'GaussianNoise': GaussianNoise,
        'GaussianDropout': GaussianDropout,
        'AlphaDropout': AlphaDropout,
        'TimeDistributed': TimeDistributed,
        'Bidirectional': Bidirectional,
        'LRN': lrn
    }

    hasActivation = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'Dense', 'LocallyConnected1D',
                     'LocallyConnected2D', 'SeparableConv2D', 'LSTM', 'SimpleRNN', 'GRU']

    net = {}
    # Add dummy input layer if sequential model
    if (isinstance(model, Sequential)):
        input_layer = model.layers[0].inbound_nodes[0].inbound_layers[0]
        # If embedding is the first layer, the input has shape (None, None)
        if (model.layers[0].__class__.__name__ == 'Embedding'):
            input_layer.batch_input_shape = (None, model.layers[0].input_dim)
        net[input_layer.name] = Input(input_layer)
        net[input_layer.name]['connection']['output'] = [model.layers[0].name]
    for idx, layer in enumerate(model.layers):
        name = ''
        class_name = layer.__class__.__name__
        wrapped = False
        if (class_name in layer_map):
            # This is to handle wrappers and the wrapped layers.
            if class_name == 'InputLayer':
                found = 0
                for find_layer in model.layers:
                    if len(find_layer.inbound_nodes[0].inbound_layers):
                        if find_layer.inbound_nodes[0].inbound_layers[0].__class__.__name__ == 'InputLayer':
                            net[layer.name] = Input(layer)
                            if find_layer.__class__.__name__ in ['Bidirectional', 'TimeDistributed']:
                                net[layer.name]['connection']['output'] = [
                                    find_layer.name]
                                found = 1
                                break
                if not found:
                    net[layer.name] = Input(layer)

            elif class_name in ['Bidirectional', 'TimeDistributed']:
                net[layer.name] = layer_map[class_name](layer)
                wrapped_layer = layer.get_config()['layer']
                name = wrapped_layer['config']['name']
                new_layer = deserialize({
                    'class_name': wrapped_layer['class_name'],
                    'config': wrapped_layer['config']
                })
                new_layer.wrapped = True
                new_layer.wrapper = [layer.name]
                if new_layer.activation.func_name != 'linear':
                    net[name + wrapped_layer['class_name']
                        ] = layer_map[wrapped_layer['class_name']](new_layer)
                    net[name] = layer_map[new_layer.activation.func_name](
                        new_layer)
                    net[name + wrapped_layer['class_name']
                        ]['connection']['output'].append(name)
                    net[name]['connection']['input'] = [
                        name + wrapped_layer['class_name']]
                    net[layer.name]['connection']['output'] = [
                        name + wrapped_layer['class_name']]
                else:
                    net[name] = layer_map[wrapped_layer['class_name']](
                        new_layer)
                    net[name]['connection']['input'] = [layer.name]
                    net[layer.name]['connection']['output'] = [name]
                if len(model.layers) >= idx + 2:
                    net[name]['connection']['output'] = [
                        model.layers[idx + 1].name]
                    model.layers[idx +
                                 1].inbound_nodes[0].inbound_layers = [new_layer]
                else:
                    net[name]['connection']['output'] = []
                wrapped = True
            # This extra logic is to handle connections if the layer has an Activation
            elif (class_name in hasActivation and layer.activation.func_name != 'linear'):
                net[layer.name + class_name] = layer_map[class_name](layer)
                net[layer.name] = layer_map[layer.activation.func_name](layer)
                net[layer.name +
                    class_name]['connection']['output'].append(layer.name)
                name = layer.name + class_name
            # To check if a Scale layer is required
            elif (class_name == 'BatchNormalization' and (
                    layer.center or layer.scale)):
                net[layer.name + class_name] = layer_map[class_name](layer)
                net[layer.name] = Scale(layer)
                net[layer.name +
                    class_name]['connection']['output'].append(layer.name)
                name = layer.name + class_name
            else:
                net[layer.name] = layer_map[class_name](layer)
                name = layer.name
            if (layer.inbound_nodes[0].inbound_layers) and not wrapped:
                for node in layer.inbound_nodes[0].inbound_layers:
                    net[node.name]['connection']['output'].append(name)
        else:
            return JsonResponse({'result': 'error',
                                 'error': 'Cannot import layer of ' + layer.__class__.__name__ + ' type'})
    # collect names of all zeroPad layers
    zeroPad = []
    # Transfer parameters and connections from zero pad
    # The 'pad' param is a list with up to 3 elements
    for node in net:
        if (net[node]['info']['type'] == 'Pad'):
            net[net[node]['connection']['output'][0]]['connection']['input'] = \
                net[node]['connection']['input']
            net[net[node]['connection']['input'][0]]['connection']['output'] = \
                net[node]['connection']['output']
            net[net[node]['connection']['output'][0]]['params']['pad_w'] += \
                net[node]['params']['pad'][0]
            if (net[net[node]['connection']['output'][0]]['params']['layer_type'] == '2D'):
                net[net[node]['connection']['output'][0]]['params']['pad_h'] += \
                    net[node]['params']['pad'][1]
            elif (net[net[node]['connection']['output'][0]]['params']['layer_type'] == '3D'):
                net[net[node]['connection']['output'][0]]['params']['pad_h'] += \
                    net[node]['params']['pad'][1]
                net[net[node]['connection']['output'][0]]['params']['pad_d'] += \
                    net[node]['params']['pad'][2]
            zeroPad.append(node)
        # Switching connection order to handle visualization
        elif (net[node]['info']['type'] == 'Eltwise'):
            net[node]['connection']['input'] = net[node]['connection']['input'][::-1]
    for node in zeroPad:
        net.pop(node, None)
    return JsonResponse({'result': 'success', 'net': net, 'net_name': model.name})
Example no. 29
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        # kernel_shape = self.kernel_size + (input_dim, self.filters)

        # self.kernel = self.tied_to.kernel

        if self.layer_inner:
            print('BUILDING!!')
            print(type(self.layer_inner))
            classconfig = self.layer_inner.pop('config')
            classname = self.layer_inner.pop('class_name')
            # Note: unlike the variant in Example no. 17, 'weights' and 'bias'
            # are stored here as raw lists, not {'value': ...} wrappers.
            li_weights = np.asarray(self.layer_inner.pop('weights'))
            li_bias = np.asarray(self.layer_inner.pop('bias'))

            self.tied_to = layers.deserialize({
                'class_name': classname,
                'config': classconfig
            })
            self.tied_to.build(input_shape)
            self.tied_to.set_weights([li_weights, li_bias])

        self.regularizers = []
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
Example no. 30
def get_copy_of_layer(layer, verbose=False):
    from keras.layers.core import Activation
    from keras import layers
    config = layer.get_config()

    # Non-standard relu6 layer (from MobileNet)
    if layer.__class__.__name__ == 'Activation':
        if config['activation'] == 'relu6':
            if get_keras_sub_version() == 1:
                from keras.applications.mobilenet import relu6
            else:
                from keras_applications.mobilenet import relu6
            layer_copy = Activation(relu6, name=layer.name)
            return layer_copy

    # DeepLabV3+ non-standard layer
    if layer.__class__.__name__ == 'BilinearUpsampling':
        from neural_nets.deeplab_v3_plus_model import BilinearUpsampling
        layer_copy = BilinearUpsampling(upsampling=config['upsampling'],
                                        output_size=config['output_size'],
                                        name=layer.name)
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'UpsampleLike':
        from keras_retinanet.layers import UpsampleLike
        layer_copy = UpsampleLike(name=layer.name)
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'Anchors':
        from keras_retinanet.layers import Anchors
        layer_copy = Anchors(name=layer.name,
                             size=config['size'],
                             stride=config['stride'],
                             ratios=config['ratios'],
                             scales=config['scales'])
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'RegressBoxes':
        from keras_retinanet.layers import RegressBoxes
        layer_copy = RegressBoxes(name=layer.name,
                                  mean=config['mean'],
                                  std=config['std'])
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'PriorProbability':
        from keras_retinanet.layers import PriorProbability
        layer_copy = PriorProbability(name=layer.name,
                                      mean=config['mean'],
                                      std=config['std'])
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'ClipBoxes':
        from keras_retinanet.layers import ClipBoxes
        layer_copy = ClipBoxes(name=layer.name)
        return layer_copy

    # RetinaNet non-standard layer
    if layer.__class__.__name__ == 'FilterDetections':
        from keras_retinanet.layers import FilterDetections
        layer_copy = FilterDetections(
            name=layer.name,
            max_detections=config['max_detections'],
            nms_threshold=config['nms_threshold'],
            score_threshold=config['score_threshold'],
            nms=config['nms'],
            class_specific_filter=config['class_specific_filter'],
            trainable=config['trainable'],
            parallel_iterations=config['parallel_iterations'])
        return layer_copy

    layer_copy = layers.deserialize({
        'class_name': layer.__class__.__name__,
        'config': config
    })
    layer_copy.name = layer.name
    return layer_copy
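get_keras_sub_version is not shown here; a plausible stand-in based on the keras version string (an assumption — relu6 moved from keras.applications.mobilenet to the keras_applications package around Keras 2.2):

def get_keras_sub_version():
    # Hypothetical helper: 1 for Keras <= 2.1.x, 2 for later releases.
    import keras
    try:
        major, minor = keras.__version__.split('.')[:2]
        return 1 if (int(major), int(minor)) <= (2, 1) else 2
    except ValueError:
        return 2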
Example no. 31
    def from_config(cls, config, custom_objects=None):
        wrapper = deserialize(config.pop('wrapper'),
                              custom_objects=custom_objects)

        return cls(wrapper)
Example no. 32
    def create(cls):
        if cls.instance is None:
            cls.instance = cls()

            # Get ip address and create model according to ip config file.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                s.connect(('8.8.8.8', 80))
                ip = s.getsockname()[0]
            except Exception:
                ip = '127.0.0.1'
            finally:
                s.close()

            node_config = ConfigParser.ConfigParser()
            node_config.read(HOME + '/node.cfg')
            sys_model_name = node_config.get('Node Config', 'model', 0)
            sys_node_count = node_config.get('Node Config', 'system', 0)
            node_id = node_config.get('IP Node', ip, 0)

            with open(DIR_PATH + '/resource/system/' + sys_model_name + '/' +
                      sys_node_count + '/config.json') as f:
                system_config = yaml.safe_load(f)[node_id]
                cls.instance.id = node_id

                model = Sequential()

                # The model config is predefined. Extract each layer's config
                # according to the config from system config.
                with open(DIR_PATH + '/resource/model/' + sys_model_name +
                          '/config.json') as f2:
                    model_config = yaml.safe_load(f2)
                    for layer_name in system_config['model']:
                        class_name = model_config[layer_name]['class_name']
                        config = model_config[layer_name]['config']
                        input_shape = model_config[layer_name]['input_shape']
                        layer = layers.deserialize({
                            'class_name': class_name,
                            'config': config
                        })
                        model.add(InputLayer(input_shape))
                        model.add(layer)

                cls.instance.model = model if len(model.layers) != 0 else None
                cls.log(cls.instance, 'model finishes', model.summary())

                for n_id in system_config['devices']:
                    ip = node_config.get('Node IP', n_id, 0)
                    cls.instance.ip.put(ip)

                cls.instance.merge = int(system_config['merge'])
                cls.instance.split = int(system_config['split'])
                cls.instance.op = system_config['op']

                if cls.instance.model:
                    shape = list(model.input_shape[1:])
                    shape[-1] = (shape[-1] / cls.instance.merge
                                 if cls.instance.op == 'cat' else shape[-1])
                    cls.instance.input_shape = tuple(shape)
                if 'input_shape' in system_config:
                    cls.instance.input_shape = ([
                        int(entry)
                        for entry in system_config['input_shape'].split(' ')
                    ])
                    cls.instance.force_generate = True

        return cls.instance
Example no. 33
    def build_encoder_decoder_inference(self, model, sentence_encoder, include_sentence_encoder=True, attention=False):

        latent_dim = 0
        initial_encoder_states = []
        initial_input = []

        encoder_inputs = Input(shape=model.get_layer("encoder_input_layer").get_config()['batch_input_shape'][1:])
        print("encoder inputs shape: ", encoder_inputs.shape)

        mask_layer = Masking(mask_value=0, name="mask_layer")
        mask_output = mask_layer(encoder_inputs)

        encoder_lstm_prefix = "encoder_layer_"
        num_encoder = self.get_number_of_layers(model, encoder_lstm_prefix)
        print("num: ", num_encoder)
        for i in range(num_encoder):
            encoder = model.get_layer(encoder_lstm_prefix + str(i))
            weights = encoder.get_weights()
            config = encoder.get_config()
            config['dropout'] = 0.0
            config['recurrent_dropout'] = 0.0
            encoder = layers.deserialize({'class_name': encoder.__class__.__name__, 'config': config})

            if i == 0:
                encoder_outputs = encoder(mask_output)
                encoder.set_weights(weights)
                latent_dim = encoder.get_config()['units']
            else:
                encoder_outputs = encoder(encoder_outputs[0])
                encoder.set_weights(weights)

        encoder_states = encoder_outputs[1:]

        if include_sentence_encoder:

            encoder_sentence_inputs = Input(shape=(22,))
            initial_input = [encoder_inputs, encoder_sentence_inputs]

            sentence_encoder_embedding_layer = model.get_layer('sentence_embedding_layer')
            sentence_embedding_outputs = sentence_encoder_embedding_layer(encoder_sentence_inputs)

            if attention:
                sentence_encoder_outputs, initial_encoder_states, new_latent_dim = sentence_encoder.get_last_layer_inference(
                    model,
                    encoder_states,
                    sentence_embedding_outputs,
                    attention=attention)
                # This is just for now, because our model accepts only the hidden state from the image encoder
                initial_encoder_states = encoder_states[0]
            else:
                initial_encoder_states, new_latent_dim = sentence_encoder.get_last_layer_inference(model,
                                                                                                   encoder_states,
                                                                                                   sentence_embedding_outputs)
        else:
            initial_input = encoder_inputs
            initial_encoder_states = encoder_states
            new_latent_dim = latent_dim

        if attention:
            encoder_model = Model(initial_input, [sentence_encoder_outputs, initial_encoder_states])
        else:
            encoder_model = Model(initial_input, initial_encoder_states)

        decoder_inputs = Input(shape=(None,))

        embedding_layer = model.get_layer("decoder_embedding_layer")
        embedding_outputs = embedding_layer(decoder_inputs)

        decoder_prefix = "decoder_layer_"
        num_decoder = self.get_number_of_layers(model, decoder_prefix)

        if len(encoder_states) == 1:
            # decoder_states_inputs = [decoder_state_input_h1, decoder_state_input_h2]
            decoder_states_inputs = []
            for i in range(num_decoder):
                decoder_states_inputs.append(Input(shape=(new_latent_dim,)))
        else:  # TODO : test if this works with stacked LSTM model
            # decoder_states_inputs = [decoder_state_input_h1, decoder_state_input_c]
            decoder_states_inputs = []
            for i in range(num_decoder):
                decoder_states_inputs.append(Input(shape=(new_latent_dim,)))
                decoder_states_inputs.append(Input(shape=(new_latent_dim,)))
        # NOTE: sentence_encoder_outputs is only defined when attention=True.
        decoder_states_inputs.append(Input(shape=(sentence_encoder_outputs.shape[1], sentence_encoder_outputs.shape[2],)))

        decoder_states = []
        decoder_outputs = embedding_outputs
        for i in range(num_decoder):

            decoder = model.get_layer(decoder_prefix + str(i))
            weights = decoder.get_weights()
            config = decoder.get_config()
            config['dropout'] = 0.0
            config['recurrent_dropout'] = 0.0

            if attention:
                if i == num_decoder - 1:
                    decoder = layers.deserialize({'class_name': decoder.__class__.__name__, 'config': config},
                                                 custom_objects={'AttentionGRU': AttentionGRU})
                    decoder_outputs = decoder(decoder_outputs, initial_state=decoder_states_inputs[i],
                                              constants=decoder_states_inputs[-1])
                else:
                    decoder = layers.deserialize({'class_name': decoder.__class__.__name__, 'config': config})
                    decoder_outputs = decoder(decoder_outputs, initial_state=decoder_states_inputs[i])
            else:
                decoder = layers.deserialize({'class_name': decoder.__class__.__name__, 'config': config})
                decoder_outputs = decoder(decoder_outputs, initial_state=decoder_states_inputs[i])

            decoder.set_weights(weights)
            decoder_states = decoder_states + list(decoder_outputs[1:])
            decoder_outputs = decoder_outputs[0]

        decoder_dense = model.get_layer("dense_layer")
        decoder_outputs = decoder_dense(decoder_outputs)
        decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)

        # return im_model, sent_model
        return encoder_model, decoder_model