Example #1
    def __init__(self,
                 val=1.0,
                 min_max=None,
                 inputs=None,
                 name=None,
                 non_neg=None):

        # Validate inputs: each entry must be a Variable or a Functional.
        inputs = to_list(inputs)
        if not all(isinstance(x, (Variable, Functional)) for x in inputs):
            raise TypeError('Expected `Variable` or `Functional` objects as inputs.')

        # Collect input tensors and layers from every input object.
        input_tensors, layers = [], []
        for v in inputs:
            input_tensors += v.outputs
            layers += v.layers

        # Parameters default to being constrained non-negative.
        if non_neg is None:
            non_neg = True
        layers.append(
            ParameterBase(val=val, min_max=min_max, non_neg=non_neg,
                          name=name))

        # Concatenate multiple input tensors; a single tensor passes through.
        if len(input_tensors) > 1:
            lay = Concatenate()
            lay.name = "conct_" + lay.name.split("_")[-1]
            lay_input = lay(input_tensors)
        else:
            lay_input = input_tensors[0]
        outputs = layers[-1](lay_input)

        super(Parameter, self).__init__(inputs=to_list(input_tensors),
                                        outputs=to_list(outputs),
                                        layers=to_list(layers))
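This matches the constructor of SciANN's `Parameter` functional, which attaches a trainable scalar to the graph of its inputs. A minimal usage sketch, assuming `Variable` and `Parameter` come from the `sciann` package (the variable names are illustrative):

from sciann import Variable, Parameter

x = Variable('x')                      # network input
c = Parameter(val=2.0, inputs=[x],     # trainable scalar attached to x
              name='c', non_neg=True)  # constrained to stay non-negative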
Example #2
    def __init__(self,
                 val=1.0,
                 inputs=None,
                 name=None,
                 non_neg=None):

        # Validate inputs: each entry must be a Variable.
        inputs = to_list(inputs)
        if not all(isinstance(x, Variable) for x in inputs):
            raise TypeError('Expected `Variable` objects as inputs.')

        inputs_tensors, layers = [], []
        for v in inputs:
            inputs_tensors += v.outputs
            layers += v.layers

        if non_neg is None:
            non_neg = True
        layers.append(
            Parameter(val=val, non_neg=non_neg, name=name)
        )

        # Keras' Concatenate requires at least two tensors, so pass a
        # single tensor through unchanged.
        if len(inputs_tensors) > 1:
            lay = Concatenate()
            lay.name = "conct_" + lay.name.split("_")[-1]
            lay_input = lay(inputs_tensors)
        else:
            lay_input = inputs_tensors[0]
        outputs = layers[-1](lay_input)

        super(ToBeInferred, self).__init__(
            inputs=to_list(inputs_tensors),
            outputs=to_list(outputs),
            layers=to_list(layers)
        )
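Example #2 is a close variant of Example #1: it restricts inputs to `Variable` objects and drops the `min_max` bound, but otherwise assembles the graph the same way. Note that Keras' `Concatenate` raises an error when called with fewer than two tensors, which is why a lone input tensor must bypass the concatenation step.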
Example #3
    def __init__(self,
                 fields=None,
                 variables=None,
                 hidden_layers=None,
                 activation="linear",
                 enrichment="linear",
                 kernel_initializer=default_kernel_initializer,
                 bias_initializer=default_bias_initializer,
                 dtype=None,
                 trainable=True,
                 **kwargs):
        # Check the data type and update the backend default if needed.
        if dtype is None:
            dtype = K.floatx()
        elif K.floatx() != dtype:
            K.set_floatx(dtype)
        # check for copy constructor.
        if all(x in kwargs for x in ('inputs', 'outputs', 'layers')):
            self._inputs = kwargs['inputs']
            self._outputs = kwargs['outputs']
            self._layers = kwargs['layers']
            return
        # Prepare fields.
        fields = to_list(fields)
        if all(isinstance(fld, str) for fld in fields):
            outputs = [
                Field(
                    name=fld,
                    dtype=dtype,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    trainable=trainable,
                ) for fld in fields
            ]
        elif all(isinstance(fld, Field) for fld in fields):
            outputs = fields
        else:
            raise TypeError('Please provide a list of field names as '
                            '`str` or `Field` objects.')
        # prepare inputs/outputs/layers.
        inputs = []
        layers = []
        variables = to_list(variables)
        if all(isinstance(var, Functional) for var in variables):
            for var in variables:
                inputs += var.outputs
                layers += var.layers
        else:
            raise TypeError(
                "Input error: Please provide a `list` of `Functional`s. \n"
                "Provided - {}".format(variables))
        # prepare hidden layers.
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # Check and convert activation functions to proper format.
        assert not isinstance(activation, list), \
            'Expected an activation function name, not a "list".'
        afunc = get_activation(activation)

        # check enrichment functions.
        enrichment = to_list(enrichment)
        efuncs = get_activation(enrichment)

        # Input layers.
        if len(inputs) == 1:
            net_input = inputs[0]
        else:
            layer = Concatenate()
            layer.name = "conct_" + layer.name.split("_")[-1]
            net_input = layer(inputs)

        # Define the output network.
        net = []
        for enrich in efuncs:
            net.append(net_input)
            for nLay, nNeuron in enumerate(hidden_layers):
                # Add the layer.
                layer = Dense(
                    nNeuron,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    trainable=trainable,
                    dtype=dtype,
                )
                layer.name = "D{:d}b_".format(nNeuron) + layer.name.split(
                    "_")[-1]
                layers.append(layer)
                net[-1] = layer(net[-1])
                # Apply the activation (skipped after the last hidden layer).
                if nLay < len(hidden_layers) - 1 and afunc.__name__ != 'linear':
                    layer = Activation(afunc)
                    layer.name = "{}_".format(
                        afunc.__name__) + layer.name.split("_")[-1]
                    layers.append(layer)
                    net[-1] = layer(net[-1])

            # Apply the enrichment activation to this branch.
            if enrich.__name__ != 'linear':
                layer = Activation(enrich)
                layer.name = "{}_".format(
                    enrich.__name__) + layer.name.split("_")[-1]
                layers.append(layer)
                net[-1] = layer(net[-1])

        # store output layers.
        for out in outputs:
            layers.append(out)

        # Assign to the output variable
        if len(net) == 1:
            net_output = net[0]
        else:
            layer = Concatenate()
            layer.name = "conct_" + layer.name.split("_")[-1]
            net_output = layer(net)

        # Define the final outputs of each network
        outputs = [out(net_output) for out in outputs]

        self._inputs = inputs
        self._outputs = outputs
        self._layers = layers
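This corresponds to SciANN's `Functional` constructor: it builds a dense network from input variables to output fields, optionally wrapping the hidden stack in one or more "enrichment" activations and concatenating the enriched branches. A minimal usage sketch, assuming `Variable` and `Functional` come from the `sciann` package (the field and variable names are illustrative):

from sciann import Variable, Functional

x = Variable('x')
t = Variable('t')
u = Functional('u', variables=[x, t],   # one output field named 'u'
               hidden_layers=[20, 20],  # two hidden layers of 20 units
               activation='tanh')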
Example #4
import re

from tensorflow.keras.layers import Concatenate  # assumes tf.keras; adjust for standalone Keras
from tensorflow.keras.models import Model


def insert_layer_nonseq(model,
                        layer_regex,
                        insert_layer_factory,
                        insert_layer_name=None,
                        position='after',
                        special=False,
                        special_layer=None):

    # Auxiliary dictionary to describe the network graph
    network_dict = {'input_layers_of': {}, 'new_output_tensor_of': {}}

    # Map each layer to the names of the layers feeding into it.
    # Note: this relies on the private `_outbound_nodes` attribute.
    for layer in model.layers:
        for node in layer._outbound_nodes:
            layer_name = node.outbound_layer.name
            if layer_name not in network_dict['input_layers_of']:
                network_dict['input_layers_of'].update(
                    {layer_name: [layer.name]})
            else:
                network_dict['input_layers_of'][layer_name].append(layer.name)

    # Set the output tensor of the input layer
    network_dict['new_output_tensor_of'].update(
        {model.layers[0].name: model.input})

    # Iterate over all layers after the input
    for layer in model.layers[1:]:

        # Determine input tensors
        layer_input = [
            network_dict['new_output_tensor_of'][layer_aux]
            for layer_aux in network_dict['input_layers_of'][layer.name]
        ]
        if len(layer_input) == 1:
            layer_input = layer_input[0]

        # Insert layer if name matches the regular expression
        if re.match(layer_regex, layer.name):
            if position == 'replace':
                x = layer_input
            elif position == 'after':
                x = layer(layer_input)
            elif position == 'before':
                # Handled below: the new layer is applied first, then `layer`.
                pass
            else:
                raise ValueError('position must be: before, after or replace')

            if not special:
                new_layer = insert_layer_factory()
                if insert_layer_name:
                    new_layer.name = insert_layer_name
                elif position == 'replace':
                    new_layer.name = layer.name
                else:
                    # Derive a unique name; reusing `layer.name` verbatim would
                    # collide with the still-present matched layer.
                    new_layer.name = layer.name + '_' + new_layer.name
                x = new_layer(x)
            else:
                # Concatenate is a layer class: instantiate it, then call it on
                # the list of tensors to obtain the output tensor.
                new_layer = Concatenate()
                if insert_layer_name:
                    new_layer.name = insert_layer_name
                else:
                    new_layer.name = layer.name + '_' + new_layer.name
                x = new_layer([layer_input, special_layer])

            if position == 'before':
                x = layer(x)
        else:
            x = layer(layer_input)

        # Set new output tensor (the original one, or the one of the inserted
        # layer)
        network_dict['new_output_tensor_of'].update({layer.name: x})

    return Model(inputs=model.inputs, outputs=x)
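A minimal usage sketch, given an existing functional `model` (the regex, factory name, and `Dropout` layer are illustrative; any zero-argument factory returning a fresh layer works):

from tensorflow.keras.layers import Dropout

def dropout_factory():
    return Dropout(rate=0.3)

# Insert a fresh Dropout layer after every layer whose name matches 'conv.*'.
patched = insert_layer_nonseq(model, r'conv.*', dropout_factory, position='after')

Because the factory is called once per matched layer, each insertion gets its own layer instance; passing a fixed `insert_layer_name` would collide when the pattern matches more than one layer.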