Example #1
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 batch_input_shape=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None):
        #self.input_spec = None
        self.supports_masking = False
        self.uses_learning_phase = False
        self.trainable = False
        self.built = True

        self._inbound_nodes = []
        self._outbound_nodes = []

        self.trainable_weights = []
        self.non_trainable_weights = []
        self.constraints = {}

        self.sparse = sparse

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        if not batch_input_shape:
            assert input_shape, 'An Input layer should be passed either a `batch_input_shape` or an `input_shape`.'
            batch_input_shape = (batch_size, ) + tuple(input_shape)
        else:
            batch_input_shape = tuple(batch_input_shape)
        if not dtype:
            dtype = K.floatx()

        self.batch_input_shape = batch_input_shape
        self.dtype = dtype

        # NOTE: any `input_tensor` argument passed in is ignored here;
        # a fresh placeholder is always created for this layer.
        input_tensor = K.placeholder(shape=batch_input_shape,
                                     dtype=dtype,
                                     sparse=self.sparse,
                                     name=self.name)

        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)
        shape = input_tensor._keras_shape
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[shape],
             output_shapes=[shape])
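
For context, this constructor mirrors what the public Keras 2.x functional API does when an input tensor is created. Below is a minimal sketch using only the public API (Input, Dense, Model), which exercises the same Node machinery:

from keras.layers import Input, Dense
from keras.models import Model

# Input() builds an input layer much like the one above and returns the
# placeholder tensor that its Node registers.
x = Input(shape=(32,))
y = Dense(4)(x)  # the tensor carries _keras_history, so downstream layers accept it
model = Model(inputs=x, outputs=y)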
Example #2
    def __init__(self,
                 shape,
                 my_initializer='RandomNormal',
                 name=None,
                 mult=1.0,
                 **kwargs):
        self.shape = [1, *shape]  # prepend a singleton batch dimension
        self.my_initializer = my_initializer
        self.mult = mult

        if not name:
            prefix = 'param'
            name = '%s_%d' % (prefix, K.get_uid(prefix))
        Layer.__init__(self, name=name, **kwargs)

        # Create a trainable weight variable for this layer.
        with K.name_scope(self.name):
            self.kernel = self.add_weight(name='kernel',
                                          shape=self.shape,
                                          initializer=self.my_initializer,
                                          trainable=True)

        # prepare output tensor, which is essentially the kernel.
        output_tensor = self.kernel * self.mult
        output_tensor._keras_shape = self.shape
        output_tensor._uses_learning_phase = False
        output_tensor._keras_history = (self, 0, 0)
        output_tensor._batch_input_shape = self.shape

        self.trainable = True
        self.built = True
        self.is_placeholder = False

        # create new node
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[],
             output_tensors=[output_tensor],
             input_masks=[],
             output_masks=[None],
             input_shapes=[],
             output_shapes=[self.shape])
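
A hedged usage sketch; `ParamLayer` is an assumed name for the Layer subclass that owns the __init__ above (the class name is not shown in the snippet):

# Hypothetical: ParamLayer stands for the class defined above.
p = ParamLayer(shape=(8, 8), my_initializer='RandomNormal', mult=0.5)
# The constructor registered a Node with exactly one output tensor, so the
# standard Layer.output property resolves to kernel * mult, shape (1, 8, 8).
t = p.output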
Example #3
    def __init__(self, 
                 size,
                 initializer='glorot_uniform',
                 regularizer=None,
                 name=None,
                 **kwargs):
        self.size = tuple(size)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)

        if not name:
            prefix = 'shared_weight'
            name = prefix + '_' + str(K.get_uid(prefix))

        Layer.__init__(self, name=name, **kwargs)
        with K.name_scope(self.name):
            self.kernel = self.add_weight(shape=self.size,
                                          initializer=self.initializer,
                                          name='kernel',
                                          regularizer=self.regularizer)

        self.trainable = True
        self.built = True
        # self.sparse = sparse

        # Multiplying by 1.0 wraps the variable in a new op, so the Keras
        # metadata below can be attached to the result without touching the
        # variable itself.
        input_tensor = self.kernel * 1.0

        self.is_placeholder = False
        input_tensor._keras_shape = self.size

        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)

        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[self.size],
             output_shapes=[self.size])
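
Another hedged sketch, assuming `SharedWeight` as the name of the class that owns this __init__:

# Hypothetical: SharedWeight stands for the class defined above.
w = SharedWeight(size=(10, 10), initializer='glorot_uniform')
t = w.output  # a trainable (10, 10) tensor that can be shared across models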
Example #4
    def __init__(self, name=None, **kwargs):
        if not name:
            prefix = 'optional_input_placeholder'
            name = prefix + '_' + str(K.get_uid(prefix))
        kwargs['batch_input_shape'] = (2, )
        super(_OptionalInputPlaceHolder, self).__init__(**kwargs)
        self.tensor = K.zeros(shape=(2, ))
        self.tensor._keras_shape = (2, )
        self.tensor._uses_learning_phase = False
        self.tensor._keras_history = (self, 0, 0)
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[],
             output_tensors=[self.tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[],
             output_shapes=[(2, )])
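
Since the placeholder is fully self-contained, usage is a one-liner; a small sketch:

ph = _OptionalInputPlaceHolder(name='opt_in')
t = ph.tensor  # an all-zeros (2,) tensor standing in for an absent input
assert t._keras_history[0] is ph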
Example #5
def swap_layer_connection(old_layer: Layer, new_layer: Layer) -> None:
    '''Connect the graph nodes of new_layer and disconnect those of old_layer.

    Keras manages its computation graph through nodes, which record the
    connections between layers. To swap an old layer for a new one, the old
    layer's nodes must be deleted and new nodes created for the new layer.

    :arg old_layer: Old layer. The connections to/from this layer will be removed.
    :arg new_layer: New layer. The connections to/from the old layer will be
        rewired to/from this layer.
    :return: None
    '''

    # the set of inbound layer which have old outbound_node
    inbound_layers = set()

    # create new inbound nodes
    for node in old_layer._inbound_nodes:  # type: Node
        Node(
            new_layer,
            node.inbound_layers,
            node.node_indices,
            node.tensor_indices,
            node.input_tensors,
            node.output_tensors,
            node.input_masks,
            node.output_masks,
            node.input_shapes,
            node.output_shapes,
        )
        inbound_layers.update(node.inbound_layers)  # set.union() returns a new set; update() mutates in place

    # remove old outbound node of inbound layers
    for layer in inbound_layers:  # type: Layer
        # materialize the matches first: removing items while a lazy
        # filter() iterates over the same list would skip elements
        old_nodes = [
            n for n in layer._outbound_nodes
            if n.outbound_layer == old_layer
        ]
        for n in old_nodes:  # type: Node
            layer._outbound_nodes.remove(n)

    # the set of outbound layer which have old inbound_nodes
    outbound_layers = set()
    # create new outbound nodes
    for node in old_layer._outbound_nodes:  # type: Node
        layers = list(node.inbound_layers)
        while old_layer in layers:
            idx = layers.index(old_layer)
            layers[idx] = new_layer
        Node(
            node.outbound_layer,
            layers,
            node.node_indices,
            node.tensor_indices,
            node.input_tensors,
            node.output_tensors,
            node.input_masks,
            node.output_masks,
            node.input_shapes,
            node.output_shapes,
        )
        outbound_layers.add(node.outbound_layer)

    # remove old inbound_node of outbound layers
    for layer in outbound_layers:  # type: Layer
        old_nodes = [
            n for n in layer._inbound_nodes
            if old_layer in n.inbound_layers
        ]
        for n in old_nodes:
            layer._inbound_nodes.remove(n)
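
A minimal sketch of how swap_layer_connection might be exercised; the wiring is illustrative, and the two layers are assumed to be shape-compatible:

from keras.layers import Input, Dense

x = Input(shape=(4,))
old = Dense(3, name='old_dense')
y = old(x)                        # creates the nodes that will be rewired
new = Dense(3, name='new_dense')
new.build((None, 4))              # give the replacement layer its weights
swap_layer_connection(old, new)   # new now owns old's graph connections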
Example #6
    def __init__(self, input, output, recursiveInput, name=None):
        # handle name argument
        self.recursiveInput = recursiveInput
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        # Container-specific properties
        if type(input) in {list, tuple}:
            self.inputs = list(input)  # tensor or list of tensors
        else:
            self.inputs = [input]
        if type(output) in {list, tuple}:
            self.outputs = list(output)
        else:
            self.outputs = [output]

        # check for redundancy in inputs:
        inputs_set = set(self.inputs)
        if len(inputs_set) != len(self.inputs):
            raise Exception('The list of inputs passed to the model '
                            'is redundant. All inputs should only appear once.'
                            ' Found: ' + str(self.inputs))

        # list of initial layers (1 to 1 mapping with self.inputs,
        # hence the same layer might appear twice)
        self.input_layers = []
        # TODO: probably useless because input layers must be Input layers (node_indices = [0], tensor_indices = [0])
        self.input_layers_node_indices = []
        self.input_layers_tensor_indices = []
        # list of output layers (1 to 1 mapping with self.outputs,
        # hence the same layer might appear twice)
        self.output_layers = []
        # TODO: probably useless
        self.output_layers_node_indices = []
        self.output_layers_tensor_indices = []
        # all layers in order of horizontal graph traversal.
        # Entries are unique. Includes input and output layers.
        self.layers = []

        # this is a performance optimization
        # for calling the Container on new inputs.
        # every time the Container is called on a set of input tensors,
        # we compute the output tensors,
        # output masks and output shapes in one pass,
        # then cache them here. When one of these outputs is queried later,
        # we retrieve it from the cache instead of recomputing it.
        self._output_mask_cache = {}
        self._output_tensor_cache = {}
        self._output_shape_cache = {}

        # arguments validation
        for x in self.inputs:
            # check that x is a Keras tensor
            if not hasattr(x, '_keras_history'):
                cls_name = self.__class__.__name__
                raise Exception('Input tensors to a ' + cls_name + ' ' +
                                'must be Keras tensors. Found: ' + str(x) +
                                ' (missing Keras metadata).')
            # check that x is an input tensor
            layer, node_index, tensor_index = x._keras_history
            if len(layer.inbound_nodes) > 1 or (
                    layer.inbound_nodes
                    and layer.inbound_nodes[0].inbound_layers):
                cls_name = self.__class__.__name__
                warnings.warn(cls_name + ' inputs must come from '
                              'a Keras Input layer, '
                              'they cannot be the output of '
                              'a previous non-Input layer. '
                              'Here, a tensor specified as '
                              'input to "' + self.name +
                              '" was not an Input tensor, '
                              'it was generated by layer ' + layer.name + '.\n'
                              'Note that input tensors are '
                              'instantiated via `tensor = Input(shape)`.\n'
                              'The tensor that caused the issue was: ' +
                              str(x.name))
        for x in self.outputs:
            if not hasattr(x, '_keras_history'):
                cls_name = self.__class__.__name__
                raise Exception('Output tensors to a ' + cls_name + ' must be '
                                'Keras tensors. Found: ' + str(x))
        # build self.output_layers:
        for x in self.outputs:
            layer, node_index, tensor_index = x._keras_history
            self.output_layers.append(layer)
            self.output_layers_node_indices.append(node_index)
            self.output_layers_tensor_indices.append(tensor_index)

        # fill in the output mask cache
        masks = []
        for x in self.inputs:
            layer, node_index, tensor_index = x._keras_history
            node = layer.inbound_nodes[node_index]
            mask = node.output_masks[tensor_index]
            masks.append(mask)
        mask_cache_key = ','.join([str(id(x)) for x in self.inputs])
        mask_cache_key += '_' + ','.join([str(id(x)) for x in masks])
        masks = []
        for x in self.outputs:
            layer, node_index, tensor_index = x._keras_history
            node = layer.inbound_nodes[node_index]
            mask = node.output_masks[tensor_index]
            masks.append(mask)
        if len(masks) == 1:
            mask = masks[0]
        else:
            mask = masks
        self._output_mask_cache[mask_cache_key] = mask

        # build self.input_layers:
        for x in self.inputs:
            layer, node_index, tensor_index = x._keras_history
            # it's supposed to be an input layer, so only one node
            # and one tensor output
            assert node_index == 0
            assert tensor_index == 0
            self.input_layers.append(layer)
            self.input_layers_node_indices.append(node_index)
            self.input_layers_tensor_indices.append(tensor_index)

        # build self.input_names and self.output_names
        self.input_names = []
        self.output_names = []
        for layer in self.input_layers:
            self.input_names.append(layer.name)
        for layer in self.output_layers:
            self.output_names.append(layer.name)

        self.internal_input_shapes = [x._keras_shape for x in self.inputs]
        self.internal_output_shapes = [x._keras_shape for x in self.outputs]

        # container_nodes: set of nodes included in the graph
        # (not all nodes included in the layers are relevant to the current graph).
        container_nodes = set()  # ids of all nodes relevant to the Container
        nodes_depths = {}  # map {node: depth value}
        layers_depths = {}  # map {layer: depth value}

        def make_node_marker(node, depth):
            return str(id(node)) + '-' + str(depth)

        def build_map_of_graph(tensor,
                               seen_nodes=None,
                               depth=0,
                               layer=None,
                               node_index=None,
                               tensor_index=None):
            '''This recursively updates the maps nodes_depths,
            layers_depths and the set container_nodes.
            Does not try to detect cycles in graph (TODO?)
            # Arguments
                tensor: some tensor in a graph
                seen_nodes: set of node markers ("{id(node)}-{depth}", as
                    produced by `make_node_marker`) of nodes seen so far.
                    Useful to prevent infinite loops.
                depth: current depth in the graph (0 = last output).
                layer: layer that `tensor` comes from. If not provided,
                    it will be obtained from `tensor._keras_history`.
                node_index: index of the node that `tensor` comes from.
                tensor_index: index of the tensor within that node's outputs.
            '''
            if seen_nodes is None:
                seen_nodes = set()
            if not layer or node_index is None or tensor_index is None:
                layer, node_index, tensor_index = tensor._keras_history
            node = layer.inbound_nodes[node_index]

            # prevent cycles
            seen_nodes.add(make_node_marker(node, depth))

            node_key = layer.name + '_ib-' + str(node_index)
            # update container_nodes
            container_nodes.add(node_key)
            # update nodes_depths
            node_depth = nodes_depths.get(node)
            if node_depth is None:
                nodes_depths[node] = depth
            else:
                nodes_depths[node] = max(depth, node_depth)
            # update layers_depths
            previously_seen_depth = layers_depths.get(layer)
            if previously_seen_depth is None:
                current_depth = depth
            else:
                current_depth = max(depth, previously_seen_depth)
            layers_depths[layer] = current_depth

            # propagate to all previous tensors connected to this node
            for i in range(len(node.inbound_layers)):
                x = node.input_tensors[i]
                layer = node.inbound_layers[i]
                node_index = node.node_indices[i]
                tensor_index = node.tensor_indices[i]
                next_node = layer.inbound_nodes[node_index]
                # use node_marker to prevent cycles
                node_marker = make_node_marker(next_node, current_depth + 1)
                if node_marker not in seen_nodes:
                    build_map_of_graph(x, seen_nodes, current_depth + 1, layer,
                                       node_index, tensor_index)

        for x in self.outputs:
            seen_nodes = set()
            build_map_of_graph(x, seen_nodes, depth=0)

        # build a map {depth: list of nodes with this depth}
        nodes_by_depth = {}
        for node, depth in list(nodes_depths.items()):
            if depth not in nodes_by_depth:
                nodes_by_depth[depth] = []
            nodes_by_depth[depth].append(node)

        # build a map {depth: list of layers with this depth}
        layers_by_depth = {}
        for layer, depth in list(layers_depths.items()):
            if depth not in layers_by_depth:
                layers_by_depth[depth] = []
            layers_by_depth[depth].append(layer)

        # get sorted list of layer depths
        depth_keys = list(layers_by_depth.keys())
        depth_keys.sort(reverse=True)

        # set self.layers and self.layers_by_depth
        layers = []
        for depth in depth_keys:
            layers_for_depth = layers_by_depth[depth]
            # container.layers needs to have a deterministic order
            layers_for_depth.sort(key=lambda x: x.name)
            for layer in layers_for_depth:
                layers.append(layer)
        self.layers = layers
        self.layers_by_depth = layers_by_depth

        # get sorted list of node depths
        depth_keys = list(nodes_by_depth.keys())
        depth_keys.sort(reverse=True)

        # check that all tensors required are computable.
        # computable_tensors: all tensors in the graph
        # that can be computed from the inputs provided
        computable_tensors = [self.recursiveInput]
        for x in self.inputs:
            computable_tensors.append(x)

        layers_with_complete_input = []  # to provide a better error msg
        for depth in depth_keys:
            for node in nodes_by_depth[depth]:
                layer = node.outbound_layer
                if layer:
                    for x in node.input_tensors:
                        if x not in computable_tensors:
                            raise Exception('Graph disconnected: '
                                            'cannot obtain value for tensor ' +
                                            str(x) + ' at layer "' +
                                            layer.name + '". '
                                            'The following previous layers '
                                            'were accessed without issue: ' +
                                            str(layers_with_complete_input))
                    for x in node.output_tensors:
                        computable_tensors.append(x)
                    layers_with_complete_input.append(layer.name)

        # set self.nodes and self.nodes_by_depth
        self.container_nodes = container_nodes
        self.nodes_by_depth = nodes_by_depth

        # ensure name unicity, which will be crucial for serialization
        # (since serialized nodes refer to layers by their name).
        all_names = [layer.name for layer in self.layers]
        for name in all_names:
            if all_names.count(name) != 1:
                raise Exception('The name "' + name + '" is used ' +
                                str(all_names.count(name)) +
                                ' times in the model. ' +
                                'All layer names should be unique.')

        # layer parameters
        # the new container starts with a single inbound node
        # for its inputs, and no outbound nodes.
        self.outbound_nodes = []  # will be appended to by future calls to __call__
        self.inbound_nodes = []  # will be appended to below, and by future calls to __call__
        # create the node linking internal inputs to internal outputs
        Node(
            outbound_layer=self,
            inbound_layers=[],
            node_indices=[],
            tensor_indices=[],
            input_tensors=self.inputs,
            output_tensors=self.outputs,
            # no container-level masking for now
            input_masks=[None for _ in self.inputs],
            output_masks=[None for _ in self.outputs],
            input_shapes=[x._keras_shape for x in self.inputs],
            output_shapes=[x._keras_shape for x in self.outputs])
        self.built = True
        self.supports_masking = False
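
A hypothetical sketch of the intent, assuming the __init__ above belongs to a Container subclass named RecursiveContainer: because recursiveInput is seeded into computable_tensors, layers that consume it do not trigger the 'Graph disconnected' error even though it is not a declared model input.

from keras.layers import Input, Dense, add

x = Input(shape=(8,))
rec = Input(shape=(8,))  # fed back into the graph at run time
y = Dense(8)(add([x, rec]))
# rec is not among the declared inputs, but recursiveInput marks it computable.
model = RecursiveContainer(input=x, output=y, recursiveInput=rec)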
Example #7
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 batch_input_shape=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None):

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(K.get_uid(prefix))
        super(InputLayer, self).__init__(dtype=dtype, name=name)

        self.trainable = False
        self.built = True
        self.sparse = sparse

        if input_shape and batch_input_shape:
            raise ValueError('Only provide the input_shape OR '
                             'batch_input_shape argument to '
                             'InputLayer, not both at the same time.')
        if input_tensor is not None and batch_input_shape is None:
            # If input_tensor is set, and batch_input_shape is not set:
            # Attempt automatic input shape inference.
            try:
                batch_input_shape = K.int_shape(input_tensor)
            except TypeError:
                if not input_shape and not batch_input_shape:
                    raise ValueError('InputLayer was provided '
                                     'an input_tensor argument, '
                                     'but its input shape cannot be '
                                     'automatically inferred. '
                                     'You should pass an input_shape or '
                                     'batch_input_shape argument.')
        if not batch_input_shape:
            if not input_shape:
                raise ValueError('An Input layer should be passed either '
                                 'a `batch_input_shape` or an `input_shape`.')
            else:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
        else:
            batch_input_shape = tuple(batch_input_shape)

        if not dtype:
            if input_tensor is None:
                dtype = K.floatx()
            else:
                dtype = K.dtype(input_tensor)

        self.batch_input_shape = batch_input_shape
        self.dtype = dtype

        if input_tensor is None:
            self.is_placeholder = True
            input_tensor = K.placeholder(shape=batch_input_shape,
                                         dtype=dtype,
                                         sparse=self.sparse,
                                         name=self.name)
        else:
            self.is_placeholder = False
            input_tensor._keras_shape = batch_input_shape

        # Create an input node to add to self.outbound_node
        # and set output_tensors' _keras_history.
        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[batch_input_shape],
             output_shapes=[batch_input_shape])
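
Unlike Example #1, this version can also wrap an existing backend tensor. A small sketch of the input_tensor branch, assuming a TensorFlow 1.x backend:

import tensorflow as tf
from keras.layers import InputLayer

# The batch input shape is inferred from the placeholder via K.int_shape.
ph = tf.placeholder(tf.float32, shape=(None, 32))
layer = InputLayer(input_tensor=ph)
assert layer.batch_input_shape == (None, 32)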