Example #1
    def __call__(self, inputs, initial_state=None, initial_readout=None, ground_truth=None, **kwargs):
        req_num_inputs = 1 + self.num_states
        inputs = _to_list(inputs)
        inputs = inputs[:]
        if len(inputs) == 1:
            # Only the main input tensor was passed; append the state,
            # readout and ground-truth tensors (or optional placeholders)
            # so that `inputs` matches the expected layout.
            if initial_state is not None:
                if isinstance(initial_state, list):
                    inputs += initial_state
                else:
                    inputs.append(initial_state)
            else:
                # No explicit initial state: fall back to optional placeholders.
                if self.readout:
                    initial_state = self._get_optional_input_placeholder('initial_state', self.num_states - 1)
                else:
                    initial_state = self._get_optional_input_placeholder('initial_state', self.num_states)
                inputs += _to_list(initial_state)
            if self.readout:
                if initial_readout is None:
                    initial_readout = self._get_optional_input_placeholder('initial_readout')
                inputs.append(initial_readout)
            if self.teacher_force:
                req_num_inputs += 1
                if ground_truth is None:
                    ground_truth = self._get_optional_input_placeholder('ground_truth')
                inputs.append(ground_truth)
        assert len(inputs) == req_num_inputs, "Required " + str(req_num_inputs) + " inputs, received " + str(len(inputs)) + "."
        with K.name_scope(self.name):
            # Build the layer on first call and restore any initial weights.
            if not self.built:
                self.build(K.int_shape(inputs[0]))
                if self._initial_weights is not None:
                    self.set_weights(self._initial_weights)
                    del self._initial_weights
                    self._initial_weights = None
            # Propagate the mask of the main input unless the caller
            # passed an explicit mask keyword.
            previous_mask = _collect_previous_mask(inputs[:1])
            user_kwargs = kwargs.copy()
            if not _is_all_none(previous_mask):
                # getfullargspec replaces the deprecated inspect.getargspec.
                if 'mask' in inspect.getfullargspec(self.call).args:
                    if 'mask' not in kwargs:
                        kwargs['mask'] = previous_mask
            # Call the layer and record shapes, masks and the inbound node.
            input_shape = _collect_input_shape(inputs)
            output = self.call(inputs, **kwargs)
            output_mask = self.compute_mask(inputs[0], previous_mask)
            output_shape = self.compute_output_shape(input_shape[0])
            self._add_inbound_node(input_tensors=inputs, output_tensors=output,
                                   input_masks=previous_mask, output_masks=output_mask,
                                   input_shapes=input_shape, output_shapes=output_shape,
                                   arguments=user_kwargs)
            # Apply the activity regularizer, if any, to every output tensor.
            if hasattr(self, 'activity_regularizer') and self.activity_regularizer is not None:
                regularization_losses = [self.activity_regularizer(x) for x in _to_list(output)]
                self.add_loss(regularization_losses, _to_list(inputs))
        return output
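
For reference, a call to a layer with this `__call__` signature might look like the sketch below. The layer object `rnn` and the tensors `x`, `h0`, `c0`, `r0` and `y_true` are hypothetical placeholders, not part of the example above; the point is only how the optional arguments are folded into `inputs`.

# Hypothetical usage sketch (names are assumptions, not taken from the example).
# When only the main input is passed, the optional tensors are appended to
# `inputs` until len(inputs) == 1 + num_states (+1 with teacher forcing).
y = rnn(x,
        initial_state=[h0, c0],   # a list extends `inputs`
        initial_readout=r0,       # appended only if rnn.readout is set
        ground_truth=y_true)      # appended only if rnn.teacher_force is set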
Example #2
    def __call__(self, inputs, **kwargs):
        if isinstance(inputs, list):
            inputs = inputs[:]

        with K.name_scope(self.name):
            # Raise exceptions in case the input is not compatible
            # with the input_spec specified in the layer constructor.
            self.assert_input_compatibility(inputs)

            # Handle lazy layer building (weight creation, input spec locking).
            if not self.built:
                self.build(inputs)
                self.built = True

            # Handle mask propagation.
            previous_mask = _collect_previous_mask(inputs)
            user_kwargs = copy.copy(kwargs)
            if not _is_all_none(previous_mask):
                # The previous layer generated a mask.
                if has_arg(self.call, 'mask'):
                    if 'mask' not in kwargs:
                        # Only fall back to the previous layer's mask if
                        # no mask was explicitly passed to __call__.
                        kwargs['mask'] = previous_mask
            # Handle automatic shape inference (only useful for Theano).
            input_shape = _collect_input_shape(inputs)

            # Actually call the layer, collecting output(s), mask(s), and shape(s).
            output = self.call(inputs, **kwargs)
            output_mask = self.compute_mask(inputs, previous_mask)

            # If the layer returns tensors from its inputs, unmodified,
            # we copy them to avoid loss of tensor metadata.
            output_ls = _to_list(output)
            inputs_ls = _to_list(inputs)
            output_ls_copy = []
            for x in output_ls:
                if x in inputs_ls:
                    x = K.identity(x)
                output_ls_copy.append(x)
            if len(output_ls_copy) == 1:
                output = output_ls_copy[0]
            else:
                output = output_ls_copy

            # Inferring the output shape is only relevant for Theano.
            if all([s is not None for s in _to_list(input_shape)]):
                output_shape = self.compute_output_shape(input_shape)
            else:
                if isinstance(input_shape, list):
                    output_shape = [None for _ in input_shape]
                else:
                    output_shape = None

            if not isinstance(output_mask, (list, tuple)) and len(output_ls) > 1:
                # Augment the mask to match the length of the output.
                output_mask = [output_mask] * len(output_ls)

            # Add an inbound node to the layer, so that it keeps track
            # of the call and of all new variables created during the call.
            # This also updates the layer history of the output tensor(s).
            # If the input tensor(s) had no previous Keras history,
            # this does nothing.
            self._add_inbound_node(input_tensors=inputs,
                                   output_tensors=output,
                                   input_masks=previous_mask,
                                   output_masks=output_mask,
                                   input_shapes=input_shape,
                                   output_shapes=output_shape,
                                   arguments=user_kwargs)

            # Apply activity regularizer if any:
            if hasattr(self, 'activity_regularizer') and self.activity_regularizer is not None:
                regularization_losses = [
                    self.activity_regularizer(x) for x in _to_list(output)
                ]
                self.add_loss(regularization_losses, _to_list(inputs))
        return output
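
This second example follows the standard Keras 2.x `Layer.__call__` pattern. Assuming a plain Keras 2.x installation, the observable behaviour for a user is roughly the sketch below; the specific layers chosen here are illustrative and not taken from the example.

# Minimal sketch, assuming Keras 2.x: applying a layer to a Keras tensor
# triggers build() on first use and records an inbound node on the layer.
from keras.layers import Input, Dense

x = Input(shape=(10,))      # Keras tensor carrying history metadata
dense = Dense(4)            # dense.built is False until the first call
y = dense(x)                # __call__ builds weights, propagates mask/shape,
                            # and registers the x -> y node on the layer
assert dense.built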