def _validate_in_shapes(self): """Ensure all in_shapes are valid by comparing to `expected_inputs`. Raises: LayerValidationError: if there are unrecognized inputs, missing inputs or inputs that don't match the `StructureTemplate` from `expected_inputs`. """ in_shape_names = set(self.in_shapes.keys()) input_names = set(self.expected_inputs.keys()) if not in_shape_names.issubset(input_names): raise LayerValidationError( 'Invalid in_shapes. {} has no input(s) named "{}". Choices ' 'are: {}'.format(self.name, in_shape_names - input_names, input_names)) if not input_names.issubset(in_shape_names): raise LayerValidationError( '{}: All inputs need to be connected. Missing {}.'.format( self.name, input_names - in_shape_names)) for input_name, in_shape in self.in_shapes.items(): if not self.expected_inputs[input_name].matches(in_shape): raise LayerValidationError( "{}: in_shape ({}) for {} doesn't match StructureTemplate " "{}".format(self.name, in_shape, input_name, self.expected_inputs[input_name]))
def setup(self, kwargs, in_shapes):
    """Set up the input layer from its 'out_shapes' kwarg.

    Args:
        kwargs: layer configuration; must contain 'out_shapes', a mapping
            from output name to a shape tuple.
        in_shapes: incoming shapes; must be empty for an input layer.

    Returns:
        Tuple of (outputs, parameters, internals) OrderedDicts; only
        outputs is non-empty.

    Raises:
        LayerValidationError: if 'out_shapes' is missing or if there are
            any incoming connections.
    """
    if 'out_shapes' not in kwargs:
        raise LayerValidationError("InputLayer requires 'out_shapes'")
    if in_shapes:
        raise LayerValidationError(
            'InputLayer cannot have any incoming connections!'
            '(But had these: {})'.format(in_shapes))

    outputs = OrderedDict()
    # Fix: read from the validated `kwargs` argument. The original read
    # self.kwargs here, bypassing the presence check above.
    for name, shape in kwargs['out_shapes'].items():
        outputs[name] = BufferStructure(*shape)
    return outputs, OrderedDict(), OrderedDict()
def setup(self, kwargs, in_shapes):
    """Configure a recurrent layer with multiple recurrence steps.

    Reads 'activation', 'size' and 'recurrence_depth' from kwargs and
    declares the output, parameter and per-depth internal buffers.

    Raises:
        LayerValidationError: if 'size' or 'recurrence_depth' is not int.
    """
    self.activation = kwargs.get('activation', 'tanh')
    self.size = kwargs.get('size', self.in_shapes['default'].feature_size)
    self.recurrence_depth = kwargs.get('recurrence_depth', 1)
    if not isinstance(self.size, int):
        raise LayerValidationError('size must be int but was {}'.format(
            self.size))
    if not isinstance(self.recurrence_depth, int):
        raise LayerValidationError(
            'recurrence_depth must be int but was {}'.format(
                self.recurrence_depth))

    in_size = self.in_shapes['default'].feature_size
    size = self.size
    depth = self.recurrence_depth

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', size, context_size=1)

    parameters = OrderedDict()
    parameters['W_H'] = BufferStructure(size, in_size)
    parameters['W_T'] = BufferStructure(size, in_size)
    parameters['R_T'] = BufferStructure(depth, size, size)
    parameters['bias_T'] = BufferStructure(depth, size)
    parameters['R_H'] = BufferStructure(depth, size, size)
    parameters['bias_H'] = BufferStructure(depth, size)

    internals = OrderedDict()
    # One forward (H, T, Y) and one backward (dH, dT, dY) buffer triple
    # per recurrence step.
    for step in range(depth):
        for tag in ('H', 'T', 'Y'):
            internals['{}_{}'.format(tag, step)] = BufferStructure(
                'T', 'B', size, context_size=1)
        for tag in ('H', 'T', 'Y'):
            internals['d{}_{}'.format(tag, step)] = BufferStructure(
                'T', 'B', size, context_size=1, is_backward_only=True)
    return outputs, parameters, internals
def setup(self, kwargs, in_shapes):
    """Validate that 'H', 'T' and 'x' agree in shape and declare outputs.

    The output mirrors the feature shape of 'x'.

    Raises:
        LayerValidationError: if 'H' differs in shape from 'T' or 'x'.
    """
    # 'H', 'T' and 'x' must have the same shape
    if in_shapes['H'] != in_shapes['T']:
        raise LayerValidationError(
            "{}: H and T must have the same shape but got {} and {}"
            .format(self.name, in_shapes['H'], in_shapes['T']))
    if in_shapes['H'] != in_shapes['x']:
        raise LayerValidationError(
            "{}: H and x must have the same shape but got {} and {}"
            .format(self.name, in_shapes['H'], in_shapes['x']))

    outputs = OrderedDict()
    # Consistency fix: use the `in_shapes` argument throughout instead of
    # mixing it with `self.in_shapes` as the original did.
    outputs['default'] = BufferStructure(
        'T', 'B', *in_shapes['x'].feature_shape)
    return outputs, OrderedDict(), OrderedDict()
def _validate_connections(self):
    """Run the base-class connection checks and forbid inbound links.

    Raises:
        LayerValidationError: if this input layer has any incoming
            connections.
    """
    super(InputLayerImpl, self)._validate_connections()
    if not self.incoming:
        return
    raise LayerValidationError(
        'InputLayer cannot have any incoming connections!'
        '(But had these: {})'.format(self.incoming))
def setup(self, kwargs, in_shapes):
    """Configure a simple recurrent layer.

    Reads 'activation' and 'size' from kwargs and declares the output,
    weight/recurrent/bias parameters, and pre-activation internals.

    Raises:
        LayerValidationError: if 'size' is not an int.
    """
    self.activation = kwargs.get('activation', 'tanh')
    self.size = kwargs.get('size', self.in_shapes['default'].feature_size)
    if not isinstance(self.size, int):
        raise LayerValidationError('size must be int but was {}'.format(
            self.size))

    in_size = self.in_shapes['default'].feature_size
    size = self.size

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', size, context_size=1)

    parameters = OrderedDict()
    parameters['W'] = BufferStructure(size, in_size)
    parameters['R'] = BufferStructure(size, size)
    parameters['bias'] = BufferStructure(size)

    internals = OrderedDict()
    internals['Ha'] = BufferStructure('T', 'B', size, context_size=1)
    for name in ('dHa', 'dHb'):
        internals[name] = BufferStructure('T', 'B', size, context_size=1,
                                          is_backward_only=True)
    return outputs, parameters, internals
def _validate_connections(self): """ Ensure all incoming and outgoing connections are valid. Raises: LayerValidationError: if there is any: * incoming connection to a non-existent input * outgoing connection from a non-existent output, parameter or internal buffer """ for in_c in self.incoming: if in_c.input_name not in self.in_shapes: raise LayerValidationError( '{}: Invalid incoming connection ({}). Layer has no input ' 'named "{}"'.format(self.name, in_c, in_c.sink_name)) for out_c in self.outgoing: if out_c.output_name.startswith('..'): category, _, substruct = out_c.output_name[2:].partition('.') if category not in {'parameters', 'internals'}: raise LayerValidationError( "{}: Invalid outgoing connection ({}). Category '{}' " "is not allowed/does not exist. Choices are " "['parameters', 'internals']".format( self.name, out_c, category)) if category == 'parameters': parameters = self.parameter_shapes if substruct not in parameters: raise LayerValidationError( "{}: Invalid outgoing connection ({}). Parameter" " '{}' does not exist. Choices are {}".format( self.name, out_c, list(parameters.keys()))) if category == 'internals': internals = self.internal_shapes if substruct not in internals: raise LayerValidationError( "{}: Invalid outgoing connection ({}). Internal" " '{}' does not exist. Choices are {}".format( self.name, out_c, list(internals.keys()))) elif out_c.output_name not in self.out_shapes: raise LayerValidationError( '{}: Invalid outgoing connection ({}). Layer has no output' ' named "{}". Choices are: {}'.format( self.name, out_c, out_c.output_name, list(self.out_shapes.keys())))
def _validate_kwargs(self): """Ensure self.kwargs are all sound. Raises: LayerValidationError: if there are unexpected kwargs.""" unexpected_kwargs = set(self.kwargs) - set(self.expected_kwargs) if unexpected_kwargs: raise LayerValidationError("{}: Unexpected kwargs: {}".format( self.name, unexpected_kwargs))
def setup(self, kwargs, in_shapes):
    """Validate matching input/target shapes and declare loss buffers.

    Raises:
        LayerValidationError: if default input and targets differ in
            rank or in any dimension.
    """
    pred_shape = in_shapes['default'].feature_shape
    target_shape = in_shapes['targets'].feature_shape

    # Rank check first so the more specific message fires for it.
    if len(pred_shape) != len(target_shape):
        raise LayerValidationError('Default input and targets must have '
                                   'the same number of dimensions.')
    if pred_shape != target_shape:
        raise LayerValidationError('All dimensions must match '
                                   'for default input and targets.')

    outputs = OrderedDict()
    outputs['predictions'] = BufferStructure('T', 'B', *pred_shape)
    outputs['loss'] = BufferStructure('T', 'B', *pred_shape)

    internals = OrderedDict()
    internals['dcee'] = BufferStructure('T', 'B', *pred_shape,
                                        is_backward_only=True)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes): in_shape = in_shapes['default'].feature_shape if in_shapes['mask'].feature_shape not in [(1,), in_shape]: raise LayerValidationError( "Shape of the mask did not match shape of the default inputs. " "Should be either ('T', 'B', 1) or {}, but was {}".format( in_shapes['default'].shape), in_shapes['mask'].shape) outputs = OrderedDict() outputs['default'] = in_shapes['default'] return outputs, OrderedDict(), OrderedDict()
def setup(self, kwargs, in_shapes):
    """Validate shapes for a binary-target loss and declare buffers.

    Targets must match the default input in every dimension except the
    last, which must be exactly 1.

    Raises:
        LayerValidationError: on rank mismatch, leading-dimension
            mismatch, or a target last dimension other than 1.
    """
    pred_shape = in_shapes['default'].feature_shape
    target_shape = in_shapes['targets'].feature_shape

    if len(target_shape) != len(pred_shape):
        raise LayerValidationError('Default input and targets must have '
                                   'the same number of dimensions.')
    if target_shape[:-1] != pred_shape[:-1]:
        raise LayerValidationError('All dimensions except last must match '
                                   'for default input and targets.')
    if target_shape[-1] != 1:
        raise LayerValidationError('Last dimension of targets must be '
                                   'size 1.')

    outputs = OrderedDict()
    outputs['probabilities'] = BufferStructure('T', 'B', *pred_shape)
    outputs['loss'] = BufferStructure('T', 'B', *target_shape)

    internals = OrderedDict()
    internals['t_bin'] = BufferStructure('T', 'B', *pred_shape,
                                         is_backward_only=True)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes):
    """Validate equal input/target feature shapes and declare buffers.

    Raises:
        LayerValidationError: if 'default' and 'targets' feature shapes
            differ.
    """
    # 'default' and 'targets' must have same shape
    pred_shape = in_shapes['default'].feature_shape
    target_shape = in_shapes['targets'].feature_shape
    if pred_shape != target_shape:
        raise LayerValidationError(
            "{}: default and targets must have same feature shapes but "
            "got {} and {}".format(self.name, pred_shape, target_shape))

    outputs = OrderedDict()
    outputs['predictions'] = BufferStructure('T', 'B', *pred_shape)
    outputs['loss'] = BufferStructure('T', 'B', *pred_shape)

    internals = OrderedDict()
    internals['diff'] = BufferStructure('T', 'B', *pred_shape)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes):
    """Validate that both inputs share a feature shape; declare buffers.

    Raises:
        LayerValidationError: if 'inputs_1' and 'inputs_2' feature
            shapes differ.
    """
    # 'inputs_1' and 'inputs_2' must have same shape
    shape_a = in_shapes['inputs_1'].feature_shape
    shape_b = in_shapes['inputs_2'].feature_shape
    if shape_a != shape_b:
        raise LayerValidationError(
            "{}: inputs_1 and inputs_2 must have same feature shapes but "
            "got {} and {}".format(self.name, shape_a, shape_b))

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', *shape_a)

    internals = OrderedDict()
    internals['diff'] = BufferStructure(
        'T', 'B', *self.in_shapes['inputs_1'].feature_shape)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes):
    """Validate identical input/target shapes and declare loss buffers.

    Raises:
        LayerValidationError: if the feature shapes differ.
    """
    pred_shape = in_shapes['default'].feature_shape
    target_shape = in_shapes['targets'].feature_shape
    if pred_shape != target_shape:
        raise LayerValidationError('input and targets must have the same '
                                   'shapes. But got {} != {}'
                                   .format(pred_shape, target_shape))

    outputs = OrderedDict()
    outputs['predictions'] = BufferStructure('T', 'B', *pred_shape)
    outputs['loss'] = BufferStructure('T', 'B', *pred_shape)

    internals = OrderedDict()
    internals['dcee'] = BufferStructure('T', 'B', *pred_shape,
                                        is_backward_only=True)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes):
    """Validate matching structures and declare a scalar loss output.

    Raises:
        LayerValidationError: if the 'default' and 'targets' structures
            are not equal.
    """
    if in_shapes['default'] != in_shapes['targets']:
        raise LayerValidationError("{}: default and targets must have the "
                                   "same shapes but got {} and {}"
                                   .format(self.name,
                                           in_shapes['default'],
                                           in_shapes['targets']))

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', 1)

    internals = OrderedDict()
    feature_shape = in_shapes['default'].feature_shape
    internals['cee'] = BufferStructure('T', 'B', *feature_shape)
    internals['ceed'] = BufferStructure('T', 'B', *feature_shape,
                                        is_backward_only=True)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes): in_shape = in_shapes['default'].feature_shape expected_shape = in_shape[:-1] + (1,) if in_shapes['mask'].feature_shape == (1,): self.flatten_dim = 2 elif in_shapes['mask'].feature_shape in [expected_shape, in_shape]: self.flatten_dim = len(in_shape) + 1 else: raise LayerValidationError( "Shape of the mask did not match shape of the default inputs. " "Should be either ('T', 'B', 1) or {} or {}, but was {}".format( ('T', 'B') + expected_shape, in_shapes['default'].shape, in_shapes['mask'])) outputs = OrderedDict() outputs['default'] = in_shapes['default'] return outputs, OrderedDict(), OrderedDict()
def setup(self, kwargs, in_shapes):
    """Concatenate two inputs along their last dimension.

    Both inputs must agree in every dimension except the last; the
    output's last dimension is the sum of theirs.

    Raises:
        LayerValidationError: if the leading dimensions differ.
    """
    # 'inputs_1' and 'inputs_2' must have same shape except for last dim
    prefix_a = in_shapes['inputs_1'].shape[:-1]
    prefix_b = in_shapes['inputs_2'].shape[:-1]
    if prefix_a != prefix_b:
        raise LayerValidationError(
            "{}: The shapes of inputs_1 and inputs_2 may only differ in "
            "the last dimension but got {} and {}".format(
                self.name, in_shapes['inputs_1'].shape,
                in_shapes['inputs_2'].shape))

    last_dim = (in_shapes['inputs_1'].shape[-1] +
                in_shapes['inputs_2'].shape[-1])

    outputs = OrderedDict()
    outputs['default'] = BufferStructure(*(prefix_a + (last_dim,)))
    return outputs, OrderedDict(), OrderedDict()
def setup(self, kwargs, in_shapes):
    """Configure a fully connected layer.

    'size' may be an int or a tuple/list of ints describing the output
    feature shape; a bare int is normalized to a 1-tuple.

    Raises:
        LayerValidationError: if 'size' is not an int, tuple or list.
    """
    self.activation = kwargs.get('activation', 'rel')
    self.size = kwargs.get('size', self.in_shapes['default'].feature_shape)
    if isinstance(self.size, int):
        self.size = (self.size, )
    if not isinstance(self.size, (tuple, list)):
        # Fix: the message claimed only int was valid, although tuples
        # and lists are accepted too.
        raise LayerValidationError(
            'size must be int or tuple/list of ints but was {}'.format(
                self.size))

    in_size = in_shapes['default'].feature_size

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', *self.size)
    out_size = outputs['default'].feature_size

    parameters = OrderedDict()
    parameters['W'] = BufferStructure(out_size, in_size)
    parameters['bias'] = BufferStructure(out_size)

    internals = OrderedDict()
    return outputs, parameters, internals
def setup(self, kwargs, in_shapes):
    """Validate equal input feature sizes and declare a scalar output.

    Raises:
        LayerValidationError: if 'inputs_1' and 'inputs_2' differ in
            feature size.
    """
    # 'inputs_1' and 'inputs_2' must have same shape
    size_a = in_shapes['inputs_1'].feature_size
    size_b = in_shapes['inputs_2'].feature_size
    if size_a != size_b:
        raise LayerValidationError(
            "{}: inputs_1 and inputs_2 must have same feature sizes but "
            "got {} and {}".format(self.name,
                                   in_shapes['inputs_1'].feature_shape,
                                   in_shapes['inputs_2'].feature_shape))

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', 1)

    internals = OrderedDict()
    feature_size = self.in_shapes['inputs_1'].feature_size
    internals['squared_diff'] = BufferStructure('T', 'B', feature_size)
    internals['grad_diff'] = BufferStructure('T', 'B', feature_size,
                                             is_backward_only=True)
    return outputs, OrderedDict(), internals
def setup(self, kwargs, in_shapes):
    """Configure an LSTM layer with peephole connections.

    Declares per-gate input weights (W*), peepholes (p*), recurrent
    weights (R*) and biases (b*), plus forward/backward internal buffers
    for the gate pre- and post-activations and the cell state.

    Raises:
        LayerValidationError: if 'size' is not an int.
    """
    self.activation = kwargs.get('activation', 'tanh')
    in_size = in_shapes['default'].feature_size
    self.size = kwargs.get('size', in_size)
    if not isinstance(self.size, int):
        raise LayerValidationError('size must be int but was {}'.
                                   format(self.size))
    size = self.size

    outputs = OrderedDict()
    outputs['default'] = BufferStructure('T', 'B', size, context_size=1)

    # Gates: z (block input), i (input), f (forget), o (output).
    gates = ('z', 'i', 'f', 'o')
    parameters = OrderedDict()
    for g in gates:
        parameters['W' + g] = BufferStructure(size, in_size)
    for g in ('i', 'f', 'o'):  # no peephole for the block input
        parameters['p' + g] = BufferStructure(1, size)
    for g in gates:
        parameters['R' + g] = BufferStructure(size, size)
    for g in gates:
        parameters['b' + g] = BufferStructure(size)

    internals = OrderedDict()
    # Forward buffers: pre-activation ('a') and activated ('b') values
    # for each gate plus the cell state C.
    for tag in ('Z', 'I', 'F', 'O', 'C'):
        for phase in ('a', 'b'):
            internals[tag + phase] = BufferStructure(
                'T', 'B', size, context_size=1)
    # Matching gradient buffers, used only in the backward pass.
    for tag in ('Z', 'I', 'F', 'O', 'C'):
        for phase in ('a', 'b'):
            internals['d' + tag + phase] = BufferStructure(
                'T', 'B', size, context_size=1, is_backward_only=True)
    return outputs, parameters, internals
def setup(self, kwargs, in_shapes): if 'factor' not in kwargs: raise LayerValidationError('Missing required "factor" argument') self.factor = kwargs['factor'] out_shapes = in_shapes return out_shapes, OrderedDict(), OrderedDict()