Example No. 1
    def __init__(self, num_outputs=None, feedback_dim=None, **kwargs):
        super(LookupFeedback, self).__init__(**kwargs)
        update_instance(self, locals())

        self.lookup = LookupTable(num_outputs, feedback_dim,
                                  kwargs.get("weights_init"))
        self.children = [self.lookup]
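Every snippet on this page follows the same pattern: call update_instance(self, locals()) near the top of __init__ so that each constructor argument becomes an attribute of the instance. The helper itself is never shown here; the following is only a minimal sketch of the behaviour the call sites rely on (assuming that self and the captured kwargs dictionary are skipped), not the library's actual implementation:

def update_instance(self, args):
    # Copy each local variable onto the instance, so that an argument
    # such as feedback_dim later becomes available as self.feedback_dim.
    for name, value in args.items():
        if name not in ('self', 'kwargs'):
            setattr(self, name, value)

This is why the examples below can refer to self.weights_init, self.source_names, self.prototype and so on without ever assigning them explicitly.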
Example No. 2
 def __init__(self, aggregation_scheme, initialization_updates=None,
              accumulation_updates=None, readout_expression=None):
     if initialization_updates is None:
         initialization_updates = []
     if accumulation_updates is None:
         accumulation_updates = []
     update_instance(self, locals())
Example No. 3
    def __init__(self,
                 state_names,
                 state_dims,
                 sequence_dim,
                 match_dim,
                 state_transformer=None,
                 sequence_transformer=None,
                 energy_computer=None,
                 weights_init=None,
                 biases_init=None,
                 **kwargs):
        super(SequenceContentAttention, self).__init__(**kwargs)
        update_instance(self, locals())

        self.state_transformers = Parallel(state_names,
                                           self.state_transformer,
                                           name="state_trans")
        if not self.sequence_transformer:
            self.sequence_transformer = MLP([Identity()], name="seq_trans")
        if not self.energy_computer:
            self.energy_computer = MLP([Identity()], name="energy_comp")
        self.children = [
            self.state_transformers, self.sequence_transformer,
            self.energy_computer
        ]
Example No. 4
    def __init__(self, readout_dim, source_names,
                 weights_init, biases_init, **kwargs):
        super(LinearReadout, self).__init__(readout_dim, **kwargs)
        update_instance(self, locals())

        self.projectors = [MLP(name="project_{}".format(name),
                               activations=[Identity()])
                           for name in self.source_names]
        self.children.extend(self.projectors)
Example No. 5
    def __init__(self, transition, **kwargs):
        super(FakeAttentionTransition, self).__init__(**kwargs)
        update_instance(self, locals())

        self.state_names = transition.apply.states
        self.context_names = transition.apply.contexts
        self.glimpse_names = []

        self.children = [self.transition]
Example No. 6
    def __init__(self, readout, transition, fork=None, weights_init=None,
                 biases_init=None, **kwargs):
        super(BaseSequenceGenerator, self).__init__(**kwargs)
        update_instance(self, locals())

        self.state_names = transition.compute_states.outputs
        self.context_names = transition.apply.contexts
        self.glimpse_names = transition.take_look.outputs
        self.children = [self.readout, self.fork, self.transition]
Example No. 7
 def __init__(self,
              input_dim,
              output_dim,
              weights_init,
              biases_init=None,
              use_bias=True,
              **kwargs):
     super(Linear, self).__init__(**kwargs)
     update_instance(self, locals())
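To make the effect of that one line concrete, here is a simplified stand-in for the Linear brick above (not the real class), with update_instance(self, locals()) replaced by the explicit assignments it performs:

class Linear(object):
    # Simplified illustration only; the real brick also forwards **kwargs
    # to its base class and allocates parameters.
    def __init__(self, input_dim, output_dim, weights_init,
                 biases_init=None, use_bias=True):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.weights_init = weights_init
        self.biases_init = biases_init
        self.use_bias = use_bias

brick = Linear(16, 8, weights_init=None)  # a real call passes an initialization scheme
print(brick.input_dim, brick.output_dim, brick.use_bias)  # 16 8 True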
Example No. 8
    def __init__(self, readout_dim=None, emitter=None, feedbacker=None,
                 **kwargs):
        super(Readout, self).__init__(**kwargs)

        if not emitter:
            emitter = TrivialEmitter(readout_dim)
        if not feedbacker:
            feedbacker = TrivialFeedback(readout_dim)
        update_instance(self, locals())

        self.children = [self.emitter, self.feedbacker]
Example No. 9
    def __init__(self, activation, gate_activation, dim, weights_init,
                 use_update_gate=True, use_reset_gate=True, **kwargs):
        super(GatedRecurrent, self).__init__(**kwargs)

        if not activation:
            activation = Identity()
        if not gate_activation:
            gate_activation = Sigmoid()

        update_instance(self, locals())
        self.children = [activation, gate_activation]
Example No. 10
 def __init__(self, activations, dims, weights_init, biases_init=None,
              use_bias=True, **kwargs):
     super(MLP, self).__init__(**kwargs)
     self.linear_transformations = [Linear(name='linear_{}'.format(i))
                                    for i in range(len(activations))]
     self.children = (self.linear_transformations +
                      [activation for activation in activations
                       if activation is not None])
     if not dims:
         dims = [None] * (len(activations) + 1)
     update_instance(self, locals())
Example No. 11
 def __init__(self, activations, dims, **kwargs):
     update_instance(self, locals())
     self.linear_transformations = [Linear(name='linear_{}'.format(i))
                                    for i in range(len(activations))]
     # Interleave the transformations and activations
     children = [child for child in list(chain(*zip(
         self.linear_transformations, activations))) if child is not None]
     if not dims:
         dims = [None] * (len(activations) + 1)
     self.dims = dims
     super(MLP, self).__init__(children, **kwargs)
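The chain(*zip(...)) expression in this variant interleaves the Linear bricks with their activations and then drops the missing ones. A self-contained illustration of just that step, using plain strings in place of bricks:

from itertools import chain

linears = ['linear_0', 'linear_1']   # stand-ins for the Linear bricks
activations = ['tanh', None]         # None means "no activation after this layer"

# zip pairs each Linear with its activation, chain flattens the pairs,
# and the comprehension filters out the None entries.
children = [child for child in chain(*zip(linears, activations))
            if child is not None]
print(children)  # ['linear_0', 'tanh', 'linear_1']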
Example No. 12
    def __init__(self, channel_names, input_dims, output_dims,
                 prototype=None, **kwargs):
        super(Parallel, self).__init__(**kwargs)
        update_instance(self, locals())

        if not self.prototype:
            self.prototype = MLP([Identity()], use_bias=False)
        self.transforms = []
        for name in self.channel_names:
            self.transforms.append(copy.deepcopy(self.prototype))
            self.transforms[-1].name = "transform_{}".format(name)
        self.children = self.transforms
Example No. 13
 def __init__(self, input_dim, output_dim, num_pieces, **kwargs):
     super(LinearMaxout, self).__init__(**kwargs)
     update_instance(self, locals())
     self.linear_transformation = Linear(name='linear_to_maxout',
                                         input_dim=input_dim,
                                         output_dim=output_dim * num_pieces,
                                         weights_init=self.weights_init,
                                         biases_init=self.biases_init,
                                         use_bias=self.use_bias)
     self.maxout_transformation = Maxout(name='maxout',
                                         num_pieces=num_pieces)
     self.children = [self.linear_transformation,
                      self.maxout_transformation]
Example No. 14
    def __init__(self, transition, num_params, params_name,
                 weights_init, biases_init, **kwargs):
        super(AddParameters, self).__init__(**kwargs)
        update_instance(self, locals())

        self.input_names = [name for name in transition.apply.sequences
                            if name != 'mask']
        self.state_name = transition.apply.states[0]
        assert len(transition.apply.states) == 1

        self.fork = Fork(self.input_names)
        # Could be also several init bricks, one for each of the states
        self.init = MLP([Identity()], name="init")
        self.children = [self.transition, self.fork, self.init]
Example No. 15
    def __init__(self, state_names, state_dims, sequence_dim, match_dim,
                 state_transformer=None, sequence_transformer=None,
                 energy_computer=None, weights_init=None, biases_init=None,
                 **kwargs):
        super(SequenceContentAttention, self).__init__(**kwargs)
        update_instance(self, locals())

        self.state_transformers = Parallel(state_names, self.state_transformer,
                                           name="state_trans")
        if not self.sequence_transformer:
            self.sequence_transformer = MLP([Identity()], name="seq_trans")
        if not self.energy_computer:
            self.energy_computer = MLP([Identity()], name="energy_comp")
        self.children = [self.state_transformers, self.sequence_transformer,
                         self.energy_computer]
Example No. 16
 def __init__(self,
              activations,
              dims,
              weights_init,
              biases_init=None,
              use_bias=True,
              **kwargs):
     super(MLP, self).__init__(**kwargs)
     self.linear_transformations = [
         Linear(name='linear_{}'.format(i)) for i in range(len(activations))
     ]
     self.children = (self.linear_transformations + [
         activation for activation in activations if activation is not None
     ])
     if not dims:
         dims = [None] * (len(activations) + 1)
     update_instance(self, locals())
Example No. 17
    def __init__(self, transition, attention, mixer,
                 attended_name=None, attended_mask_name=None,
                 **kwargs):
        super(AttentionTransition, self).__init__(**kwargs)
        update_instance(self, locals())

        self.sequence_names = self.transition.apply.sequences
        self.state_names = self.transition.apply.states
        self.context_names = self.transition.apply.contexts

        if not self.attended_name:
            self.attended_name = self.context_names[0]
        if not self.attended_mask_name:
            self.attended_mask_name = self.context_names[1]
        self.preprocessed_attended_name = "preprocessed_" + self.attended_name

        self.glimpse_names = self.attention.take_look.outputs
        # We need to determine which glimpses are fed back.
        # Currently we extract it from `take_look` signature.
        self.previous_glimpses_needed = [
            name for name in self.glimpse_names
            if name in self.attention.take_look.inputs]

        self.children = [self.transition, self.attention, self.mixer]
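The previous_glimpses_needed computation above is an order-preserving filter: it keeps only those glimpse names that take_look also accepts as inputs. With made-up names (the real ones depend on the attention brick in use):

glimpse_names = ['weighted_averages', 'weights']               # illustrative only
take_look_inputs = ['states', 'sequence', 'weighted_averages']

previous_glimpses_needed = [name for name in glimpse_names
                            if name in take_look_inputs]
print(previous_glimpses_needed)  # ['weighted_averages']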
Example No. 18
 def __init__(self, data_stream, request_iterator=None, as_dict=False):
     update_instance(self, locals())
Example No. 19
 def __init__(self, length, dim, weights_init, **kwargs):
     super(LookupTable, self).__init__(**kwargs)
     update_instance(self, locals())
Example No. 20
 def __init__(self, input_dim, output_dim, weights_init,
              biases_init=None, use_bias=True, **kwargs):
     super(Linear, self).__init__(**kwargs)
     update_instance(self, locals())
Example No. 21
 def __init__(self, num_init, weights_init, sparse_init=None):
     if sparse_init is None:
         sparse_init = Constant(0.)
     update_instance(self, locals())
Example No. 22
 def __init__(self, input_dim, output_dim, **kwargs):
     super(Linear, self).__init__(**kwargs)
     update_instance(self, locals())
Example No. 23
 def __init__(self, rng, func, seq_len, batch_size):
     update_instance(self, locals())
     self.num_params = len(inspect.getargspec(self.func).args) - 1
Example No. 24
 def __init__(self, dim, weights_init, activation=None, **kwargs):
     super(Recurrent, self).__init__(**kwargs)
     if activation is None:
         activation = Identity()
     update_instance(self, locals())
     self.children = [activation]
Example No. 25
 def __init__(self, variable):
     update_instance(self, locals())
Example No. 26
 def __init__(self, rng, seq_len):
     update_instance(self, locals())
Example No. 27
    def __init__(self, rng, seq_len, batch_size):
        update_instance(self, locals())

        logger.debug("Markov chain entropy: {}".format(self.entropy))
        logger.debug("Expected min error: {}".format(
            -self.entropy * self.seq_len * self.batch_size))
Example No. 28
 def __init__(self, rng=None, theano_rng=None, **kwargs):
     super(DefaultRNG, self).__init__(**kwargs)
     update_instance(self, locals())
Example No. 29
 def __init__(self, theano_rng=None, **kwargs):
     super(Random, self).__init__(**kwargs)
     update_instance(self, locals())
Example No. 30
 def __init__(self, prototype, weights_init, **kwargs):
     super(Bidirectional, self).__init__(**kwargs)
     update_instance(self, locals())
     self.children = [copy.deepcopy(prototype) for i in range(2)]
     self.children[0].name = 'forward'
     self.children[1].name = 'backward'
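Both the Parallel example above and this Bidirectional one rely on copy.deepcopy so that every child is an independent copy of the prototype; renaming one copy must not touch the other or the original. A minimal standalone check of that behaviour, with a toy class rather than a real brick:

import copy

class Prototype(object):
    def __init__(self, name):
        self.name = name

prototype = Prototype('prototype')
children = [copy.deepcopy(prototype) for _ in range(2)]
children[0].name = 'forward'
children[1].name = 'backward'

# Each copy is a separate object, so the names stay independent.
print(children[0].name, children[1].name, prototype.name)  # forward backward prototype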