Example #1
def test_multiple_inputs():
    """
    Create a VectorSpacesDataset with two inputs (features0 and features1)
    and train an MLP which takes both inputs for 1 epoch.
    """
    mlp = MLP(layers=[
        FlattenerLayer(
            CompositeLayer('composite',
                           [Linear(10, 'h0', 0.1),
                            Linear(10, 'h1', 0.1)], {
                                0: [1],
                                1: [0]
                            })),
        Softmax(5, 'softmax', 0.1)
    ],
              input_space=CompositeSpace([VectorSpace(15),
                                          VectorSpace(20)]),
              input_source=('features0', 'features1'))
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace(
            [VectorSpace(20), VectorSpace(15),
             VectorSpace(5)]), ('features1', 'features0', 'targets')))
    train = Train(dataset, mlp, SGD(0.1, batch_size=5))
    train.algorithm.termination_criterion = EpochCounter(1)
    train.main_loop()
Example #2
def test_nest_specs():
    x1 = TT.matrix('x1')
    x2 = TT.matrix('x2')
    x3 = TT.matrix('x3')
    x4 = TT.matrix('x4')

    for nested_space, nested_source, nested_data in [
            (VectorSpace(dim=10), 'target', x2),
            (CompositeSpace([VectorSpace(dim=3), VectorSpace(dim=9)]),
                ('features', 'features'),
                (x1, x4)),
            (CompositeSpace([VectorSpace(dim=3),
                             CompositeSpace([VectorSpace(dim=10),
                                             VectorSpace(dim=7)])]),
                ('features', ('target', 'features')),
                (x1, (x2, x3))),
            ]:

        mapping = DataSpecsMapping((nested_space, nested_source))
        flat_space = mapping.flatten(nested_space)
        flat_source = mapping.flatten(nested_source)
        flat_data = mapping.flatten(nested_data)

        renested_space = mapping.nest(flat_space)
        renested_source = mapping.nest(flat_source)
        renested_data = mapping.nest(flat_data)

        assert_equal(renested_space, nested_space)
        assert_equal(renested_source, nested_source)
        assert_equal(renested_data, nested_data)
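For context, a quick sketch (not part of the test above) of the other job DataSpecsMapping is used for: when the very same (space, source) pair occurs at several places in the nested specs, flattening is expected to merge the duplicates, so each stream appears only once in the flat tuple. Here features_space is deliberately the same object in both positions:

features_space = VectorSpace(dim=3)
targets_space = VectorSpace(dim=5)
nested_space = CompositeSpace([features_space,
                               CompositeSpace([features_space,
                                               targets_space])])
nested_source = ('features', ('features', 'targets'))
mapping = DataSpecsMapping((nested_space, nested_source))
flat_source = mapping.flatten(nested_source)
# expected: flat_source == ('features', 'targets')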
Example #3
    def _build_data_specs(self):
        """
        Computes a nested data_specs for input and all channels

        Also computes the mapping to flatten it. This function is called from
        redo_theano.
        """
        # Ask the model what it needs
        m_space, m_source = self.model.get_monitoring_data_specs()
        input_spaces = [m_space]
        input_sources = [m_source]
        for channel in self.channels.values():
            space = channel.data_specs[0]
            assert isinstance(space, Space)
            input_spaces.append(space)
            input_sources.append(channel.data_specs[1])

        nested_space = CompositeSpace(input_spaces)
        nested_source = tuple(input_sources)

        self._nested_data_specs = (nested_space, nested_source)
        self._data_specs_mapping = DataSpecsMapping(self._nested_data_specs)

        flat_space = self._data_specs_mapping.flatten(nested_space,
                                                      return_tuple=True)
        flat_source = self._data_specs_mapping.flatten(nested_source,
                                                       return_tuple=True)
        self._flat_data_specs = (CompositeSpace(flat_space), flat_source)
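A sketch of how such flat specs are typically consumed (a fragment that would live in a method of the same class, mirroring the SGD train loop in Example #25; `dataset` is assumed to serve the requested sources): the iterator is driven with the flat specs, and each flat batch is nested back before use.

        it = dataset.iterator(mode='sequential',
                              batch_size=100,
                              data_specs=self._flat_data_specs,
                              return_tuple=True)
        for flat_batch in it:
            nested_batch = self._data_specs_mapping.nest(flat_batch)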
Example #4
def test_get_layer_monitor_channels():
    """
    Create a MLP with multiple layer types
    and get layer monitoring channels for MLP.
    """
    mlp = MLP(layers=[
        FlattenerLayer(
            CompositeLayer('composite',
                           [Linear(10, 'h0', 0.1),
                            Linear(10, 'h1', 0.1)], {
                                0: [1],
                                1: [0]
                            })),
        Softmax(5, 'softmax', 0.1)
    ],
              input_space=CompositeSpace([VectorSpace(15),
                                          VectorSpace(20)]),
              input_source=('features0', 'features1'))
    dataset = VectorSpacesDataset(
        (np.random.rand(20, 20).astype(theano.config.floatX),
         np.random.rand(20, 15).astype(theano.config.floatX),
         np.random.rand(20, 5).astype(theano.config.floatX)),
        (CompositeSpace(
            [VectorSpace(20), VectorSpace(15),
             VectorSpace(5)]), ('features1', 'features0', 'targets')))
    state_below = mlp.get_input_space().make_theano_batch()
    targets = mlp.get_target_space().make_theano_batch()
    mlp.get_layer_monitoring_channels(state_below=state_below,
                                      state=None,
                                      targets=targets)
Example #5
    def __init__(self, shape, axes=None):
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')], axes, shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #6
    def set_input_space(self, space):
        if not isinstance(space, CompositeSpace):
            if self.inputs_to_layers is not None:
                raise ValueError("CompositeLayer received an inputs_to_layers "
                                 "mapping, but does not have a CompositeSpace "
                                 "as its input space, so there is nothing to "
                                 "map. Received " + str(space) + " as input "
                                 "space.")
            else:
                self.layers_to_inputs = OrderedDict()
                for i, layer in enumerate(self.layers):
                    layer.set_input_space(space)
        else:
            if self.num_layers != len(space.components) * 2:
                raise ValueError("The number of layers must be twice the "
                                 "number of components in the input space.")
            else:
                self.layers_to_inputs = OrderedDict()
                for i, layer in enumerate(self.layers):
                    self.layers_to_inputs[i] = [i // 2]
                    cur_space = space.restrict(self.layers_to_inputs[i])
                    layer.set_input_space(cur_space)

        self.input_space = space
        self.output_space = CompositeSpace(tuple(layer.get_output_space()
                                                 for layer in self.layers))
        self._target_space = CompositeSpace(tuple(layer.get_target_space()
                                                  for layer in self.layers))
Example #7
def test_attention():
    input_space = CompositeSpace([ContextSpace(dim=14, num_annotation=2),
                                  VectorSpace(dim=12)])
    state_below = input_space.make_theano_batch()

    atten = ConditionLSTM(
        layer_name='cond_lstm',
        attention=Attention(layer_name='attention',
                            layers=[LinearAttention(layer_name='proj',
                                                    irange=2.)]))
    atten.set_input_space(input_space)
Example #8
    def __init__(self, shape, axes=None):
        """
        The arguments describe how the data is laid out in the design matrix.

        Parameters
        ----------

        shape : tuple
          A tuple of 4 ints, describing the shape of each datum.
          This is the size of each axis in <axes>, excluding the 'b' axis.

        axes : tuple
          A tuple of the following elements in any order:
            'b'  batch axis
            's'  stereo axis
             0   image axis 0 (row)
             1   image axis 1 (column)
            'c'  channel axis
        """
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')], axes, shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes,
                               dtype=None)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #9
    def __init__(self,
                 nvis,
                 nhid,
                 hidden_transition_model,
                 irange=0.05,
                 non_linearity='sigmoid',
                 use_ground_truth=True):
        allowed_non_linearities = {'sigmoid': T.nnet.sigmoid, 'tanh': T.tanh}
        self.nvis = nvis
        self.nhid = nhid
        self.hidden_transition_model = hidden_transition_model
        self.use_ground_truth = use_ground_truth
        self.alpha = sharedX(1)
        self.alpha_decrease_rate = 1.0  # 0.99

        assert non_linearity in allowed_non_linearities
        self.non_linearity = allowed_non_linearities[non_linearity]

        # Space initialization
        self.input_space = CompositeSpace(
            [VectorSequenceSpace(dim=self.nvis),
             VectorSequenceSpace(dim=62)])
        self.hidden_space = VectorSpace(dim=self.nhid)
        self.output_space = VectorSequenceSpace(dim=1)
        self.input_source = ('features', 'phones')
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-irange,
                                       high=irange,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Phones-to-hidden matrix
        V_value = numpy.random.uniform(low=-irange,
                                       high=irange,
                                       size=(62, self.nhid))
        self.V = sharedX(V_value, name='V')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-irange,
                                       high=irange,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')
Example #10
    def _prepare_generator(self, generator, noise_space,
                           condition_distribution, new_W_irange, input_source):
        noise_dim = noise_space.get_total_dimension()
        condition_dim = self.condition_space.get_total_dimension()

        first_layer = generator.mlp.layers[0]
        pretrain_W, _ = first_layer.get_param_values()

        rng = generator.mlp.rng
        new_W = np.vstack((pretrain_W,
                           rng.uniform(-new_W_irange, new_W_irange,
                                       (condition_dim, pretrain_W.shape[1]))))
        new_W = sharedX(new_W)
        new_W.name = first_layer.get_params()[0].name + '_retrain'

        first_layer.transformer = MatrixMul(new_W)
        first_layer.input_space = CompositeSpace(
            components=[noise_space, self.condition_space])
        generator.mlp.input_space = first_layer.input_space

        # HACK!
        generator.mlp._input_source = input_source

        return ConditionalGenerator(
            generator.mlp,
            input_condition_space=self.condition_space,
            condition_distribution=condition_distribution,
            noise_dim=noise_dim)
Example #11
    def create_input_space(self):
        ws = self.ws * 2 + 1
        return CompositeSpace([
            IndexSpace(max_labels=self.vocab_size, dim=ws),
            IndexSpace(max_labels=self.total_feats, dim=self.feat_num),
            VectorSpace(dim=self.extender_dim * ws)
        ])
Example #12
    def __init__(self,
                 dataset,
                 model,
                 algorithm,
                 save_path=None,
                 save_freq=0,
                 extensions=None,
                 *args,
                 **kwargs):
        # Set data_specs
        spaces, sources = dataset.data_specs
        if isinstance(spaces, CompositeSpace):
            spaces = spaces.components
        else:
            spaces = (spaces, )
            sources = (sources, )
        input_names = list(algorithm.cost.inputs.keys())
        spaces = [spaces[sources.index(source)] for source in input_names]
        if len(spaces) > 1:
            spaces = CompositeSpace(spaces)
            sources = input_names
        else:
            spaces = spaces[0]
            sources = input_names[0]
        model.data_specs = (spaces, sources)

        # Add default extensions
        if not extensions:
            extensions = list()
        extensions.append(DefaultExtension())

        super(Pylearn2Train,
              self).__init__(dataset, model, algorithm, save_path, save_freq,
                             extensions, *args, **kwargs)
Example #13
    def get_monitoring_data_specs(self):

        space = CompositeSpace(
            [self.get_input_space(),
             self.get_target_space()])
        source = (self.get_input_source(), self.get_target_source())
        return (space, source)
Example #14
    def iterator(self,
                 mode=None,
                 batch_size=None,
                 num_batches=None,
                 rng=None,
                 data_specs=None,
                 return_tuple=False):
        """
        .. todo::

            WRITEME
        """
        # Build the right data_specs to query self.raw
        if data_specs is not None:
            assert is_flat_specs(data_specs)
            space, source = data_specs
            if not isinstance(source, tuple):
                source = (source, )
            if isinstance(space, CompositeSpace):
                space = tuple(space.components)
            else:
                space = (space, )

            # Put 'features' first, as this is what TransformerIterator
            # is expecting
            if 'features' not in source:
                # 'features' is not needed; get things directly from
                # the original data
                raw_data_specs = data_specs
            else:
                feature_idx = source.index('features')
                if self.space_preserving:
                    # Ask self.raw for the data in the expected space,
                    # and self.transformer will operate in that space
                    feature_input_space = space[feature_idx]
                else:
                    # We need to ask the transformer what its input space is
                    feature_input_space = self.transformer.get_input_space()

                raw_space = CompositeSpace((feature_input_space, ) +
                                           space[:feature_idx] +
                                           space[feature_idx + 1:])
                raw_source = (('features', ) + source[:feature_idx] +
                              source[feature_idx + 1:])
                raw_data_specs = (raw_space, raw_source)
        else:
            raw_data_specs = None

        raw_iterator = self.raw.iterator(mode=mode,
                                         batch_size=batch_size,
                                         num_batches=num_batches,
                                         rng=rng,
                                         data_specs=raw_data_specs,
                                         return_tuple=return_tuple)

        final_iterator = TransformerIterator(raw_iterator,
                                             self,
                                             data_specs=data_specs)

        return final_iterator
Example #15
    def __init__(self, nvis, nhid):
        self.nvis = nvis
        self.nhid = nhid

        # Space initialization
        self.input_space = CompositeSpace([
            VectorSequenceSpace(window_dim=self.nvis),
            VectorSequenceSpace(window_dim=62)
        ])
        self.output_space = VectorSequenceSpace(window_dim=1)
        self.input_source = ('features', 'phones')
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Phones-to-hidden matrix
        V_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(62, self.nhid))
        self.V = sharedX(V_value, name='V')
        # Hidden-to-hidden matrix
        M_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nhid, self.nhid))
        self.M = sharedX(M_value, name='M')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')
Example #16
def test_input_and_target_source():
    """
    Create a MLP and test input_source and target_source
    for default and non-default options.
    """
    mlp = MLP(
        layers=[CompositeLayer(
            'composite',
            [Linear(10, 'h0', 0.1),
             Linear(10, 'h1', 0.1)],
            {
                0: [1],
                1: [0]
            }
        )],
        input_space=CompositeSpace([VectorSpace(15), VectorSpace(20)]),
        input_source=('features0', 'features1'),
        target_source=('targets0', 'targets1')
    )
    np.testing.assert_equal(mlp.get_input_source(), ('features0', 'features1'))
    np.testing.assert_equal(mlp.get_target_source(), ('targets0', 'targets1'))

    mlp = MLP(
        layers=[Linear(10, 'h0', 0.1)],
        input_space=VectorSpace(15)
    )
    np.testing.assert_equal(mlp.get_input_source(), 'features')
    np.testing.assert_equal(mlp.get_target_source(), 'targets')
Example #17
    def get_data_specs(self, model):
        """
        Provides a default data specification.

        The cost requests input features from the model's input space and
        input source. `self` must contain a bool field called `supervised`.
        If this field is True, the cost requests targets as well.

        Parameters
        ----------
        model : pylearn2.models.Model
            TODO WRITEME
        """
        if self.supervised:
            # b=model.get_input_space()
            # a = model.get_latent_space()
            # space = CompositeSpace([model.get_input_space(),
            #                         CompositeSpace([model.get_target_space(),model.get_latent_space()])])
            # sources = (model.get_input_source(), (model.get_target_source(),model.get_latent_source()))
            # mapping = DataSpecsMapping((space, sources))
            # flat_source = mapping.flatten(sources)
            # # flat_source == ('features', 'features', 'targets')
            # flat_space = mapping.flatten(space)
            # return (flat_space, flat_source)
            space = CompositeSpace([model.get_input_space(),
                                    model.get_target_space(),
                                    model.get_latent_space()])
            sources = (model.get_input_source(),
                       model.get_target_source(),
                       model.get_latent_source())
            return space, sources
        else:
            return (model.get_input_space(), model.get_input_source())
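As the commented-out block above hints, a caller can still flatten the composite value returned here into flat, non-redundant specs; a sketch, where `cost` is an instance of this class and `model` is assumed to expose the three getters used above:

space, sources = cost.get_data_specs(model)
mapping = DataSpecsMapping((space, sources))
flat_space = mapping.flatten(space, return_tuple=True)
flat_source = mapping.flatten(sources, return_tuple=True)
flat_data_specs = (CompositeSpace(flat_space), flat_source)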
Example #18
    def _make_nested_space(self, flat, mapping):
        """
        Auxiliary recursive function used by self.nest

        Parameters
        ----------
        flat : WRITEME
        mapping : WRITEME

        Returns
        -------
        WRITEME
        """
        if isinstance(mapping, int):
            # We are at a leaf of the tree
            idx = mapping
            if isinstance(flat, CompositeSpace):
                assert 0 <= idx < len(flat.components)
                return flat.components[idx]
            else:
                assert idx == 0
                return flat
        else:
            return CompositeSpace([
                self._make_nested_space(flat, sub_mapping)
                for sub_mapping in mapping
            ])
Example #19
    def __init__(
        self,
        dataset,
        model,
        header=None,
        class_prf1_channels=True,
        confusion_channels=True,
        #                  seq_prf1_channel=True, seq_confusion_channel=True
    ):
        self.dataset = dataset
        self.header = header

        self.class_prf1_channels = class_prf1_channels
        self.confusion_channels = confusion_channels

        minibatch = model.get_input_space().make_theano_batch()
        self.output_fn = theano.function(inputs=[minibatch],
                                         outputs=model.fprop(minibatch))

        self.data_specs = (CompositeSpace((model.get_input_space(),
                                           model.get_output_space())),
                           ("features", "targets"))

        if self.header is not None:
            self.channel_prefix = self.header
        else:
            if hasattr(self.dataset,
                       'name'):  # self.dataset.name is not None
                self.channel_prefix = self.dataset.name
            else:
                self.channel_prefix = ''
Example #20
    def _build_output_space(self, space):
        if isinstance(space, IndexSpace):
            return VectorSpace(self.dim * space.dim)
        if isinstance(space, CompositeSpace):
            return CompositeSpace(
                [self._build_output_space(c) for c in space.components])
        assert False
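A short illustration of the recursion (hypothetical sizes, taking self.dim = 5 as the per-label output dimension):

# IndexSpace(max_labels=100, dim=3)  ->  VectorSpace(5 * 3) == VectorSpace(15)
# CompositeSpace([IndexSpace(max_labels=100, dim=2),
#                 IndexSpace(max_labels=100, dim=3)])
#     ->  CompositeSpace([VectorSpace(10), VectorSpace(15)])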
Example #21
    def __setstate__(self, d):
        """
        .. todo::

            WRITEME
        """

        if d['design_loc'] is not None:
            if control.get_load_data():
                d['X'] = np.load(d['design_loc'])
            else:
                d['X'] = None

        if d['compress']:
            X = d['X']
            mx = d['compress_max']
            mn = d['compress_min']
            del d['compress_max']
            del d['compress_min']
            d['X'] = 0
            self.__dict__.update(d)
            if X is not None:
                self.X = np.cast['float32'](X) * mx / 255. + mn
            else:
                self.X = None
        else:
            self.__dict__.update(d)

        # To be able to unpickle older data after the addition of
        # the data_specs mechanism
        if not all(m in d for m in ('data_specs', 'X_space',
                                    '_iter_data_specs', 'X_topo_space')):
            X_space = VectorSpace(dim=self.X.shape[1])
            X_source = 'features'
            if self.y is None:
                space = X_space
                source = X_source
            else:
                y_space = VectorSpace(dim=self.y.shape[-1])
                y_source = 'targets'

                space = CompositeSpace((X_space, y_space))
                source = (X_source, y_source)

            self.data_specs = (space, source)
            self.X_space = X_space
            self._iter_data_specs = (X_space, X_source)

            view_converter = d.get('view_converter', None)
            if view_converter is not None:
                # Get the topo_space from the view_converter
                if not hasattr(view_converter, 'topo_space'):
                    raise NotImplementedError("Not able to get a topo_space "
                                              "from this converter: %s"
                                              % view_converter)

                # self.X_topo_space stores a "default" topological space that
                # will be used only when self.iterator is called without a
                # data_specs, and with "topo=True", which is deprecated.
                self.X_topo_space = view_converter.topo_space
Example #22
def process_dataset(model, dataset, data_specs=None, output_fn=None):

    if data_specs is None:
        data_specs = (CompositeSpace((model.get_input_space(),
                                      model.get_output_space())),
                      ("features", "targets"))

    if output_fn is None:
        with log_timing(log, 'compiling output_fn'):
            minibatch = model.get_input_space().make_theano_batch()
            output_fn = theano.function(inputs=[minibatch],
                                        outputs=model.fprop(minibatch))

    it = dataset.iterator('sequential',
                          batch_size=100,
                          data_specs=data_specs)
    y_pred = []
    y_real = []
    output = []
    for minibatch, target in it:
        out = output_fn(minibatch)  # this hangs for convnet on Jeep2
        output.append(out)
        y_pred.append(np.argmax(out, axis=1))
        y_real.append(np.argmax(target, axis=1))
    y_pred = np.hstack(y_pred)
    y_real = np.hstack(y_real)
    output = np.vstack(output)

    return y_real, y_pred, output
Example #23
    def __init__(self,
                 data_generator=None,
                 n_classes=101,
                 n_examples=10,
                 n_frames=10,
                 n_features=4096):
        """
		:type data_generator: function
		:param data_generator: function used to generate data in the form of X, y tuple. X is a 3-dimensional array with dimensions (examples, frames/time, features). y is a 2-dimensional array with dimensions (examples, target values). Optional value defaults to generating random therefore 'hard' data.

		:type n_classes: int
		:param n_classes: the number of possible target values or n_classes

		:type n_examples: int
		:param n_examples: the number of examples to be generated in the dataset

		:type n_frames: int
		:param n_frames: the number of frames or time steps in each example

		:type n_features: int
		:param n_features: the number of features in each time step
		"""
        rng = np.random.RandomState(seed=42)
        self.n_features = n_features
        self.n_examples = n_examples
        if data_generator is None:
            data_generator = hard_data_generator
        self.data_generator = data_generator
        self.X, self.y = self.data_generator(n_classes, n_examples, n_frames,
                                             n_features)

        features_space = VectorSequenceSpace(dim=self.n_features)
        # features_space = SequenceDataSpace(VectorSpace(dim=self.n_features))

        targets_space = VectorSequenceSpace(dim=1)
        # targets_space = SequenceDataSpace(VectorSpace(dim=1))

        space_components = [features_space, targets_space]
        space = CompositeSpace(space_components)

        source = ('features', 'targets')

        self.data_specs = (space, source)

        self._iter_mode = resolve_iterator_class('shuffled_sequential')
        self._iter_data_specs = (CompositeSpace(
            (features_space, targets_space)), source)
Example #24
def main(model_path):
    print 'Loading model...'
    model = serial.load(model_path)

    dataset_yaml_src = model.dataset_yaml_src
    dataset = yaml_parse.load(dataset_yaml_src)
    data_specs = (CompositeSpace([VectorSequenceSpace(dim=model.nvis),
                                 VectorSequenceSpace(dim=62)]),
                  ('features', 'phones'))
    it = dataset.iterator(mode='sequential', data_specs=data_specs,
                          num_batches=1, batch_size=1)
    original_sequence, phones = it.next()


    X = T.vector('X')
    p = T.vector('p')
    h = T.vector('h')
    out = T.vector('out')

    next_h, pred = model.fprop_step(X, p, h, out)
    fn = theano.function(inputs=[X, p, h, out], outputs=[next_h, pred],
                         on_unused_input='ignore')

    # Reconstruction
    numpy_h = numpy.zeros(model.nhid)
    numpy_out = numpy.zeros(1)
    x_t = numpy.copy(original_sequence[0])

    reconstruction_list = [original_sequence[0]]
    for p_t in phones:
        numpy_h, numpy_out = fn(x_t, p_t, numpy_h, numpy_out)
        reconstruction_list.append(numpy_out)
        x_t[:-1] = x_t[1:]
        x_t[-1] = numpy_out

    numpy_reconstruction = numpy.concatenate(reconstruction_list)
    numpy_reconstruction = numpy_reconstruction * dataset._std + dataset._mean
    numpy_reconstruction = numpy.cast['int16'](numpy_reconstruction)
    wf.write("reconstruction.wav", 16000, numpy_reconstruction)

    # One-on-one prediction
    numpy_h = numpy.zeros(model.nhid)
    numpy_out = numpy.zeros(1)

    prediction_list = [numpy.copy(original_sequence[0])]
    for x_t, p_t in zip(original_sequence, phones):
        numpy_h, numpy_out = fn(x_t, p_t, numpy_h, numpy_out)
        prediction_list.append(numpy_out)

    numpy_prediction = numpy.concatenate(prediction_list)
    numpy_prediction = numpy_prediction * dataset._std + dataset._mean
    numpy_prediction = numpy.cast['int16'](numpy_prediction)
    wf.write("prediction.wav", 16000, numpy_prediction)

    original = numpy.concatenate([original_sequence[0],
                                  original_sequence[1:, -1]])
    original = original * dataset._std + dataset._mean
    original = numpy.cast['int16'](original)
    wf.write("original.wav", 16000, original)
Example #25
    def train(self, dataset):
        if not hasattr(self, 'sgd_update'):
            raise Exception("train called without first calling setup")

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)

        self.first = False
        rng = self.rng
        if not is_stochastic(self.train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)

        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError(
                "Unable to train with SGD, because "
                "the cost does not actually use data from the data set. "
                "data_specs: %s" % str(data_specs))
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(mode=self.train_iteration_mode,
                                    batch_size=self.batch_size,
                                    data_specs=flat_data_specs,
                                    return_tuple=True,
                                    rng=rng,
                                    num_batches=self.batches_per_iter)

        on_load_batch = self.on_load_batch
        for batch in iterator:
            for callback in on_load_batch:
                callback(mapping.nest(batch))
            self.sgd_update(*batch)
            # iterator might return a smaller batch if dataset size
            # isn't divisible by batch_size
            # Note: if data_specs[0] is a NullSpace, there is no way to know
            # how many examples would actually have been in the batch,
            # since it was empty, so actual_batch_size would be reported as 0.
            actual_batch_size = flat_data_specs[0].np_batch_size(batch)
            self.monitor.report_batch(actual_batch_size)
            for callback in self.update_callbacks:
                callback(self)

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)
Example #26
    def __init__(self,
                 data_mlp,
                 condition_mlp,
                 joint_mlp,
                 input_data_space,
                 input_condition_space,
                 input_source=('features', 'condition'),
                 *args,
                 **kwargs):
        """
        A discriminator acting within a cGAN which may "condition" on
        extra information.

        Parameters
        ----------
        data_mlp: pylearn2.models.mlp.MLP
            MLP which processes the data-space information. Must output
            a `VectorSpace` of some sort.

        condition_mlp: pylearn2.models.mlp.MLP
            MLP which processes the condition-space information. Must
            output a `VectorSpace` of some sort.

        joint_mlp: pylearn2.models.mlp.MLP
            MLP which processes the combination of the outputs of the
            data MLP and the condition MLP.

        input_data_space : pylearn2.space.CompositeSpace
            Space which contains the empirical / model-generated data

        input_condition_space : pylearn2.space.CompositeSpace
            Space which contains the extra data being conditioned on

        kwargs : dict
            Passed on to MLP superclass.
        """

        # Make sure user isn't trying to override any fixed keys
        for illegal_key in ['input_source', 'input_space', 'layers']:
            assert illegal_key not in kwargs

        # First feed forward in parallel along the data and condition
        # MLPs; then feed the composite output to the joint MLP
        layers = [
            CompositeMLPLayer(layer_name='discriminator_composite',
                              layers=[data_mlp, condition_mlp],
                              inputs_to_layers={
                                  0: [0],
                                  1: [1]
                              }), joint_mlp
        ]

        super(ConditionalDiscriminator,
              self).__init__(layers=layers,
                             input_space=CompositeSpace(
                                 [input_data_space, input_condition_space]),
                             input_source=input_source,
                             *args,
                             **kwargs)
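Given the composite input space built above, a forward pass consumes a (data, condition) tuple; a minimal sketch, with `disc` assumed to be a constructed ConditionalDiscriminator:

inputs = disc.get_input_space().make_theano_batch()
data_batch, condition_batch = inputs  # one symbolic batch per component space
score = disc.fprop(inputs)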
Example #27
    def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
        """
        Sets the dataset to represent V, where V is a batch
        of topological views of examples.

        .. todo::

            Why is this parameter named 'V'?

        Parameters
        ----------
        V : ndarray
            An array containing a design matrix representation of
            training examples.
        axes : WRITEME
        """
        assert not contains_nan(V)
        rows = V.shape[axes.index(0)]
        cols = V.shape[axes.index(1)]
        channels = V.shape[axes.index('c')]
        self.view_converter = DefaultViewConverter([rows, cols, channels],
                                                   axes=axes)
        self.X = self.view_converter.topo_view_to_design_mat(V)
        # self.X_topo_space stores a "default" topological space that
        # will be used only when self.iterator is called without a
        # data_specs, and with "topo=True", which is deprecated.
        self.X_topo_space = self.view_converter.topo_space
        assert not contains_nan(self.X)

        # Update data specs
        X_space = VectorSpace(dim=self.X.shape[1])
        X_source = 'features'
        if self.y is None:
            space = X_space
            source = X_source
        else:
            if self.y.ndim == 1:
                dim = 1
            else:
                dim = self.y.shape[-1]
            # This is to support old pickled models
            if getattr(self, 'y_labels', None) is not None:
                y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
            elif getattr(self, 'max_labels', None) is not None:
                y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
            else:
                y_space = VectorSpace(dim=dim)
            y_source = 'targets'

            Latent_space = VectorSpace(dim=self.latent.shape[-1])
            Latent_source = 'latents'

            space = CompositeSpace((X_space, y_space, Latent_space))
            source = (X_source, y_source, Latent_source)

        self.data_specs = (space, source)
        self.X_space = X_space
        self._iter_data_specs = (X_space, X_source)
Example #28
    def __init__(self,
                 X_path=None,
                 y_path=None,
                 from_scipy_sparse_dataset=None,
                 zipped_npy=False):

        self.X_path = X_path
        self.y_path = y_path

        if self.X_path is not None:
            if zipped_npy:
                logger.info('... loading sparse data set from a zip npy file')
                self.X = scipy.sparse.csr_matrix(numpy.load(gzip.open(X_path)),
                                                 dtype=floatX)
            else:
                logger.info('... loading sparse data set from a npy file')
                self.X = scipy.sparse.csr_matrix(numpy.load(X_path).item(),
                                                 dtype=floatX)
        else:
            logger.info('... building from given sparse dataset')
            self.X = from_scipy_sparse_dataset

        if self.y_path is not None:
            if zipped_npy:
                logger.info('... loading sparse data set from a zip npy file')
                self.y = scipy.sparse.csr_matrix(numpy.load(gzip.open(y_path)),
                                                 dtype=floatX).todense()
            else:
                logger.info('... loading sparse data set from a npy file')
                self.y = scipy.sparse.csr_matrix(numpy.load(y_path).item(),
                                                 dtype=floatX)
        else:
            # no y_path given: this dataset has no targets
            self.y = None

        self.data_n_rows = self.X.shape[0]
        self.num_examples = self.data_n_rows
        self.fancy = False
        self.stochastic = False
        X_space = VectorSpace(dim=self.X.shape[1])
        X_source = 'features'
        if y_path is None:
            space = X_space
            source = X_source
        else:
            if self.y.ndim == 1:
                dim = 1
            else:
                dim = self.y.shape[-1]
            y_space = VectorSpace(dim=dim)
            y_source = 'targets'

            space = CompositeSpace((X_space, y_space))
            source = (X_source, y_source)

        self.data_specs = (space, source)
        self.X_space = X_space
        self._iter_data_specs = (self.X_space, 'features')
Example #29
def get_model_results(filename, model_folder, subject, bands, fold, kwargs):
    from pylearn2.datasets import ecog_neuro
    kwargs = copy.deepcopy(kwargs)
    file_loc = os.path.join(model_folder, filename)
    model = serial.load(file_loc)
    X_sym = model.get_input_space().make_theano_batch()
    target_space = model.get_target_space()
    y_inpt = target_space.make_theano_batch()
    y_sym = y_inpt
    input_space = model.get_input_space()
    ec = ecog_neuro

    ds = ec.ECoG(subject, bands, 'train', fold=fold, **kwargs)
    ts = ds.get_test_set()
    acts = model.fprop(X_sym, return_all=True)
    y_hat = acts[-1]
    hidden = list(acts[:-1])
    n_hidden = len(hidden)
    if isinstance(model.layers[-1], FlattenerLayer):
        comp_space = model.layers[-1].raw_layer.get_output_space()
        y_hat_list = list(comp_space.undo_format_as(y_hat, target_space))
        y_sym_list = list(target_space.format_as(y_inpt, comp_space))
        n_targets = len(y_hat_list)
    else:
        n_targets = 1
        y_hat_list = [y_hat]
        y_sym_list = [y_sym]
    misclass_sym = []
    indices_sym = []
    logits_sym = []
    for yh, ys in zip(y_hat_list, y_sym_list):
        misclass_sym.append(nnet.Misclass(ys, yh))
        indices_sym.append(
            T.join(1, T.argmax(ys, axis=1, keepdims=True),
                   T.argmax(yh, axis=1, keepdims=True)))
        if isinstance(yh.owner.op, T.nnet.Softmax):
            logits_sym.append(nnet.arg_of_softmax(yh))
        else:
            logits_sym.append(yh)

    f = theano.function([X_sym, y_inpt], misclass_sym + indices_sym +
                        y_hat_list + logits_sym + hidden)
    it = ts.iterator(mode='sequential',
                     batch_size=ts.X.shape[0],
                     num_batches=1,
                     data_specs=(CompositeSpace(
                         (model.get_input_space(), model.get_target_space())),
                                 (model.get_input_source(),
                                  model.get_target_source())))
    X, y = it.next()
    rvals = f(X, y)
    misclass = list(rvals[:n_targets])
    indices = list(rvals[n_targets:2 * n_targets])
    y_hats = list(rvals[2 * n_targets:3 * n_targets])
    logits = list(rvals[3 * n_targets:4 * n_targets])
    hidden = list(rvals[4 * n_targets:4 * n_targets + n_hidden])
    return misclass, indices, y_hats, logits, hidden
Example #30
    def _create_data_specs(self):
        ws = self.window_size * 2 + 1
        space = CompositeSpace((IndexSequenceSpace(max_labels=self.vocab_size,
                                                   dim=ws),
                                IndexSequenceSpace(max_labels=self.total_feats,
                                                   dim=self.feat_num),
                                VectorSequenceSpace(dim=self.n_classes)))
        source = ('words', 'features', 'targets')
        self.data_specs = (space, source)
Example #31
    def _create_data_specs(self, dataset):
        self.input_space = CompositeSpace([
            dataset.data_specs[0].components[i]
            for i in xrange(len(dataset.data_specs[0].components) - 1)
        ])
        self.output_space = dataset.data_specs[0].components[-1]

        self.input_source = dataset.data_specs[1][:-1]
        self.target_source = dataset.data_specs[1][-1]
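For instance (hypothetical specs), if dataset.data_specs were (CompositeSpace([VectorSpace(15), VectorSpace(20), VectorSpace(5)]), ('features0', 'features1', 'targets')), the code above would yield:

# self.input_space   -> CompositeSpace([VectorSpace(15), VectorSpace(20)])
# self.output_space  -> VectorSpace(5)
# self.input_source  -> ('features0', 'features1')
# self.target_source -> 'targets'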
Example #32
    def get_data_specs(self, model):
        """
        .. todo::

            WRITEME
        """
        space = CompositeSpace([model.get_input_space(),
                                model.get_output_space()])
        sources = (model.get_input_source(), model.get_target_source())
        return (space, sources)
Example #33
    def __init__(self, shape, axes=None):
        """
        The arguments describe how the data is laid out in the design matrix.

        Parameters
        ----------

        shape : tuple
          A tuple of 4 ints, describing the shape of each datum.
          This is the size of each axis in <axes>, excluding the 'b' axis.

        axes : tuple
          A tuple of the following elements in any order:
            'b'  batch axis
            's'  stereo axis
             0   image axis 0 (row)
             1   image axis 1 (column)
            'c'  channel axis
        """
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes,
                               dtype=None)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #34
def test_buffered_provider_2d():
    batch_size = 1024
    num_utts = 10 ** 6
    utt_length = None  # None will result in random lengths
    data_space = CompositeSpace((VectorSpace(dim=40), VectorSpace(dim=1)))
    data_source = ("features", "targets")
    provider = MockUttProvider(num_utts, (data_space, data_source), utt_length)
    buffered_provider = BufferedProviderDataSpec(provider, batch_size)

    num_datapoints = 0
    start = time.clock()
    for batch in buffered_provider:
        data_space.np_validate(batch)
        num_datapoints += batch_size
    stop = time.clock() - start

    print "Converting (and producing by mock provider) %i utterances " "(%i datapoints) by BufferedDataProvider took %f seconds" % (
        num_utts,
        num_datapoints,
        stop,
    )
Example #35
    def __init__(self, nvis, nhid, irange=0.05, non_linearity='sigmoid',
                 use_ground_truth=True):
        allowed_non_linearities = {'sigmoid': T.nnet.sigmoid,
                                   'tanh': T.tanh}
        self.nvis = nvis
        self.nhid = nhid
        self.use_ground_truth = use_ground_truth
        self.alpha = sharedX(1)
        self.alpha_decrease_rate = 0.9

        assert non_linearity in allowed_non_linearities
        self.non_linearity = allowed_non_linearities[non_linearity]

        # Space initialization
        self.input_space = CompositeSpace([
            VectorSequenceSpace(dim=self.nvis),
            VectorSequenceSpace(dim=62)
        ])
        self.output_space = VectorSequenceSpace(dim=1)
        self.input_source = ('features', 'phones')
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Phones-to-hidden matrix
        V_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(62, self.nhid))
        self.V = sharedX(V_value, name='V')
        # Hidden-to-hidden matrix
        M_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nhid, self.nhid))
        self.M = sharedX(M_value, name='M')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')
Example #36
    def __init__(self, shape, axes=None):
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #37
class StereoViewConverter(object):
    """
    Converts stereo image data between two formats:

    #. A dense design matrix, one stereo pair per row (`VectorSpace`)
    #. An image pair (`CompositeSpace` of two `Conv2DSpace`)

    The arguments describe how the data is laid out in the design matrix.

    Parameters
    ----------
    shape: tuple
        A tuple of 4 ints, describing the shape of each datum. This is the size
        of each axis in `<axes>`, excluding the `b` axis.
    axes : tuple
        Tuple of the following elements in any order:

        * 'b' : batch axis
        * 's' : stereo axis
        *  0  : image axis 0 (row)
        *  1  : image axis 1 (column)
        * 'c' : channel axis
    """
    def __init__(self, shape, axes=None):
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))

    def get_formatted_batch(self, batch, space):
        """
        .. todo::

            WRITEME
        """
        return self.storage_space.np_format_as(batch, space)

    def design_mat_to_topo_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_formatted_view(), get_batch_topo()
        """
        return self.storage_space.np_format_as(design_mat, self.topo_space)

    def design_mat_to_weights_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_weights_view()
        """
        return self.design_mat_to_topo_view(design_mat)

    def topo_view_to_design_mat(self, topo_batch):
        """
        Used by `DenseDesignMatrix.set_topological_view()` and
        `DenseDesignMatrix.get_design_mat()`.
        """
        return self.topo_space.np_format_as(topo_batch, self.storage_space)

    def view_shape(self):
        """
        .. todo::

            WRITEME
        """
        return self.shape

    def weights_view_shape(self):
        """
        .. todo::

            WRITEME
        """
        return self.view_shape()

    def set_axes(self, axes):
        """
        .. todo::

            WRITEME
        """
        axes = tuple(axes)

        if len(axes) != 5:
            raise ValueError("Axes must have 5 elements; got %s" % str(axes))

        for required_axis in ('b', 's', 0, 1, 'c'):
            if required_axis not in axes:
                raise ValueError("Axes must contain 'b', 's', 0, 1, and 'c'. "
                                 "Got %s." % str(axes))

        if axes.index('b') != 0:
            raise ValueError("The 'b' axis must come first (axes = %s)." %
                             str(axes))

        def get_batchless_axes(axes):
            axes = list(axes)
            axes.remove('b')
            return tuple(axes)

        if hasattr(self, 'axes'):
            # Reorders the shape vector to match the new axis ordering.
            assert hasattr(self, 'shape')
            old_axes = get_batchless_axes(self.axes)
            new_axes = get_batchless_axes(axes)
            new_shape = tuple(self.shape[old_axes.index(a)] for a in new_axes)
            self.shape = new_shape

        self.axes = axes
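A minimal usage sketch (illustrative shapes only): pack each stereo pair as one design-matrix row, then recover the (left, right) topological view. Under the axes below, the 's' entry of the datum shape sits at index 2 and must be 2:

import numpy

axes = ('b', 0, 1, 's', 'c')
shape = (32, 32, 2, 3)  # sizes of axes 0, 1, 's', 'c' (the 'b' axis is excluded)
converter = StereoViewConverter(shape=shape, axes=axes)

design_mat = numpy.zeros((5, numpy.prod(shape)), dtype='float32')
left, right = converter.design_mat_to_topo_view(design_mat)
# left and right are each expected to have shape (5, 32, 32, 3),
# laid out according to the ('b', 0, 1, 'c') axes of each Conv2DSpace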
Example #38
class StereoViewConverter(object):

    """
    Converts stereo image data between two formats:
      A) A dense design matrix, one stereo pair per row (VectorSpace)
      B) An image pair (CompositeSpace of two Conv2DSpaces)

    Parameters
    ----------
    shape : tuple
        See doc for __init__'s <shape> parameter.
    """

    def __init__(self, shape, axes=None):
        """
        The arguments describe how the data is laid out in the design matrix.

        Parameters
        ----------

        shape : tuple
          A tuple of 4 ints, describing the shape of each datum.
          This is the size of each axis in <axes>, excluding the 'b' axis.

        axes : tuple
          A tuple of the following elements in any order:
            'b'  batch axis
            's'  stereo axis
             0   image axis 0 (row)
             1   image axis 1 (column)
            'c'  channel axis
        """
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes,
                               dtype=None)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))

    def get_formatted_batch(self, batch, space):
        """
        Returns a batch formatted to a space.

        Parameters
        ----------

        batch : ndarray
            The batch to format.

        space : a pylearn2.space.Space
            The target space to format to.
        """
        return self.storage_space.np_format_as(batch, space)

    def design_mat_to_topo_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_formatted_view(), get_batch_topo()

        Parameters
        ----------

        design_mat : ndarray
        """
        return self.storage_space.np_format_as(design_mat, self.topo_space)

    def design_mat_to_weights_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_weights_view()

        Parameters
        ----------

        design_mat : ndarray
        """
        return self.design_mat_to_topo_view(design_mat)

    def topo_view_to_design_mat(self, topo_batch):
        """
        Used by DenseDesignMatrix.set_topological_view(), .get_design_mat()

        Parameters
        ----------

        topo_batch : ndarray
        """
        return self.topo_space.np_format_as(topo_batch, self.storage_space)

    def view_shape(self):
        """
        Returns the shape of a single datum in the topological view.
        """
        return self.shape

    def weights_view_shape(self):
        """
        Returns the shape of the weights view (same as the topological view).
        """
        return self.view_shape()

    def set_axes(self, axes):
        """
        Change the order of the axes.

        Parameters
        ----------

        axes : tuple
            Must have length 5 and contain 'b', 's', 0, 1, and 'c',
            with 'b' first.
        """
        axes = tuple(axes)

        if len(axes) != 5:
            raise ValueError("Axes must have 5 elements; got %s" % str(axes))

        for required_axis in ('b', 's', 0, 1, 'c'):
            if required_axis not in axes:
                raise ValueError("Axes must contain 'b', 's', 0, 1, and 'c'. "
                                 "Got %s." % str(axes))

        if axes.index('b') != 0:
            raise ValueError("The 'b' axis must come first (axes = %s)." %
                             str(axes))

        def remove_b_axis(axes):
            axes = list(axes)
            axes.remove('b')
            return tuple(axes)

        if hasattr(self, 'axes'):
            # Reorders the shape vector to match the new axis ordering.
            assert hasattr(self, 'shape')
            old_axes = remove_b_axis(self.axes)  # pylint: disable-msg=E0203
            new_axes = remove_b_axis(axes)
            new_shape = tuple(self.shape[old_axes.index(a)] for a in new_axes)
            self.shape = new_shape

        self.axes = axes
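
A minimal usage sketch for StereoViewConverter, assuming pylearn2's
np_format_as semantics (each design-matrix row holds one flattened stereo
pair; the topological view is a pair of image batches):

import numpy

# Hypothetical sizes: stereo pairs of 32x32 RGB images, batch of 10.
converter = StereoViewConverter(shape=(2, 32, 32, 3),
                                axes=('b', 's', 0, 1, 'c'))
design_mat = numpy.zeros((10, 2 * 32 * 32 * 3), dtype='float32')
left, right = converter.design_mat_to_topo_view(design_mat)
# Each view is a Conv2DSpace batch with axes ('b', 0, 1, 'c'):
print(left.shape, right.shape)     # (10, 32, 32, 3) (10, 32, 32, 3)
round_trip = converter.topo_view_to_design_mat((left, right))
print(round_trip.shape)            # (10, 6144)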
Example #39
class Seq2Seq(RNN, LayerLab):
    """
    input dim: (max input len, batch_size, input_dim)
    output dim: (max output len, batch_size, output_dim)
    """
    _topo = GraphContainer()
    seed = 123
    def __init__(self, encoder_layers, bridge_layers,
                 decoder_layers, input_space,
                 maxlen_encoder, maxlen_decoder, annotation_dim, max_labels,
                 layer_name, embedings=None,
                 batch_size=None,
                 input_source='features', target_source='targets',
                 **kwargs):
        assert len(encoder_layers) >= 1
        assert len(decoder_layers) >= 1
        assert len(bridge_layers) >= 1
        assert maxlen_encoder is not None
        assert maxlen_decoder is not None
        assert annotation_dim is not None

        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.bridge_layers = bridge_layers
        self._input_source = input_source
        self._target_source = target_source

        # Seq2Seq maps the feature sequence (Seq1) to the target sequence
        # (Seq2).
        self.layer_name = layer_name
        self._nested = False
        self.layer_names = set()
        self.batch_size = batch_size
        self.force_batch_size = batch_size

        if embedings:
            use_embeding = True
            self.embedings = embedings
            self.embedings.set_mlp(self)
            self.layer_names.add(self.embedings.layer_name)
        else:
            use_embeding = False

        self.use_embeding = use_embeding
        self.layers = self.encoder_layers + self.bridge_layers + \
            self.decoder_layers

        # `monitor_targets` is not an explicit argument here; default to True
        # (an assumption) so get_monitoring_channels can unpack its data.
        self.monitor_targets = kwargs.pop('monitor_targets', True)
        for layer in self.layers:
            assert layer.get_mlp() is None
            if layer.layer_name in self.layer_names:
                raise ValueError("Seq2Seq.__init__ given two or more layers "
                                 "with same name: " + layer.layer_name)

            layer.set_mlp(self)

            self.layer_names.add(layer.layer_name)

        if input_space is not None:  # XXX: this branch is not general; it
                                     # should also handle the ordinary case.
            self.setup_rng()
            if self.use_embeding:
                self.input_space = CompositeSpace(
                    [ContextSpace(dim=annotation_dim,
                                num_annotation=maxlen_encoder),
                     CompositeSpace([IndexSpace(max_labels=max_labels,
                                            dim=maxlen_decoder),
                                     SequenceMaskSpace()])])
            else:
                self.input_space = CompositeSpace(
                    [ContextSpace(dim=annotation_dim,
                                num_annotation=maxlen_encoder),
                     SequenceSpace(IndexSpace(max_labels=max_labels,
                                            dim=maxlen_decoder))])
            assert self.input_space == input_space
            self.context_space, self.seq_space = self.input_space.components
            self._update_layer_input_spaces()

    def _update_layer_input_spaces(self):
        layers = self.encoder_layers + self.bridge_layers
        self.encoder_layers[0].set_input_space(self.context_space)

        for i in range(1, len(layers)):
            layers[i].set_input_space(layers[i-1].get_output_space())

        if self.use_embeding:
            self.embedings.set_input_space(self.seq_space.components[0])
            seq_space = SequenceSpace(self.embedings.get_output_space().space)
        else:
            seq_space = self.seq_space

        self.decoder_layers[0].set_input_space(
            CompositeSpace([seq_space, self.context_space]))

        for i in range(1, len(self.decoder_layers)):
            self.decoder_layers[i].set_input_space(
                self.decoder_layers[i-1].get_output_space())

    @wraps(RNN.fprop)
    def fprop(self, state_below):
        self.input_space.validate(state_below)
        context, (x, mask) = state_below
        encode = context
        for encoder in self.encoder_layers + self.bridge_layers:
            encode = encoder.fprop(encode)

        if self.use_embeding:
            x = self.embedings.upward_pass(x)

        cond = (x, mask, context)
        for decoder in self.decoder_layers:
            cond = decoder.fprop(cond, z0=encode)

        return cond

    @wraps(RNN.get_output_space)
    def get_output_space(self):
        return self.layers[-1].get_target_space()

    @wraps(RNN.get_default_cost)
    def get_default_cost(self):
        return Seq2SeqCost()

    @wraps(RNN.get_monitoring_channels)
    def get_monitoring_channels(self, data):
        if self.monitor_targets:
            seq1, seq2 = data
        else:
            seq1 = data
            seq2 = None

        rval = self.get_layer_monitoring_channels(state_below=seq1,
                                                  targets=seq2)
        return rval
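
fprop above destructures its input as `context, (x, mask)`; here is a hedged
sketch of the nested batch layout it expects in the embedding case (the axis
orderings of ContextSpace and SequenceMaskSpace batches are assumptions):

import numpy

batch_size, maxlen_enc, maxlen_dec, annotation_dim = 4, 20, 15, 100
context = numpy.zeros((batch_size, maxlen_enc, annotation_dim),
                      dtype='float32')                        # annotations
x = numpy.zeros((maxlen_dec, batch_size), dtype='int64')      # decoder tokens
mask = numpy.ones((maxlen_dec, batch_size), dtype='float32')  # 1 = real step
state_below = (context, (x, mask))
# seq2seq.fprop(state_below) would then run encoder -> bridge -> decoder.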
Example #41
class ToyRNN(Model):
    """
    A toy RNN that maps a sequence of acoustic features plus one-hot
    phone labels (dim 62) to a one-dimensional target sequence. When
    `use_ground_truth` is False, an `alpha`-weighted mix of the ground
    truth and the model's own predictions is fed back as input.
    """
    def __init__(self, nvis, nhid, hidden_transition_model, irange=0.05,
                 non_linearity='sigmoid', use_ground_truth=True):
        allowed_non_linearities = {'sigmoid': T.nnet.sigmoid,
                                   'tanh': T.tanh}
        self.nvis = nvis
        self.nhid = nhid
        self.hidden_transition_model = hidden_transition_model
        self.use_ground_truth = use_ground_truth
        self.alpha = sharedX(1)
        self.alpha_decrease_rate = 1.0  # alternative: 0.99

        assert non_linearity in allowed_non_linearities
        self.non_linearity = allowed_non_linearities[non_linearity]

        # Space initialization
        self.input_space = CompositeSpace([
            VectorSequenceSpace(dim=self.nvis),
            VectorSequenceSpace(dim=62)
        ])
        self.hidden_space = VectorSpace(dim=self.nhid)
        self.output_space = VectorSequenceSpace(dim=1)
        self.input_source = ('features', 'phones')
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Phones-to-hidden matrix
        V_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(62, self.nhid))
        self.V = sharedX(V_value, name='V')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')

    def fprop_step(self, features, phones, h_tm1, out):
        h_tm1 = self.hidden_space.format_as(h_tm1.dimshuffle('x', 0),
                                            self.hidden_transition_model.input_space)
        h = self.non_linearity(T.dot(features, self.W) +
                               T.dot(phones, self.V) +
                               self.hidden_transition_model.fprop(h_tm1).flatten() +
                               self.b)
        out = T.dot(h, self.U) + self.c
        return h, out

    def fprop_step_prime(self, truth, phones, features, h_tm1, out):
        features = T.set_subtensor(features[-1], (1 - self.alpha) *
                                   features[-1] + self.alpha * truth[-1])
        h_tm1 = self.hidden_space.format_as(h_tm1.dimshuffle('x', 0),
                                            self.hidden_transition_model.input_space)
        h = self.non_linearity(T.dot(features, self.W) +
                               T.dot(phones, self.V) +
                               self.hidden_transition_model.fprop(h_tm1).flatten() +
                               self.b)
        out = T.dot(h, self.U) + self.c
        features = T.concatenate([features[1:], out])
        return features, h, out

    def fprop(self, data):
        if self.use_ground_truth:
            self.input_space.validate(data)
            features, phones = data

            init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
            init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
            init_out = T.unbroadcast(init_out, 0)

            fn = self.fprop_step

            ((h, out), updates) = theano.scan(fn=fn,
                                              sequences=[features, phones],
                                              outputs_info=[dict(initial=init_h,
                                                                 taps=[-1]),
                                                            init_out])
            return out
        else:
            self.input_space.validate(data)
            features, phones = data

            init_in = features[0]
            init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
            init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
            init_out = T.unbroadcast(init_out, 0)

            fn = self.fprop_step_prime

            ((f, h, out), updates) = theano.scan(fn=fn,
                                                 sequences=[features, phones],
                                                 outputs_info=[init_in,
                                                               dict(initial=init_h,
                                                                    taps=[-1]),
                                                               init_out])
            return out

    def predict_next(self, features, phones, h_tm1):
        h_tm1 = self.hidden_space.format_as(h_tm1.dimshuffle('x', 0),
                                            self.hidden_transition_model.input_space)
        h = self.non_linearity(T.dot(features, self.W) +
                               T.dot(phones, self.V) +
                               self.hidden_transition_model.fprop(h_tm1).flatten() +
                               self.b)
        out = T.dot(h, self.U) + self.c
        return h, out

    def get_params(self):
        return [self.W, self.V, self.b, self.U, self.c] + \
               self.hidden_transition_model.get_params()

    def get_input_source(self):
        return self.input_source

    def get_target_source(self):
        return self.target_source

    def censor_updates(self, updates):
        updates[self.alpha] = self.alpha_decrease_rate * self.alpha

    def get_monitoring_channels(self, data):
        rval = OrderedDict()
        rval['alpha'] = self.alpha
        return rval
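
The alpha schedule is driven by censor_updates: every training update
multiplies alpha by alpha_decrease_rate, so a rate below 1.0 (the
alternative 0.99 noted above) gradually shifts fprop_step_prime from
ground-truth inputs toward the model's own predictions. A plain-Python
sketch of three such updates:

alpha, rate = 1.0, 0.99       # hypothetical rate; the code above keeps 1.0
for _ in range(3):
    alpha *= rate
print(round(alpha, 6))        # 0.970299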
Example #42
class ToyRNN(Model):
    """
    A minimal toy RNN: acoustic features and one-hot phone labels
    (dim 62) feed a single sigmoid recurrent layer that emits a
    one-dimensional output at each time step.
    """
    def __init__(self, nvis, nhid):
        self.nvis = nvis
        self.nhid = nhid

        # Space initialization
        self.input_space = CompositeSpace([
            VectorSequenceSpace(window_dim=self.nvis),
            VectorSequenceSpace(window_dim=62)
        ])
        self.output_space = VectorSequenceSpace(window_dim=1)
        self.input_source = ('features', 'phones')
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Phones-to-hidden matrix
        V_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(62, self.nhid))
        self.V = sharedX(V_value, name='V')
        # Hidden-to-hidden matrix
        M_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nhid, self.nhid))
        self.M = sharedX(M_value, name='M')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-0.5, high=0.5,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')

    def fprop(self, data):
        self.input_space.validate(data)
        features, phones = data

        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)

        def fprop_step(features, phones, h_tm1, out):
            h = T.nnet.sigmoid(T.dot(features, self.W) +
                               T.dot(phones, self.V) +
                               T.dot(h_tm1, self.M) +
                               self.b)
            out = T.dot(h, self.U) + self.c
            return h, out

        ((h, out), updates) = theano.scan(fn=fprop_step,
                                          sequences=[features, phones],
                                          outputs_info=[dict(initial=init_h,
                                                             taps=[-1]),
                                                        init_out])
        return out

    def get_params(self):
        return [self.W, self.V, self.M, self.b, self.U, self.c]

    def get_input_source(self):
        return self.input_source

    def get_target_source(self):
        return self.target_source
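
A plain-numpy restatement of the recurrence that fprop's scan implements,
h_t = sigmoid(f_t W + p_t V + h_{t-1} M + b) and out_t = h_t U + c
(all dimensions here are hypothetical; no Theano required):

import numpy

nvis, nhid, n_steps = 5, 3, 4
rng = numpy.random.RandomState(123)
W = rng.uniform(-0.5, 0.5, (nvis, nhid))   # features-to-hidden
V = rng.uniform(-0.5, 0.5, (62, nhid))     # phones-to-hidden
M = rng.uniform(-0.5, 0.5, (nhid, nhid))   # hidden-to-hidden
U = rng.uniform(-0.5, 0.5, (nhid, 1))      # hidden-to-out
b, c = numpy.zeros(nhid), numpy.zeros(1)

def sigmoid(z):
    return 1.0 / (1.0 + numpy.exp(-z))

features = rng.randn(n_steps, nvis)
phones = rng.randn(n_steps, 62)
h, outs = numpy.zeros(nhid), []
for f, p in zip(features, phones):
    h = sigmoid(f.dot(W) + p.dot(V) + h.dot(M) + b)
    outs.append(h.dot(U) + c)
print(numpy.asarray(outs).shape)   # (4, 1): one output per time step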