Example #1
    def __init__(self,
                 window_shape,
                 randomize=None,
                 randomize_once=None,
                 center=None,
                 rng=(2013, 2, 20),
                 pad_randomized=0,
                 flip=True):
        self._window_shape = tuple(window_shape)

        # Defined in setup(). A dict that maps Datasets in self._randomize and
        # self._randomize_once to zero-padded versions of their topological
        # views.
        self._original = None

        self._randomize = randomize if randomize else []
        self._randomize_once = randomize_once if randomize_once else []
        self._center = center if center else []
        self._pad_randomized = pad_randomized
        self._flip = flip

        if randomize is None and randomize_once is None and center is None:
            warnings.warn(self.__class__.__name__ + " instantiated without "
                          "any dataset arguments, and therefore does nothing",
                          stacklevel=2)

        self._rng = make_np_rng(rng, which_method="random_integers")
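All of these snippets route seeding through pylearn2's make_np_rng helper. As a minimal sketch of its behavior (assuming the pylearn2.utils.rng semantics: reuse a RandomState if given one, otherwise seed a fresh one, and check that the requested sampling methods exist), it acts roughly like this:

import numpy

def make_np_rng_sketch(rng_or_seed=None, default_seed=None, which_method=None):
    # Reuse an existing RandomState; otherwise seed a new one from the
    # given seed (an int or a tuple/list of ints), falling back to
    # default_seed. This is a simplified stand-in, not the real helper.
    if isinstance(rng_or_seed, numpy.random.RandomState):
        rng = rng_or_seed
    elif rng_or_seed is not None:
        rng = numpy.random.RandomState(rng_or_seed)
    else:
        rng = numpy.random.RandomState(default_seed)
    if which_method is not None:
        # The real helper validates that the RNG exposes each method.
        methods = [which_method] if isinstance(which_method, str) else which_method
        for method in methods:
            assert hasattr(rng, method)
    return rng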
Example #2
 def __init__(self,
              window_shape,
              center_shape=None,
              central_window_shape=None,
              randomize=None,
              randomize_once=None,
              center=None,
              rotate=True,
              scale_diff=0.0,
              rng=(2013, 2, 20),
              shear=0.0,
              translation=0.0,
              preprocess=None):
     self._window_shape = window_shape
     self._center_shape = center_shape
     self._central_window_shape = central_window_shape
     self._randomize = randomize if randomize else []
     self._randomize_once = randomize_once if randomize_once else []
     self._center = center if center else []
     self._rotate = rotate
     self._scale_diff = scale_diff
     self._shear = shear
     self._translation = translation
     self._preprocess = preprocess
     self._rng = make_np_rng(rng, which_method="random_integers")
Example #3
def make_random_conv3D(irange,
                       input_axes,
                       output_axes,
                       signal_shape,
                       filter_shape,
                       kernel_stride=(2, 2, 1),
                       pad=0,
                       message="",
                       rng=None,
                       partial_sum=None):
    if rng is None:
        rng = make_np_rng(rng, default_seed, which_method='uniform')

    # Axis order: b 0 1 t c (batch, rows, cols, time, channels).
    _filter_5d_shape = (filter_shape[0], filter_shape[1], filter_shape[2],
                        filter_shape[3], filter_shape[4])

    # initialize weights
    print(_filter_5d_shape)
    W = sharedX(rng.uniform(-irange, irange, _filter_5d_shape))
    print('w is set')
    return Conv3DB01TC(filters=W,
                       input_axes=input_axes,
                       output_axes=output_axes,
                       signal_shape=signal_shape,
                       filter_shape=filter_shape,
                       kernel_stride=kernel_stride,
                       pad=pad,
                       message=message,
                       partial_sum=partial_sum)
Example #4
    def __init__(self, which_set='debug', start=None, end=None, shuffle=True,
                 lazy_load=False, rng=_default_seed):

        assert which_set in ['debug', 'train', 'test']
        if which_set == 'debug':
            maxlen, n_samples, n_annotations, n_features = 10, 12, 13, 14
            X = N.zeros(shape=(n_samples, maxlen))
            X_mask = X  # same shape as X
            Z = N.zeros(shape=(n_annotations, n_samples, n_features))
        elif which_set == 'train':
            pass
        else:
            pass

        self.X, self.X_mask, self.Z = (X, X_mask, Z)
        self.sources = ('features', 'target')

        self.spaces = CompositeSpace([
            SequenceSpace(space=VectorSpace(dim=self.X.shape[1])),
            SequenceDataSpace(space=VectorSpace(dim=self.Z.shape[-1]))
        ])
        self.data_specs = (self.spaces, self.sources)
        # self.X_space, self.X_mask_space, self.Z_space
        # Default iterator
        self._iter_mode = resolve_iterator_class('sequential')
        self._iter_topo = False
        self._iter_target = False
        self._iter_data_specs = self.data_specs
        self.rng = make_np_rng(rng, which_method='random_integers')
Example #5
    def __init__(self, data=None, data_specs=None, rng=_default_seed,
                 preprocessor=None, fit_preprocessor=False):
        # data_specs should be flat, and there should be no
        # duplicates in source, as we keep only one version
        assert is_flat_specs(data_specs)
        if isinstance(data_specs[1], tuple):
            assert sorted(set(data_specs[1])) == sorted(data_specs[1])
        space, source = data_specs
        space.np_validate(data)
        # TODO: assume that data[0] is num example => error if channel in c01b
        # assert len(set(elem.shape[0] for elem in list(data))) <= 1
        self.data = data
        self.data_specs = data_specs
        # TODO: assume that data[0] is num example => error if channel in c01b
        self.num_examples = list(data)[-1].shape[0] # TODO: list(data)[0].shape[0]

        self.compress = False
        self.design_loc = None
        self.rng = make_np_rng(rng, which_method='random_integers')
        # Defaults for iterators
        self._iter_mode = resolve_iterator_class('sequential')

        if preprocessor:
            preprocessor.apply(self, can_fit=fit_preprocessor)
        self.preprocessor = preprocessor
Example #6
def make_random_conv2D(irange, input_space, output_space,
                       kernel_shape, batch_size=None,
                       subsample=(1, 1), border_mode='valid',
                       message="", rng=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX(rng.uniform(
        -irange, irange,
        (output_space.num_channels, input_space.num_channels,
         kernel_shape[0], kernel_shape[1])
    ))

    return Conv2D(
        filters=W,
        batch_size=batch_size,
        input_space=input_space,
        output_axes=output_space.axes,
        subsample=subsample, border_mode=border_mode,
        filters_shape=W.get_value(borrow=True).shape, message=message
    )
Example #7
    def __init__(self,
                 window_shape,
                 randomize=None,
                 randomize_once=None,
                 center=None,
                 rng=(2013, 2, 20),
                 pad_randomized=0,
                 flip=True):
        self._window_shape = tuple(window_shape)

        # Defined in setup(). A dict that maps Datasets in self._randomize and
        # self._randomize_once to zero-padded versions of their topological
        # views.
        self._original = None

        self._randomize = randomize if randomize else []
        self._randomize_once = randomize_once if randomize_once else []
        self._center = center if center else []
        self._pad_randomized = pad_randomized
        self._flip = flip

        if randomize is None and randomize_once is None and center is None:
            warnings.warn(self.__class__.__name__ + " instantiated without "
                          "any dataset arguments, and therefore does nothing",
                          stacklevel=2)

        self._rng = make_np_rng(rng, which_method="random_integers")
Example #8
    def __init__(self, X, y):
        if (self.dataset_name in dataset_info.aod_datasets
                and self.which_set == "full"):
            self.targets, self.novels = self.load_aod_gts()
            assert self.targets.shape == self.novels.shape
            if X.shape[0] % self.targets.shape[0] != 0:
                raise ValueError("AOD data and targets seems to have "
                                 "incompatible shapes: %r vs %r" %
                                 (X.shape, self.targets.shape))

        X = self.preprocess(X)

        if self.shuffle:
            logger.info("Shuffling data")
            self.shuffle_rng = make_np_rng(None, [1, 2, 3],
                                           which_method="shuffle")
            m = X.shape[0]  # number of examples to shuffle
            for i in xrange(m):
                j = self.shuffle_rng.randint(m)
                tmp = X[i].copy()
                X[i] = X[j]
                X[j] = tmp
                tmp = y[i:i + 1].copy()
                y[i] = y[j]
                y[j] = tmp

        max_labels = np.amax(y) + 1
        logger.info("%d labels found." % max_labels)

        super(MRI, self).__init__(X=X,
                                  y=y,
                                  view_converter=self.view_converter,
                                  y_labels=max_labels)

        assert not np.any(np.isnan(self.X))
Example #9
def test_convolutional_compatible():
    """
    VAE allows convolutional encoding networks
    """
    encoding_model = MLP(
        layers=[
            SpaceConverter(
                layer_name='conv2d_converter',
                output_space=Conv2DSpace(shape=[4, 4], num_channels=1)
            ),
            ConvRectifiedLinear(
                layer_name='h',
                output_channels=2,
                kernel_shape=[2, 2],
                kernel_stride=[1, 1],
                pool_shape=[1, 1],
                pool_stride=[1, 1],
                pool_type='max',
                irange=0.01)
            ]
    )
    decoding_model = MLP(layers=[Linear(layer_name='h', dim=16, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name='conditional')
    posterior = DiagonalGaussian(mlp=encoding_model, name='posterior')
    vae = VAE(nvis=16, prior=prior, conditional=conditional,
              posterior=posterior, nhid=16)
    X = T.matrix('X')
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 16))))
Example #10
 def __init__(self, num_arms, mean_std=1.0, std_std=1.0):
     self.rng = make_np_rng(None, [2013, 11, 12], which_method="randn")
     self.means = sharedX(self.rng.randn(num_arms) * mean_std)
     self.stds = sharedX(np.abs(self.rng.randn(num_arms) * std_std))
     self.theano_rng = make_theano_rng(None,
                                       self.rng.randint(2**16),
                                       which_method="normal")
Example #11
    def make_weights(input_space, output_space, kernel_shape, **kwargs):
        rs = make_np_rng(rng, default_seed, which_method='normal')

        shape = (output_space.num_channels, input_space.num_channels,
                 kernel_shape[0], kernel_shape[1])

        return sharedX(rs.normal(0, istd, shape))
Example #12
    def make_weights(input_space, output_space, kernel_shape, **kwargs):
        rs = make_np_rng(rng, default_seed, which_method='uniform')

        shape = (output_space.num_channels, input_space.num_channels,
                 kernel_shape[0], kernel_shape[1])

        return sharedX(rs.uniform(-irange, irange, shape))
Example #13
 def _create_subset_iterator(self, mode, batch_size=None, num_batches=None,
                             rng=None):
     subset_iterator = resolve_iterator_class(mode)
     if rng is None and subset_iterator.stochastic:
         rng = make_np_rng()
     return subset_iterator(self.get_num_examples(), batch_size,
                            num_batches, rng)
Example #14
    def setup_rng(self):
        """
        .. todo::

            WRITEME
        """
        self.rng = make_np_rng(None, [2012, 10, 17], which_method="uniform")
Example #15
    def __init__(self, X, y):
        if (self.dataset_name in dataset_info.aod_datasets
            and self.which_set == "full"):
            self.targets, self.novels = self.load_aod_gts()
            assert self.targets.shape == self.novels.shape
            if X.shape[0] % self.targets.shape[0] != 0:
                raise ValueError("AOD data and targets seems to have "
                                 "incompatible shapes: %r vs %r"
                                 % (X.shape, self.targets.shape))

        X = self.preprocess(X)

        if self.shuffle:
            logger.info("Shuffling data")
            self.shuffle_rng = make_np_rng(None, [1, 2, 3],
                                           which_method="shuffle")
            m = X.shape[0]  # number of examples to shuffle
            for i in xrange(m):
                j = self.shuffle_rng.randint(m)
                tmp = X[i].copy()
                X[i] = X[j]
                X[j] = tmp
                tmp = y[i:i+1].copy()
                y[i] = y[j]
                y[j] = tmp

        max_labels = np.amax(y) + 1
        logger.info("%d labels found." % max_labels)

        super(MRI, self).__init__(X=X,
                                  y=y,
                                  view_converter=self.view_converter,
                                  y_labels=max_labels)

        assert not np.any(np.isnan(self.X))
Example #16
def split_patients(patients, valid_percent, test_percent, rng=(2014, 10, 22)):
    if isinstance(rng, (list, tuple)):
        rng = make_np_rng(None, rng, which_method='uniform')

    vals = np.asarray(patients.values())
    keys = np.asarray(patients.keys())
    sss = StratifiedShuffleSplit(
        vals, n_iter=1, test_size=test_percent, random_state=rng)
    remaining_idx, test_idx = sss.__iter__().next()

    if valid_percent > 0:
        # Rate of samples required to build validation set
        valid_rate = valid_percent / (1 - test_percent)

        sss = StratifiedShuffleSplit(
            vals[remaining_idx], n_iter=1, test_size=valid_rate, random_state=rng)
        tr_idx, val_idx = sss.__iter__().next()
        valid_idx = remaining_idx[val_idx]
        train_idx = remaining_idx[tr_idx]
    else:
        train_idx = remaining_idx
        valid_idx = []

    train_patients = dict(zip(keys[train_idx], vals[train_idx]))
    valid_patients = dict(zip(keys[valid_idx], vals[valid_idx]))
    test_patients = dict(zip(keys[test_idx], vals[test_idx]))
    return train_patients, valid_patients, test_patients
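A hedged usage sketch for split_patients, assuming patients maps a patient id to an integer class label (the StratifiedShuffleSplit call above uses the old sklearn.cross_validation API, which takes the label vector directly):

# Hypothetical data: patient id -> binary class label.
patients = dict(('p%02d' % i, i % 2) for i in range(20))
train, valid, test = split_patients(patients, valid_percent=0.2,
                                    test_percent=0.2, rng=(2014, 10, 22))
# Roughly a 60/20/20 split, stratified by label.
print(len(train), len(valid), len(test))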
Example #17
    def __init__(self,
                 cost=None,
                 batch_size=None,
                 batches_per_iter=None,
                 updates_per_batch=10,
                 monitoring_batch_size=None,
                 monitoring_batches=None,
                 monitoring_dataset=None,
                 termination_criterion=None,
                 set_batch_size=False,
                 reset_alpha=True,
                 conjugate=False,
                 min_init_alpha=.001,
                 reset_conjugate=True,
                 line_search_mode=None,
                 verbose_optimization=False,
                 scale_step=1.,
                 theano_function_mode=None,
                 init_alpha=None,
                 seed=None):

        self.__dict__.update(locals())
        del self.self

        if monitoring_dataset is None:
            assert monitoring_batches is None
            assert monitoring_batch_size is None

        self._set_monitoring_dataset(monitoring_dataset)

        self.bSetup = False
        self.termination_criterion = termination_criterion
        self.rng = make_np_rng(seed, [2012, 10, 16],
                               which_method=["randn", "randint"])
Example #18
    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng,
                                which_method=["random_integers", "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)

        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
Example #19
    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)

        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
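The batch-count logic above is plain ceiling division (the original module enables true division). For example, under that assumption:

from __future__ import division
import numpy as np

dataset_size, batch_size = 105, 10
num_batches = int(np.ceil(dataset_size / batch_size))
assert num_batches == 11  # ten full batches plus one batch of 5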
Example #20
 def __init__(self, dataset_size, batch_size, num_batches, rng=None):
     super(ShuffledSequentialSubsetIterator,
           self).__init__(dataset_size, batch_size, num_batches, None)
     self._rng = make_np_rng(rng,
                             which_method=["random_integers", "shuffle"])
     self._shuffled = np.arange(self._dataset_size)
     self._rng.shuffle(self._shuffled)
Example #21
def make_sparse_random_local(num_nonzero, input_space, output_space,
                             kernel_shape, batch_size,
                             kernel_stride=(1, 1), border_mode='valid',
                             message="", rng=None):
    """
    .. todo::

        WRITEME
    """
    raise NotImplementedError("Not yet modified after copy-paste from "
                              "pylearn2.linear.conv2d_c01b")
    """ Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse"""

    rng = make_np_rng(rng,
                      default_sparse_seed,
                      which_method=['randn', 'randint'])

    W = np.zeros((output_space.num_channels, input_space.num_channels,
                  kernel_shape[0], kernel_shape[1]))

    def random_coord():
        return [rng.randint(dim) for dim in W.shape]

    for i in xrange(num_nonzero):
        o, ch, r, c = random_coord()
        while W[o, ch, r, c] != 0:
            o, ch, r, c = random_coord()
        W[o, ch, r, c] = rng.randn()

    W = sharedX(W)
Example #22
    def setup_rng(self):
        """
        .. todo::

            WRITEME
        """
        self.rng = make_np_rng(None, [2012, 10, 17], which_method="uniform")
Example #23
    def __init__(self, nvis, nhid, init_lambda, init_p, init_alpha,
                 learning_rate):
        """
        .. todo::

            WRITEME
        """
        self.nvis = int(nvis)
        self.nhid = int(nhid)
        self.init_lambda = float(init_lambda)
        self.init_p = float(init_p)
        self.init_alpha = N.cast[config.floatX](init_alpha)
        self.tol = 1e-6
        self.time_constant = 1e-2
        self.learning_rate = N.cast[config.floatX](learning_rate)

        self.predictor_learning_rate = self.learning_rate

        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")

        self.error_record = []
        self.ERROR_RECORD_MODE_MONITORING = 0
        self.error_record_mode = self.ERROR_RECORD_MODE_MONITORING

        self.instrumented = False

        self.redo_everything()
Example #24
def test_convolutional_compatible():
    """
    VAE allows convolutional encoding networks
    """
    encoding_model = MLP(
        layers=[
            SpaceConverter(layer_name="conv2d_converter", output_space=Conv2DSpace(shape=[4, 4], num_channels=1)),
            ConvRectifiedLinear(
                layer_name="h",
                output_channels=2,
                kernel_shape=[2, 2],
                kernel_stride=[1, 1],
                pool_shape=[1, 1],
                pool_stride=[1, 1],
                pool_type="max",
                irange=0.01,
            ),
        ]
    )
    decoding_model = MLP(layers=[Linear(layer_name="h", dim=16, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name="conditional")
    posterior = DiagonalGaussian(mlp=encoding_model, name="posterior")
    vae = VAE(nvis=16, prior=prior, conditional=conditional, posterior=posterior, nhid=16)
    X = T.matrix("X")
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 16))))
Example #25
def make_random_conv2D(irange,
                       input_channels,
                       input_axes,
                       output_axes,
                       output_channels,
                       kernel_shape,
                       kernel_stride=(1, 1),
                       pad=0,
                       message="",
                       rng=None,
                       partial_sum=None,
                       sparse_init=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels.
    Should be functionally equivalent to
    pylearn2.linear.conv2d.make_random_conv2D
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX(rng.uniform(-irange, irange,
                            (input_channels, kernel_shape[0],
                             kernel_shape[1], output_channels)))

    return Conv2D(filters=W,
                  input_axes=input_axes,
                  output_axes=output_axes,
                  kernel_stride=kernel_stride,
                  pad=pad,
                  message=message,
                  partial_sum=partial_sum)
Example #26
    def __init__(self, nvis, nhid, coeff):
        self.nvis = nvis
        self.nhid = nhid
        self.coeff = float(coeff)
        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")

        self.redo_everything()
Example #27
    def reset_rng(self):
        """
        .. todo::

            WRITEME
        """
        self.rng = make_np_rng([1,2,3], which_method=['randn','uniform'])
Example #28
    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=_default_seed):
        """
        Constructor.
        """
        super(CosDataset, self).__init__()
        
        #: lower limit for x as in cos(x)
        self.min_x = min_x
        
        #: higher limit for x as in cos(x)
        self.max_x = max_x
        
        #: standard deviation for the noise added to the values we generate
        self.std = std

        # The argument to resolve_iterator_class() can be either
        # a string from [sequential, shuffled_sequential, random_slice,
        # random_uniform, batchwise_shuffled_sequential, even_sequential,
        # even_shuffled_sequential, even_batchwise_shuffled_sequential,
        # even_sequences] or a SubsetIterator subclass.

        #: default iterator implementation (a class to be instantiated)
        self._iter_subset_class = resolve_iterator_class('sequential')
        
        #: default data specifications for iterator
        self._iter_data_specs = (VectorSpace(2), 'features')
        
        #: default batch size for the iterator
        self._iter_batch_size = 100
        
        #: default number of batches for the iterator
        self._iter_num_batches = 10
        
        #: random number generator
        self.rng = make_np_rng(rng, which_method=['uniform', 'randn'])
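For intuition, a hedged sketch of the kind of batch a CosDataset iterator could produce under these settings: rows of (x, cos(x) + noise) matching the VectorSpace(2) 'features' spec (the actual sampling code is not part of this snippet):

import numpy as np

rng = np.random.RandomState([17, 2, 946])
min_x, max_x, std = -6.28, 6.28, 0.05
x = rng.uniform(min_x, max_x, size=(100,))
batch = np.column_stack([x, np.cos(x) + std * rng.randn(100)])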
Example #29
    def __init__(
        self,
        cost=None,
        batch_size=None,
        batches_per_iter=None,
        updates_per_batch=10,
        monitoring_batches=None,
        monitoring_dataset=None,
        termination_criterion=None,
        set_batch_size=False,
        reset_alpha=True,
        conjugate=False,
        min_init_alpha=0.001,
        reset_conjugate=True,
        line_search_mode=None,
        verbose_optimization=False,
        scale_step=1.0,
        theano_function_mode=None,
        init_alpha=None,
        seed=None,
    ):

        self.__dict__.update(locals())
        del self.self

        if monitoring_dataset is None:
            assert monitoring_batches is None

        self._set_monitoring_dataset(monitoring_dataset)

        self.bSetup = False
        self.termination_criterion = termination_criterion
        self.rng = make_np_rng(seed, [2012, 10, 16], which_method=["randn", "randint"])
Example #30
def make_sparse_random_conv2D(num_nonzero, input_space, output_space,
                              kernel_shape, pad=0, kernel_stride=(1, 1),
                              border_mode='valid', message="", rng=None,
                              partial_sum=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse
    """

    rng = make_np_rng(rng, default_sparse_seed,
                      which_method=['randn', 'randint'])

    W = np.zeros((input_space.num_channels, kernel_shape[0],
                  kernel_shape[1], output_space.num_channels))

    def random_coord():
        return [rng.randint(dim) for dim in W.shape[0:3]]

    for o in xrange(output_space.num_channels):
        for i in xrange(num_nonzero):
            ch, r, c = random_coord()
            while W[ch, r, c, o] != 0:
                ch, r, c = random_coord()
            W[ch, r, c, o] = rng.randn()

    W = sharedX(W)

    return Conv2D(filters=W, input_axes=input_space.axes,
                  output_axes=output_space.axes, kernel_stride=kernel_stride,
                  pad=pad, message=message, partial_sum=partial_sum)
Example #31
def make_sparse_random_conv2D(num_nonzero, input_space, output_space,
                              kernel_shape, pad=0, kernel_stride=(1, 1),
                              border_mode='valid', message="", rng=None,
                              partial_sum=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse
    """

    rng = make_np_rng(rng, default_sparse_seed,
                      which_method=['randn', 'randint'])

    W = np.zeros((input_space.num_channels, kernel_shape[0],
                  kernel_shape[1], output_space.num_channels))

    def random_coord():
        return [rng.randint(dim) for dim in W.shape[0:3]]

    for o in xrange(output_space.num_channels):
        for i in xrange(num_nonzero):
            ch, r, c = random_coord()
            while W[ch, r, c, o] != 0:
                ch, r, c = random_coord()
            W[ch, r, c, o] = rng.randn()

    W = sharedX(W)

    return Conv2D(filters=W, input_axes=input_space.axes,
                  output_axes=output_space.axes, kernel_stride=kernel_stride,
                  pad=pad, message=message, partial_sum=partial_sum)
Example #32
    def __init__(self, nvis, nhid, coeff):
        self.nvis = nvis
        self.nhid = nhid
        self.coeff = float(coeff)
        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")

        self.redo_everything()
Example #33
 def _create_subset_iterator(self, mode, batch_size=None, num_batches=None,
                             rng=None):
     subset_iterator = resolve_iterator_class(mode)
     if rng is None and subset_iterator.stochastic:
         rng = make_np_rng()
     return subset_iterator(self.get_num_examples(), batch_size,
                            num_batches, rng)
Example #34
    def __init__(self, nvis, nhid,
            init_lambda,
            init_p, init_alpha, learning_rate):
        """
        .. todo::

            WRITEME
        """
        self.nvis = int(nvis)
        self.nhid = int(nhid)
        self.init_lambda = float(init_lambda)
        self.init_p = float(init_p)
        self.init_alpha = N.cast[config.floatX](init_alpha)
        self.tol = 1e-6
        self.time_constant = 1e-2
        self.learning_rate = N.cast[config.floatX](learning_rate)

        self.predictor_learning_rate = self.learning_rate

        self.rng = make_np_rng(None, [1,2,3], which_method="randn")

        self.error_record = []
        self.ERROR_RECORD_MODE_MONITORING = 0
        self.error_record_mode = self.ERROR_RECORD_MODE_MONITORING

        self.instrumented = False

        self.redo_everything()
Example #35
    def __init__(self,
                 data=None,
                 data_specs=None,
                 rng=_default_seed,
                 preprocessor=None,
                 fit_preprocessor=False):
        # data_specs should be flat, and there should be no
        # duplicates in source, as we keep only one version
        assert is_flat_specs(data_specs)
        if isinstance(data_specs[1], tuple):
            assert sorted(set(data_specs[1])) == sorted(data_specs[1])
        space, source = data_specs
        space.np_validate(data)
        assert len(set(elem.shape[0] for elem in list(data))) <= 1
        self.data = data
        self.data_specs = data_specs
        self.num_examples = list(data)[0].shape[0]

        self.compress = False
        self.design_loc = None
        self.rng = make_np_rng(rng, which_method='random_integers')
        # Defaults for iterators
        self._iter_mode = resolve_iterator_class('sequential')

        if preprocessor:
            preprocessor.apply(self, can_fit=fit_preprocessor)
        self.preprocessor = preprocessor
Example #36
def make_random_conv2D(irange, input_channels, input_axes, output_axes,
                       output_channels,
                       kernel_shape,
                       kernel_stride=(1, 1), pad=0, message="", rng=None,
                       partial_sum=None, sparse_init=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels.
    Should be functionally equivalent to
    pylearn2.linear.conv2d.make_random_conv2D
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX(rng.uniform(-irange, irange,
                            (input_channels, kernel_shape[0],
                             kernel_shape[1], output_channels)))

    return Conv2D(filters=W,
                  input_axes=input_axes,
                  output_axes=output_axes,
                  kernel_stride=kernel_stride, pad=pad,
                  message=message, partial_sum=partial_sum)
Example #37
def make_sparse_random_local(num_nonzero, input_space, output_space,
                             kernel_shape, batch_size,
                             kernel_stride=(1, 1), border_mode='valid',
                             message="", rng=None):
    """
    .. todo::

        WRITEME
    """
    raise NotImplementedError("Not yet modified after copy-paste from "
            "pylearn2.linear.conv2d_c01b")
    """ Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse"""

    rng = make_np_rng(rng, default_sparse_seed,
                      which_method=['randn', 'randint'])

    W = np.zeros((output_space.num_channels, input_space.num_channels,
                  kernel_shape[0], kernel_shape[1]))

    def random_coord():
        return [rng.randint(dim) for dim in W.shape]

    for i in xrange(num_nonzero):
        o, ch, r, c = random_coord()
        while W[o, ch, r, c] != 0:
            o, ch, r, c = random_coord()
        W[o, ch, r, c] = rng.randn()

    W = sharedX(W)
Example #38
def make_random_conv3D(irange, input_axes, output_axes,
                       signal_shape,
                       filter_shape,
                       kernel_stride=(1, 1), pad=0, message="", rng=None,
                       partial_sum=None):

    if rng is None:
        rng = make_np_rng(rng, default_seed, which_method='uniform')

    _filter_5d_shape = (filter_shape[0], filter_shape[1], filter_shape[2],
                        filter_shape[3], filter_shape[4])

    # initialize weights
    W = sharedX(rng.uniform(-irange, irange, _filter_5d_shape))

    return Conv3DBCT01(filters=W,
                       input_axes=input_axes,
                       output_axes=output_axes,
                       signal_shape=signal_shape,
                       filter_shape=filter_shape,
                       kernel_stride=kernel_stride, pad=pad,
                       message=message, partial_sum=partial_sum)
Example #39
def make_random_conv2D(irange, input_space, output_space,
                       kernel_shape, batch_size=None, \
                       subsample = (1,1), border_mode = 'valid',
                       message = "", rng = None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX(rng.uniform(
        -irange, irange,
        (output_space.num_channels, input_space.num_channels,
         kernel_shape[0], kernel_shape[1])
    ))

    return Conv2D(
        filters=W,
        batch_size=batch_size,
        input_space=input_space,
        output_axes=output_space.axes,
        subsample=subsample, border_mode=border_mode,
        filters_shape=W.get_value(borrow=True).shape, message=message
    )
Example #40
def build_stacked_ae(nvis,
                     nhids,
                     act_enc,
                     act_dec,
                     tied_weights=False,
                     irange=1e-3,
                     rng=None,
                     corruptor=None,
                     contracting=False):
    """
    .. todo::

        WRITEME properly

    Allocate a stack of autoencoders.
    """
    rng = make_np_rng(rng, which_method='randn')
    layers = []
    final = {}
    # "Broadcast" arguments if they are singular, or accept sequences if
    # they are the same length as nhids
    for c in [
            'corruptor', 'contracting', 'act_enc', 'act_dec', 'tied_weights',
            'irange'
    ]:
        if type(locals()[c]) is not str and hasattr(locals()[c], '__len__'):
            assert len(nhids) == len(locals()[c])
            final[c] = locals()[c]
        else:
            final[c] = [locals()[c]] * len(nhids)
    # The number of visible units in each layer is the initial input
    # size and the first k-1 hidden unit sizes.
    nviss = [nvis] + nhids[:-1]
    seq = izip(
        nhids,
        nviss,
        final['act_enc'],
        final['act_dec'],
        final['corruptor'],
        final['contracting'],
        final['tied_weights'],
        final['irange'],
    )
    # Create each layer.
    for (nhid, nvis, act_enc, act_dec, corr, cae, tied, ir) in seq:
        args = (nvis, nhid, act_enc, act_dec, tied, ir, rng)
        if cae and corr is not None:
            raise ValueError("Can't specify denoising and contracting "
                             "objectives simultaneously")
        elif cae:
            autoenc = ContractiveAutoencoder(*args)
        elif corr is not None:
            autoenc = DenoisingAutoencoder(corr, *args)
        else:
            autoenc = Autoencoder(*args)
        layers.append(autoenc)

    # Create the stack
    return StackedBlocks(layers)
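A hedged usage sketch: per-layer settings may be passed once (broadcast over all layers) or as sequences the same length as nhids, so a hypothetical 784-400-100 stack of plain autoencoders could be built as:

stack = build_stacked_ae(nvis=784,
                         nhids=[400, 100],
                         act_enc='tanh',
                         act_dec=None,      # linear decoder
                         tied_weights=True,
                         irange=1e-3)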
Example #41
def spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):
    """
    Generator function that yields a stream of (filename, slicetuple)
    representing a spatiotemporal patch of that file.

    Parameters
    ----------
    file_tuples : list of tuples
        Each element should be a 2-tuple consisting of a filename
        (or arbitrary identifier) and a (length, height, width)
        shape tuple of the dimensions (number of frames in the video,
        height and width of each frame).

    shape : tuple
        A shape tuple consisting of the desired (length, height, width)
        of each spatiotemporal patch.

    n_patches : int, optional
        The number of patches to generate. By default, generates patches
        infinitely.

    rng : RandomState object or seed, optional
        The random number generator (or seed) to use. Defaults to None,
        meaning it will be seeded from /dev/urandom or the clock.

    Returns
    -------
    generator : generator object
        A generator that yields a stream of (filename, slicetuple) tuples.
        The slice tuple is such that it indexes into a 3D array containing
        the entire clip with frames indexed along the first axis, rows
        along the second and columns along the third.
    """
    frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])
    file_lookup = OrderedDict(file_tuples)
    patch_length, patch_height, patch_width = shape
    done = 0
    rng = make_np_rng(rng, which_method="random_integers")
    while done < n_patches:
        frame = rng.random_integers(0, len(frame_lookup) - 1)
        filename, file_length, frame_no = frame_lookup[frame]
        # Check that there is a contiguous block of frames starting at
        # frame_no that is at least as long as our desired cube length.
        if file_length - frame_no < patch_length:
            continue
        _, video_height, video_width = file_lookup[filename][:3]
        # The last row and column in which a patch could "start" to still
        # fall within frame.
        last_row = video_height - patch_height
        last_col = video_width - patch_width
        row = rng.random_integers(0, last_row)
        col = rng.random_integers(0, last_col)
        patch_slice = (
            slice(frame_no, frame_no + patch_length),
            slice(row, row + patch_height),
            slice(col, col + patch_width),
        )
        done += 1
        yield filename, patch_slice
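Since the generator is infinite by default, a caller typically bounds it; a small usage sketch with made-up clip metadata:

from itertools import islice

# Hypothetical clips: (identifier, (n_frames, height, width)).
clips = [('clip_a', (300, 120, 160)), ('clip_b', (200, 120, 160))]
for filename, patch_slice in islice(
        spatiotemporal_cubes(clips, shape=(16, 32, 32), rng=42), 5):
    print(filename, patch_slice)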
Example #42
    def __init__(
        self,
        learning_rate,
        cost=None,
        batch_size=None,
        monitoring_batches=None,
        monitoring_dataset=None,
        monitor_iteration_mode="sequential",
        termination_criterion=None,
        update_callbacks=None,
        learning_rule=None,
        init_momentum=None,
        set_batch_size=False,
        train_iteration_mode=None,
        batches_per_iter=None,
        theano_function_mode=None,
        monitoring_costs=None,
        seed=[2012, 10, 5],
    ):

        if isinstance(cost, (list, tuple, set)):
            raise TypeError(
                "SGD no longer supports using collections of "
                + "Costs to represent a sum of Costs. Use "
                + "pylearn2.costs.cost.SumOfCosts instead."
            )

        if init_momentum:
            warnings.warn(
                "init_momentum interface is deprecated and will "
                "become officially unsuported as of May 9, 2014. Please use the "
                "`learning_rule` parameter instead, providing an object of type "
                "`pylearn2.training_algorithms.learning_rule.Momentum` instead"
            )
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, "learning_rate")
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " + "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = "shuffled_sequential"
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = make_np_rng(seed, which_method=["randn", "randint"])
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
Example #43
    def __call__(self):

        print('loading model')
        model_path = self.model_path
        self.model = serial.load(model_path)
        self.model.set_dtype('float32')
        input_space = self.model.get_input_space()
        #This code all assumes the model works on vectors
        assert isinstance(input_space, VectorSpace)
        nvis = input_space.dim
        self.size = int(np.sqrt(nvis/3))

        rng = make_np_rng(None, [1,2,3], which_method="randint")

        #Generate the random pooling structure
        num_filters = self.model.mu.get_value().shape[0]

        idxs = rng.randint(0,num_filters,(self.num_output_features,))
        top = idxs.copy()
        bottom = idxs.copy()
        left = idxs.copy()
        right = idxs.copy()
        for i in xrange(self.num_output_features):
            top[i] = rng.randint(num_superpixels)
            bottom[i] = rng.randint(top[i],num_superpixels)
            left[i] = rng.randint(num_superpixels)
            right[i] = rng.randint(left[i],num_superpixels)

        # output[i] will pool over detector feature map i
        self.idxs = idxs
        # output[i] will pool over a rectangle with upper-left coordinates
        # (top[i], left[i]) and lower-right coordinates (bottom[i], right[i])
        self.top = top
        self.bottom = bottom
        self.left = left
        self.right = right

        # Run the experiment
        if self.chunk_size is not None:
            dataset_family = self.dataset_family
            which_set = self.which_set
            dataset_descriptor = self.dataset_family[which_set][self.size]

            num_examples = dataset_descriptor.num_examples
            assert num_examples % self.chunk_size == 0

            self.chunk_id = 0
            for i in xrange(0,num_examples, self.chunk_size):
                self.restrict = (i, i + self.chunk_size)

                self._execute()

                self.chunk_id += 1
        else:
            self._execute()
Example #44
    def reset_RNG(self):
        """
        Restore the default seed of the rng used for choosing random
        examples.
        """

        if "default_rng" not in dir(self):
            self.default_rng = make_np_rng(None, [17, 2, 946],
                                           which_method="random_integers")
        self.rng = copy.copy(self.default_rng)
Example #45
def spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):
    """
    Generator function that yields a stream of (filename, slicetuple)
    representing a spatiotemporal patch of that file.

    Parameters
    ----------
    file_tuples : list of tuples
        Each element should be a 2-tuple consisting of a filename
        (or arbitrary identifier) and a (length, height, width)
        shape tuple of the dimensions (number of frames in the video,
        height and width of each frame).

    shape : tuple
        A shape tuple consisting of the desired (length, height, width)
        of each spatiotemporal patch.

    n_patches : int, optional
        The number of patches to generate. By default, generates patches
        infinitely.

    rng : RandomState object or seed, optional
        The random number generator (or seed) to use. Defaults to None,
        meaning it will be seeded from /dev/urandom or the clock.

    Returns
    -------
    generator : generator object
        A generator that yields a stream of (filename, slicetuple) tuples.
        The slice tuple is such that it indexes into a 3D array containing
        the entire clip with frames indexed along the first axis, rows
        along the second and columns along the third.
    """
    frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])
    file_lookup = OrderedDict(file_tuples)
    patch_length, patch_height, patch_width = shape
    done = 0
    rng = make_np_rng(rng, which_method="random_integers")
    while done < n_patches:
        frame = rng.random_integers(0, len(frame_lookup) - 1)
        filename, file_length, frame_no = frame_lookup[frame]
        # Check that there is a contiguous block of frames starting at
        # frame_no that is at least as long as our desired cube length.
        if file_length - frame_no < patch_length:
            continue
        _, video_height, video_width = file_lookup[filename][:3]
        # The last row and column in which a patch could "start" to still
        # fall within frame.
        last_row = video_height - patch_height
        last_col = video_width - patch_width
        row = rng.random_integers(0, last_row)
        col = rng.random_integers(0, last_col)
        patch_slice = (slice(frame_no, frame_no + patch_length),
                       slice(row, row + patch_height),
                       slice(col, col + patch_width))
        done += 1
        yield filename, patch_slice
Example #46
    def __init__(self, nvis, nhid, act_enc, act_dec, tied_weights=False, irange=1e-3, rng=9001):
        super(Autoencoder, self).__init__()
        assert nvis >= 0, "Number of visible units must be non-negative"
        assert nhid > 0, "Number of hidden units must be positive"

        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nhid)

        # Save a few parameters needed for resizing
        self.nhid = nhid
        self.irange = irange
        self.tied_weights = tied_weights
        self.rng = make_np_rng(rng, which_method="randn")
        self._initialize_hidbias()
        if nvis > 0:
            self._initialize_visbias(nvis)
            self._initialize_weights(nvis)
        else:
            self.visbias = None
            self.weights = None

        seed = int(self.rng.randint(2 ** 30))

        # why a theano rng? should we remove it?
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        if tied_weights and self.weights is not None:
            self.w_prime = self.weights.T
        else:
            self._initialize_w_prime(nvis)

        def _resolve_callable(conf, conf_attr):
            """
            .. todo::

                WRITEME
            """
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], "__call__"):
                return conf[conf_attr]
            elif conf[conf_attr] in globals() and hasattr(globals()[conf[conf_attr]], "__call__"):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" % (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), "act_enc")
        self.act_dec = _resolve_callable(locals(), "act_dec")
        self._params = [self.visbias, self.hidbias, self.weights]
        if not self.tied_weights:
            self._params.append(self.w_prime)
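For reference, a sketch of what _resolve_callable accepts: None or 'linear' yields an identity (None), a callable is used as-is, and a string is looked up on theano.tensor.nnet and then theano.tensor, so both of the following resolve:

import theano.tensor as tensor

act_enc = getattr(tensor.nnet, 'sigmoid')  # found on tensor.nnet
act_dec = getattr(tensor, 'tanh')          # found on tensor itself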
Example #47
    def __init__(self,
                 learning_rate,
                 cost=None,
                 batch_size=None,
                 monitoring_batches=None,
                 monitoring_dataset=None,
                 monitor_iteration_mode='sequential',
                 termination_criterion=None,
                 update_callbacks=None,
                 learning_rule=None,
                 init_momentum=None,
                 set_batch_size=False,
                 train_iteration_mode=None,
                 batches_per_iter=None,
                 theano_function_mode=None,
                 monitoring_costs=None,
                 seed=[2012, 10, 5]):

        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of " +
                            "Costs to represent a sum of Costs. Use " +
                            "pylearn2.costs.cost.SumOfCosts instead.")

        if init_momentum:
            warnings.warn(
                "init_momentum interface is deprecated and will "
                "become officially unsuported as of May 9, 2014. Please use the "
                "`learning_rule` parameter instead, providing an object of type "
                "`pylearn2.training_algorithms.learning_rule.Momentum` instead"
            )
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " +
                                 "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = make_np_rng(seed, which_method=["randn", "randint"])
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
Example #48
    def __init__(self, num_examples, rng=(2013, 5, 17)):
        """
        .. todo::

            WRITEME
        """
        rng = make_np_rng(rng, self._default_seed, which_method='uniform')
        X = rng.uniform(-1, 1, size=(num_examples, 2))
        y = _four_regions_labels(X)
        super(FourRegions, self).__init__(X=X, y=y, y_labels=4)
Example #49
    def __init__(self, num_examples, rng=(2013, 5, 17)):
        """
        .. todo::

            WRITEME
        """
        rng = make_np_rng(rng, self._default_seed, which_method='uniform')
        X = rng.uniform(-1, 1, size=(num_examples, 2))
        y = _four_regions_labels(X)
        super(FourRegions, self).__init__(X=X, y=y, y_labels=4)
Example #50
def test_np_rng():
    """
        Tests that the four possible ways of creating
        a numpy RNG give the same results with the same seed
    """

    rngs = [make_np_rng(rng_or_seed=42, which_method='uniform'),
            make_np_rng(rng_or_seed=numpy.random.RandomState(42),
                        which_method='uniform'),
            make_np_rng(default_seed=42),
            make_np_rng()]

    random_numbers = rngs[0].uniform(size=(100,))
    equals = numpy.ones((100,))
    for rng in rngs[1:]:
        equal = random_numbers == rng.uniform(size=(100,))
        equals *= equal

    assert equals.all()
Example #51
    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=None):
        """
        .. todo::

            WRITEME
        """
        self.min_x, self.max_x, self.std = min_x, max_x, std
        rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
        self.default_rng = copy.copy(rng)
        self.rng = rng
Example #52
    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=None):
        """
        .. todo::

            WRITEME
        """
        self.min_x, self.max_x, self.std = min_x, max_x, std
        rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
        self.default_rng = copy.copy(rng)
        self.rng = rng
Example #53
    def reset_RNG(self):
        """
        Restore the default seed of the rng used for choosing random
        examples.
        """

        if 'default_rng' not in dir(self):
            self.default_rng = make_np_rng(None, [17, 2, 946],
                    which_method="random_integers")
        self.rng = copy.copy(self.default_rng)
Example #54
    def __call__(self):

        print('loading model')
        model_path = self.model_path
        self.model = serial.load(model_path)
        self.model.set_dtype('float32')
        input_space = self.model.get_input_space()
        #This code all assumes the model works on vectors
        assert isinstance(input_space, VectorSpace)
        nvis = input_space.dim
        self.size = int(np.sqrt(nvis / 3))

        rng = make_np_rng(None, [1, 2, 3], which_method="randint")

        #Generate the random pooling structure
        num_filters = self.model.mu.get_value().shape[0]

        idxs = rng.randint(0, num_filters, (self.num_output_features, ))
        top = idxs.copy()
        bottom = idxs.copy()
        left = idxs.copy()
        right = idxs.copy()
        for i in xrange(self.num_output_features):
            top[i] = rng.randint(num_superpixels)
            bottom[i] = rng.randint(top[i], num_superpixels)
            left[i] = rng.randint(num_superpixels)
            right[i] = rng.randint(left[i], num_superpixels)

        #output[i] will pool over detector feature map i
        self.idxs = idxs
        #output [i] will pool over a rectangle with upper left coordinates (top[i],left[i])
        #and lower right coordinates (bottom[i],right[i])
        self.top = top
        self.bottom = bottom
        self.left = left
        self.right = right

        #Run the experiment
        if self.chunk_size is not None:
            dataset_family = self.dataset_family
            which_set = self.which_set
            dataset_descriptor = self.dataset_family[which_set][self.size]

            num_examples = dataset_descriptor.num_examples
            assert num_examples % self.chunk_size == 0

            self.chunk_id = 0
            for i in xrange(0, num_examples, self.chunk_size):
                self.restrict = (i, i + self.chunk_size)

                self._execute()

                self.chunk_id += 1
        else:
            self._execute()
Example #55
 def __init__(self, dataset_size, batch_size, num_batches, rng=None):
     super(ShuffledSequentialSubsetIterator, self).__init__(
         dataset_size,
         batch_size,
         num_batches,
         None
     )
     self._rng = make_np_rng(rng, which_method=["random_integers",
                                                "shuffle"])
     self._shuffled = np.arange(self._dataset_size)
     self._rng.shuffle(self._shuffled)
Example #56
def test_np_rng():
    """
        Tests that the four possible ways of creating
        a numpy RNG give the same results with the same seed
    """

    rngs = [
        make_np_rng(rng_or_seed=42, which_method='uniform'),
        make_np_rng(rng_or_seed=numpy.random.RandomState(42),
                    which_method='uniform'),
        make_np_rng(default_seed=42),
        make_np_rng()
    ]

    random_numbers = rngs[0].uniform(size=(100, ))
    equals = numpy.ones((100, ))
    for rng in rngs[1:]:
        equal = random_numbers == rng.uniform(size=(100, ))
        equals *= equal

    assert equals.all()
Example #57
def shuffle(X, Y):
    shuffle_rng = make_np_rng(None, [1, 2, 3], which_method="shuffle")
    for i in xrange(X.shape[0]):
        j = shuffle_rng.randint(len(X))
        # Copy ensures that memory is not aliased.
        tmp = X[i, :, :, :].copy()
        X[i, :, :, :] = X[j, :, :, :]
        X[j, :, :, :] = tmp

        tmp = Y[i:i + 1].copy()
        Y[i] = Y[j]
        Y[j] = tmp
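The loop above swaps each row with a uniformly chosen partner; a shorter alternative (a sketch, assuming the same seeding convention) applies one shared permutation so X and Y stay aligned:

import numpy as np

def shuffle_permutation(X, Y):
    rng = np.random.RandomState([1, 2, 3])
    perm = rng.permutation(X.shape[0])  # one unbiased permutation
    return X[perm], Y[perm]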
Example #58
    def __init__(self,
                 nvis,
                 prior,
                 conditional,
                 posterior,
                 nhid,
                 learn_prior=True,
                 kl_integrator=None,
                 batch_size=None,
                 seed=None):
        super(VAE, self).__init__()

        self.__dict__.update(locals())
        del self.self

        self.rng = make_np_rng(self.seed, default_seed,
                               ['uniform', 'randint', 'randn'])

        self.prior.set_vae(self)
        self.conditional.set_vae(self)
        self.posterior.set_vae(self)

        self.learn_prior = learn_prior

        # Space initialization
        self.input_space = VectorSpace(dim=self.nvis)
        self.input_source = 'features'
        self.latent_space = VectorSpace(dim=self.nhid)

        # Parameter initialization
        self.prior.initialize_parameters(nhid=self.nhid)
        self.conditional.initialize_parameters(input_space=self.latent_space,
                                               ndim=self.nvis)
        self.posterior.initialize_parameters(input_space=self.input_space,
                                             ndim=self.nhid)
        self._params = (self.get_posterior_params() +
                        self.get_conditional_params())
        if self.learn_prior:
            self._params += self.get_prior_params()

        names = []
        for param in self._params:
            if param.name not in names:
                names.append(param.name)
            else:
                raise Exception(
                    "two parameters must not share the same name: " +
                    param.name)

        # Look for the right KLIntegrator if it's not specified
        if self.kl_integrator is None:
            self.kl_integrator = find_integrator_for(self.prior,
                                                     self.posterior)