def test_pylearn2_trainin():
    """Smoke-test training a Blocks MLP through Pylearn2's SGD loop.

    Builds a 784-100-784 sigmoid autoencoder, wraps it with the
    Blocks/Pylearn2 adapter classes, and runs a time-limited training
    loop on random data.

    NOTE(review): the function name is missing a trailing "g"; it is kept
    as-is because ``test_pylearn2_training`` already exists in this file.
    """
    # Autoencoder-shaped MLP with small Gaussian weights and tiny biases.
    mlp = MLP(activations=[Sigmoid(), Sigmoid()],
              dims=[784, 100, 784],
              weights_init=IsotropicGaussian(),
              biases_init=Constant(0.01))
    mlp.initialize()
    block_cost = BlocksCost(SquaredError())
    block_model = BlocksModel(mlp, (VectorSpace(dim=784), 'features'))

    # Synthetic design matrices for training and monitoring.
    rng = numpy.random.RandomState(14)
    train_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
    valid_dataset = random_dense_design_matrix(rng, 1024, 784, 10)

    # Silence Pylearn2's logger
    logging.getLogger(pylearn2.__name__).setLevel(logging.ERROR)

    # Run a short, wall-clock-bounded SGD loop.
    sgd = SGD(learning_rate=0.01, cost=block_cost, batch_size=128,
              monitoring_dataset=valid_dataset)
    train = Train(train_dataset, block_model, algorithm=sgd)
    train.main_loop(time_budget=3)
def test_pylearn2_training():
    """Smoke-test training a Blocks MLP via the Pylearn2 wrapper classes.

    Same setup as ``test_pylearn2_trainin`` but using ``Pylearn2Cost`` /
    ``Pylearn2Model`` / ``Pylearn2Train`` and an explicitly-built Theano
    cost expression.
    """
    # Autoencoder-shaped MLP with small Gaussian weights and tiny biases.
    mlp = MLP(activations=[Sigmoid(), Sigmoid()],
              dims=[784, 100, 784],
              weights_init=IsotropicGaussian(),
              biases_init=Constant(0.01))
    mlp.initialize()
    cost = SquaredError()

    # Synthetic design matrices for training and monitoring.
    rng = numpy.random.RandomState(14)
    train_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
    valid_dataset = random_dense_design_matrix(rng, 1024, 784, 10)

    # Build the symbolic reconstruction cost and wrap model/cost for Pylearn2.
    x = tensor.matrix('features')
    block_cost = Pylearn2Cost(cost.apply(x, mlp.apply(x)))
    block_model = Pylearn2Model(mlp)

    # Silence Pylearn2's logger
    logging.getLogger(pylearn2.__name__).setLevel(logging.ERROR)

    # Run a short, wall-clock-bounded SGD loop.
    sgd = SGD(learning_rate=0.01, cost=block_cost, batch_size=128,
              monitoring_dataset=valid_dataset)
    train = Pylearn2Train(train_dataset, block_model, algorithm=sgd)
    train.main_loop(time_budget=3)
def test_zca_dataset():
    """
    Tests the ZCA_Dataset class: whitening statistics, mapback round-trips,
    the viewer-adjustment helpers, and has_targets().

    NOTE(review): a second ``test_zca_dataset`` is defined later in this
    file and shadows this one at import time — confirm which is intended.
    """
    # Preparation
    rng = np.random.RandomState([2014, 11, 4])
    start, stop = 0, 990
    num_examples, num_feat, num_classes = 1000, 5, 2

    # random_dense_design_matrix has values that are centered and of
    # unit stdev, which is not useful to test the ZCA.
    # So, we replace its value by an uncentered uniform one.
    raw = random_dense_design_matrix(rng, num_examples, num_feat, num_classes)
    uncentered = rng.uniform(low=-0.5, high=2.0,
                             size=(num_examples, num_feat)).astype(np.float32)
    raw.X = uncentered

    zca = ZCA(filter_bias=0.0)
    zca.apply(raw, can_fit=True)
    zca_dataset = ZCA_Dataset(raw, zca, start, stop)

    # Whitened data should be approximately zero-mean with unit stdev.
    assert_allclose(zca_dataset.X.mean(axis=0), np.zeros(num_feat), atol=1e-2)
    assert_allclose(zca_dataset.X.std(axis=0), np.ones(num_feat), atol=1e-2)

    # mapback() must invert the whitening back to the raw values.
    recovered = zca_dataset.mapback(zca_dataset.X)
    assert_allclose(uncentered[start:stop], recovered)

    # mapback_for_viewer() additionally rescales per-feature to [-1, 1].
    viewed = zca_dataset.mapback_for_viewer(zca_dataset.X)
    scaled = uncentered / np.abs(uncentered).max(axis=0)
    assert_allclose(scaled[start:stop], viewed, rtol=1e-2)

    # adjust_for_viewer() rescales per-feature (applied on the transpose).
    adjusted = zca_dataset.adjust_for_viewer(uncentered.T).T
    scaled = uncentered / np.abs(uncentered).max(axis=0)
    assert_allclose(scaled, adjusted)

    # adjust_to_be_viewed_with(): per_example=True scales like the reference,
    # per_example=False scales by the global max instead.
    halved = zca_dataset.adjust_to_be_viewed_with(uncentered, 2 * uncentered,
                                                  True)
    reference = zca_dataset.adjust_for_viewer(uncentered)
    assert_allclose(reference / 2, halved)
    halved = zca_dataset.adjust_to_be_viewed_with(uncentered, 2 * uncentered,
                                                  False)
    reference = uncentered / np.abs(uncentered).max()
    assert_allclose(reference / 2, halved)

    # Testing has_targets()
    assert zca_dataset.has_targets()
def test_zca_dataset():
    """
    Test that a ZCA dataset can be constructed without crashing. No
    attempt to verify correctness of behavior.
    """
    rng = np.random.RandomState([2014, 11, 4])
    # Tiny dataset: construction is all that is exercised here.
    raw = random_dense_design_matrix(rng,
                                     num_examples=5,
                                     dim=3,
                                     num_classes=2)
    zca = ZCA()
    zca.apply(raw, can_fit=True)
    ZCA_Dataset(raw, zca, start=1, stop=4)
def test_hdf5_convert_to_one_hot():
    """Train using an HDF5 dataset with one-hot target conversion.

    Writes a small random design matrix and its targets to a temporary
    HDF5 file, builds a Train object from YAML pointing at that file,
    and runs the main loop.
    """
    skip_if_no_h5py()
    import h5py

    # save random data to HDF5
    handle, filename = tempfile.mkstemp()
    # mkstemp returns an already-open OS-level fd; close it so it does
    # not leak (h5py reopens the file by name below).
    os.close(handle)
    dataset = random_dense_design_matrix(np.random.RandomState(1),
                                         num_examples=10, dim=5,
                                         num_classes=3)
    try:
        with h5py.File(filename, 'w') as f:
            f.create_dataset('X', data=dataset.get_design_matrix())
            f.create_dataset('y', data=dataset.get_targets())

        # instantiate Train object
        trainer = yaml_parse.load(convert_to_one_hot_yaml %
                                  {'filename': filename})
        trainer.main_loop()
    finally:
        # cleanup — also runs when training raises, so the temp file
        # never leaks.
        os.remove(filename)