# Imports (assumed for this excerpt: the module paths follow the PyDeep
# library's layout; train_Hebbian_descent_model comes from the Hebbian-descent
# reference code and its import is omitted here, as in the original excerpt)
import numpy as np
import pydeep.ae.model as aeModel
import pydeep.ae.trainer as aeTrainer
import pydeep.base.activationfunction as actFct
import pydeep.base.costfunction as cost
import pydeep.misc.io as io
import matplotlib.pyplot as vis  # assumption: `vis` wraps matplotlib's plotting interface

numx = np  # PyDeep code conventionally aliases numpy as `numx`

# Set the random seed for reproducibility
np.random.seed(42)

# Choose data and model size
num_patterns = 100
data_dim = 200

# Generate random binary patterns and random binary targets
# (np.random.random_integers is deprecated; randint's upper bound is exclusive)
train_data = np.float64(
    np.random.randint(0, 2, (num_patterns, data_dim)))
train_label = np.float64(
    np.random.randint(0, 2, (num_patterns, data_dim)))

# Choose activation function
act = actFct.Sigmoid()

# Train the model; with one epoch and batch size 1 this performs a single
# online-learning trial as shown in Figure 8 a) of the Hebbian descent paper
mymodel, results = train_Hebbian_descent_model(train_data=train_data,
                                               train_label=train_label,
                                               centered=True,
                                               act=act,
                                               epochs=1,
                                               epsilon=0.2,
                                               batch_size=1,
                                               weightdecay=0.0)
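
# For intuition, a minimal sketch of the centered Hebbian-descent update the
# trainer applies per pattern. This is illustrative only: the function name and
# signature are assumptions, not the library's API, and it assumes the
# activation object exposes f() as PyDeep's activation functions do. Unlike
# plain gradient descent, the update uses the raw output error without the
# activation-derivative factor:
def hebbian_descent_step(weights, bias, x, target, mu, act_fct, epsilon):
    y = act_fct.f(numx.dot(x - mu, weights) + bias)  # forward pass on centered input
    err = y - target                                 # output error, no derivative term
    weights -= epsilon * numx.outer(x - mu, err)     # Hebbian-descent weight update
    bias -= epsilon * err                            # bias update
    return weights, bias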

# Display errors
vis.xlabel("Pattern index (" + str(num_patterns) + " = latest pattern)")
vis.ylabel("Mean Absolute Error")
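# A hedged sketch of actually rendering the curve, assuming `results` holds one
# mean-absolute-error value per stored pattern (the exact return format of
# train_Hebbian_descent_model is not shown in this excerpt):
vis.plot(numx.ravel(results))
vis.show()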
########################################################################
# Autoencoder example: linear output units and squared error on MNIST
########################################################################

# Load MNIST and stack all 70,000 images into one array (assumed here; the
# original excerpt prepares `data` in code that is not shown)
train_set, _, valid_set, _, test_set, _ = io.load_mnist(
    "../../data/mnist.pkl.gz", False)
data = numx.vstack((train_set, valid_set, test_set))

# Split into training and test data
train_data = data[0:50000]
test_data = data[50000:70000]

# Set hyperparameters batchsize and number of epochs
batch_size = 10
max_epochs = 20

# Image and hidden-layer grid dimensions (assumed: 28x28 MNIST images,
# 10x10 hidden units)
v1 = 28
v2 = 28
h1 = 10
h2 = 10

# Create model with sigmoid hidden units, linear output units, and squared error.
ae = aeModel.AutoEncoder(
    v1 * v2,
    h1 * h2,
    data=train_data,
    visible_activation_function=actFct.Identity(),
    hidden_activation_function=actFct.Sigmoid(),
    cost_function=cost.SquaredError(),
    initial_weights=0.01,
    initial_visible_bias=0.0,
    initial_hidden_bias=-2.0,
    # Initially set the hidden units to be inactive; this speeds up learning a little
    initial_visible_offsets=0.0,
    initial_hidden_offsets=0.02,
    dtype=numx.float64)
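
# For intuition, a minimal sketch of the centered forward pass this model
# computes, inferred from the constructor arguments above (how PyDeep applies
# the offsets internally is an assumption here):
def centered_ae_forward(x, weights, bias_v, bias_h, mu_v, mu_h):
    h = 1.0 / (1.0 + numx.exp(-(numx.dot(x - mu_v, weights) + bias_h)))  # sigmoid encoder
    x_rec = numx.dot(h - mu_h, weights.T) + bias_v                       # identity decoder
    return h, x_rec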

# Initialize the gradient descent trainer
trainer = aeTrainer.GDTrainer(ae)

# Train model
print('Training')
print('Epoch\tRE train\t\tRE test\t\t\tSparseness train\tSparseness test')
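# The excerpt ends at the header line above. A minimal sketch of the
# mini-batch training loop that would follow (the argument names of
# trainer.train() and the epsilon value are assumptions; the per-epoch error
# and sparseness columns are omitted because the estimator calls are not
# shown in this excerpt):
for epoch in range(1, max_epochs + 1):
    numx.random.shuffle(train_data)
    for b in range(0, train_data.shape[0], batch_size):
        trainer.train(data=train_data[b:b + batch_size], epsilon=0.1)
    print('%5d' % epoch)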
########################################################################
# Autoencoder example: sigmoid output units and cross-entropy on MNIST
########################################################################
# Image and hidden-layer grid dimensions (28x28 MNIST images, 10x10 hidden units)
v1 = 28
v2 = 28
h1 = 10
h2 = 10

# Load data; get it from 'deeplearning.net/data/mnist/mnist.pkl.gz'
train_data, _, _, _, test_data, _ = io.load_mnist("../../data/mnist.pkl.gz",
                                                  False)
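
# With the standard MNIST pickle, train_data has shape (50000, 784), i.e.
# 50,000 flattened 28x28 grey-scale images; test_data holds the 10,000 test images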

# Set hyperparameters batchsize and number of epochs
batch_size = 10
max_epochs = 10

# Create model with sigmoid hidden and visible units and cross-entropy loss.
ae = aeModel.AutoEncoder(v1 * v2,
                         h1 * h2,
                         data=train_data,
                         visible_activation_function=actFct.Sigmoid(),
                         hidden_activation_function=actFct.Sigmoid(),
                         cost_function=cost.CrossEntropyError(),
                         initial_weights='AUTO',
                         initial_visible_bias='AUTO',
                         initial_hidden_bias='AUTO',
                         initial_visible_offsets='AUTO',
                         initial_hidden_offsets='AUTO',
                         dtype=numx.float64)

# Initialize the gradient descent trainer
trainer = aeTrainer.GDTrainer(ae)

# Train model
print('Training')
print('Epoch\tRE train\t\tRE test\t\t\tSparseness train\tSparseness test')
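# As above, the excerpt stops at the header line. Training would again loop
# trainer.train() over mini-batches (same assumptions as the sketch after the
# previous autoencoder example):
for epoch in range(1, max_epochs + 1):
    numx.random.shuffle(train_data)
    for b in range(0, train_data.shape[0], batch_size):
        trainer.train(data=train_data[b:b + batch_size], epsilon=0.1)
    print('%5d' % epoch)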