# --- Compile the layer-wise pre-training functions ---------------------------
print_flush("... getting the pre-training functions")
pretraining_fns = stacked_autoencoder.pretraining_functions(training_set=training_set,
                                                            batch_size=batch_size)

if ENABLE_FINE_TUNING:
    # Build the fine-tuning train/validate functions up front so a compile
    # error surfaces before we spend time on pre-training.
    print_flush("... getting the fine-tune function")
    if fine_tune_supervised:
        finetune_fn, validate_model = stacked_autoencoder.finetune_functions(
            training_set=training_set,
            training_labels=training_labels,
            test_set=test_set,
            test_labels=test_labels,
            batch_size=batch_size,
            learning_rate=fine_tune_learning_rate)
    else:
        finetune_fn, validate_model = stacked_autoencoder.finetune_functions_unsupervised(
            training_set=training_set,
            test_set=test_set,
            batch_size=batch_size,
            learning_rate=fine_tune_learning_rate)

print_flush("... pre-training the model")
start_time = clock()

# --- Pre-train layer-wise ----------------------------------------------------
# Each layer i gets its own learning rate and denoising corruption level.
for i in range(stacked_autoencoder.n_layers):
    learning_rate = pretraining_learning_rates[i]
    corruption_level = corruption_levels[i]
    # Go through the pre-training epochs for this layer.
    for epoch in range(pretraining_epochs):
        # Per-epoch costs over the training set.
        c = []
        # NOTE: was `xrange` — normalized to `range` for consistency with the
        # two other loops in this block; iteration behavior is identical.
        for batch_index in range(n_train_batches):
            cur_cost = pretraining_fns[i](index=batch_index,
                                          corruption=corruption_level,
                                          lr=learning_rate)
            # NOTE(review): the visible chunk ends here; the original loop body
            # presumably continues (e.g. `c.append(cur_cost)`) — confirm against
            # the rest of the file before relying on `c`.