Example #1
import numpy as np

import data        # project-local data-loading module
import utils.logs  # project-local logging helpers

training_epochs = 1000
batch_size = 10

# early-stopping parameters (the loop that uses them is sketched after this example)
patience = 20000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is found
improvement_threshold = 0.995  # a relative improvement of this much is considered significant
pretrain_vis_freq = False   # False disables visualisation during pre-training
finetrain_vis_freq = False  # False disables visualisation during fine-tuning

if __name__ == '__main__':

    logger = utils.logs.get_logger(
        __name__, update_stream_level=utils.logs.logging.DEBUG)
    logger.info('Loading data ...')
    source = data.Load_Data(location=data.data_loc)

    datasets = source.all()
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    # NumPy random number generator (fixed seed for reproducibility)
    np_rng = np.random.RandomState(123)

    logger.info('Building the model ...')
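
The snippet declares the early-stopping parameters but stops before the loop that consumes them. Below is a minimal, self-contained sketch of the schedule those three values drive; validate() and the batch counts here are hypothetical stand-ins, not code from this project.

import random

training_epochs = 1000
n_train_batches = 5000  # e.g. 50,000 training examples at a batch size of 10
patience = 20000        # look at this many examples regardless
patience_increase = 2
improvement_threshold = 0.995

def validate():
    # hypothetical stand-in for computing the model's validation loss
    return random.random()

# check the validation loss roughly twice per patience window,
# but no more than once per epoch
validation_frequency = min(n_train_batches, patience // 2)

best_validation_loss = float('inf')
done_looping = False
epoch = 0
while epoch < training_epochs and not done_looping:
    epoch += 1
    for minibatch_index in range(n_train_batches):
        it = (epoch - 1) * n_train_batches + minibatch_index
        if (it + 1) % validation_frequency == 0:
            this_loss = validate()
            if this_loss < best_validation_loss:
                # a large enough relative improvement extends the patience window
                if this_loss < best_validation_loss * improvement_threshold:
                    patience = max(patience, it * patience_increase)
                best_validation_loss = this_loss
        if patience <= it:
            # seen `patience` examples without sufficient improvement: stop
            done_looping = True
            break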
Example #2
import numpy as np

import data        # project-local data-loading module
import utils.logs  # project-local logging helpers

training_epochs = 1000
batch_size = 10

# early-stopping parameters
patience = 20000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is found
improvement_threshold = 0.995  # a relative improvement of this much is considered significant
pretrain_vis_freq = 200  # visualisation frequency during pre-training
finetrain_vis_freq = 1   # visualisation frequency during fine-tuning

if __name__ == '__main__':
    
    logger = utils.logs.get_logger(
        __name__, update_stream_level=utils.logs.logging.DEBUG)
    logger.info('Loading data ...')
    source = data.Load_Data()
    
    datasets = source.mnist()
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    
    # compute number of minibatches for training, validation and testing
    # (minibatch slicing itself is sketched after this example)
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
    
    # NumPy random number generator (fixed seed for reproducibility)
    np_rng = np.random.RandomState(123)
    
    logger.info('Building the model ...')
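
Neither snippet shows how batch_size and the n_*_batches counts are consumed downstream. The get_value(borrow=True) calls suggest Theano shared variables, where each minibatch is a slice selected by a symbolic index; a sketch under that assumption, with a toy array standing in for train_set_x:

import numpy as np
import theano
import theano.tensor as T

batch_size = 10
# toy stand-in: 100 examples with 784 features each
train_set_x = theano.shared(
    np.random.randn(100, 784).astype(theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size

index = T.lscalar('index')  # symbolic minibatch index
get_batch = theano.function(
    [index], train_set_x[index * batch_size:(index + 1) * batch_size])

print(get_batch(0).shape)  # (10, 784); valid indices run 0..n_train_batches - 1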
Example #3
import data        # project-local data-loading module
import utils.logs  # project-local logging helpers

batch_size = 10  # not defined in the original snippet; assumed to match the other examples

# early-stopping parameters
patience = 50000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is found
improvement_threshold = 0.995  # a relative improvement of this much is considered significant

# sampling frequency for plotting
freq = 1

if __name__ == "__main__":

    logger = utils.logs.get_logger(
        __name__, update_stream_level=utils.logs.logging.DEBUG)
    logger.info('Loading data ...')
    source = data.Load_Data(location=data.data_loc,
                            # search_pat='day1'
                            )

    datasets = source.all()
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    logger.info('Building the model ...')

    # allocate symbolic variables for the data
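    # The snippet breaks off here. Assuming the classic Theano setup that the
    # get_value(borrow=True) calls above suggest (and `import theano.tensor as T`),
    # the symbolic variables would typically be allocated along these lines;
    # this is a sketch, not this project's actual code:
    index = T.lscalar('index')  # index of a minibatch
    x = T.matrix('x')           # input matrix, one example per row
    y = T.ivector('y')          # vector of integer class labels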