Example #1
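# Example #1: trains a two-layer fully connected network on square pixel
# patches (side 2*border+1) to predict one cleaned output pixel per patch.
# border, n_hidden_layer and eta are supplied by the caller; the run's
# constants are logged through MetricRecorder.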
def train(job_id, border, n_hidden_layer, eta):
    print "Job ID: %d" % job_id
    metric_recorder = MetricRecorder(config_dir_path='.', job_id=job_id)
    C = {
        'X_dirpath' : '../../../data/train/*',
        'y_dirpath' : '../../../data/train_cleaned/',
        'mini_batch_size' : 500,
        'batchsize' : 500000,
        'limit' : 30,
        'epochs' : 100,
        'patience' : 20000,
        'patience_increase' : 2,
        'improvement_threshold' : 0.995,
        'validation_frequency' : 5000,
        'lmbda' : 0.0,
        'training_size' : None,
        'validation_size' : None,
        'algorithm' : 'RMSProp'
    }

    training_data = BatchProcessor(
        X_dirpath='../../../data/train/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        border=border,
        limit=C['limit'],
        dtype=theano.config.floatX)

    validation_data = BatchProcessor(
        X_dirpath='../../../data/valid/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        border=border,
        limit=C['limit'],
        dtype=theano.config.floatX)

    C['training_size'] = len(training_data)
    C['validation_size'] = len(validation_data)
    print "Training size: %d" % C['training_size']
    print "Validation size: %d" % C['validation_size']

    metric_recorder.add_experiment_metainfo(constants=C)
    metric_recorder.start()

    # Each training example is the flattened square patch around a pixel:
    # with border=2 the patch is 5x5, i.e. 25 inputs.
    n_in = (2*border+1)**2
    net = Network([FullyConnectedLayer(n_in=n_in, n_out=n_hidden_layer),
                   FullyConnectedLayer(n_in=n_hidden_layer, n_out=1)],
                  C['mini_batch_size'])

    result = net.train(tdata=training_data, epochs=C['epochs'],
                     mbs=C['mini_batch_size'], eta=eta,
                     vdata=validation_data, lmbda=C['lmbda'],
                     momentum=None, patience_increase=C['patience_increase'],
                     improvement_threshold=C['improvement_threshold'],
                     validation_frequency=C['validation_frequency'],
                     metric_recorder=metric_recorder)

    print 'Time = %f' % metric_recorder.stop()
    print 'Result = %f' % result
    return float(result)
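# Hypothetical invocation (not part of the original snippet); it assumes the
# module-level imports shown in Examples #2 and #6, and the argument values
# below are illustrative only.
if __name__ == '__main__':
    print train(job_id=1, border=2, n_hidden_layer=100, eta=0.1)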
Example #2
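# Example #2: module-level setup -- imports, the path to the project's src
# directory, and the experiment constants recorded via MetricRecorder.
# AutoencoderLayer is imported here, so this setup presumably feeds an
# autoencoder-based run.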
import sys, os
import numpy as np
import PIL.Image
import cPickle
import theano
from timeit import default_timer as timer

lib_path = os.path.abspath(os.path.join('..', '..', 'src'))
sys.path.append(lib_path)

from network import Network, FullyConnectedLayer, AutoencoderLayer, ReLU
from preprocessor import BatchProcessor
from metric import MetricRecorder

rnd = np.random.RandomState()

mr = MetricRecorder(config_dir_path='./sae.json')
C = {
    'X_dirpath' : '../../../data/onetext_train/*',
    'X_valid_dirpath' : '../../../data/onetext_valid/*',
    'X_pretrain_dirpath' : '../../../data/onetext_pretrain/*',
    'y_dirpath' : '../../../data/train_cleaned/',
    'batchsize' : 1000000,
    'limit' : None,
    'epochs' : 15,
    'patience' : 70000,
    'patience_increase' : 2,
    'improvement_threshold' : 0.995,
    'validation_frequency' : 1,
    'lmbda' : 0.0,
    'dropout' : 0.001,
    'training_size' : None,
Example #3
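# Example #3: same two-layer architecture as Example #1, but eta, border and
# the hidden-layer size are fixed; only the mini-batch size (mbs) is passed in
# as the tuned parameter, and training patches are drawn fully at random.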
def train(job_id, mbs):
    #print "Job ID: %d" % job_id
    eta = 0.01 # 1-7 0.01
    border = 2
    n_hidden_layer = 80
    metric_recorder = MetricRecorder(config_dir_path='.', job_id=job_id)
    C = {
        'X_dirpath' : '../../../data/train_all/*',
        'y_dirpath' : '../../../data/train_cleaned/',
        'batchsize' : 500000,
        'limit' : 20,
        'epochs' : 4,
        'patience' : 70000,
        'patience_increase' : 2,
        'improvement_threshold' : 0.995,
        'validation_frequency' : 20,
        'lmbda' : 0.0,
        'training_size' : None,
        'validation_size' : None,
        'algorithm' : 'RMSProp',
        'mini_batch_size': mbs
    }

    training_data = BatchProcessor(
        X_dirpath='../../../data/train_all/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        border=border,
        limit=C['limit'],
        random=True,
        random_mode='fully',
        dtype=theano.config.floatX,
        rnd=rnd)

    validation_data = BatchProcessor(
        X_dirpath='../../../data/train/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        random=False,
        border=border,
        limit=C['limit'],
        dtype=theano.config.floatX,
        rnd=rnd)

    C['training_size'] = training_data.size()
    C['validation_size'] = validation_data.size()
    print "Training size: %d" % C['training_size']
    print "Validation size: %d" % C['validation_size']

    metric_recorder.add_experiment_metainfo(constants=C)
    metric_recorder.start()

    n_in = (2*border+1)**2
    net = Network([FullyConnectedLayer(n_in=n_in, n_out=n_hidden_layer,
                    rnd=rnd),
                   FullyConnectedLayer(n_in=n_hidden_layer, n_out=1,
                    rnd=rnd)],
                  C['mini_batch_size'])

    result = net.train(tdata=training_data, epochs=C['epochs'],
                     mbs=C['mini_batch_size'], eta=eta,
                     vdata=validation_data, lmbda=C['lmbda'],
                     momentum=None, patience_increase=C['patience_increase'],
                     improvement_threshold=C['improvement_threshold'],
                     validation_frequency=C['validation_frequency'],
                     metric_recorder=metric_recorder,
                     save_dir='./model/%d_' % metric_recorder.job_id,
                     early_stoping=False)

    print 'Time = %f' % metric_recorder.stop()
    print 'Result = %f' % result
    return float(result)
Example #4
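# Example #4: adds unsupervised pretraining -- a denoising AutoencoderLayer is
# pretrained on a separate data set before the network is fine-tuned with
# supervised training.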
def train(job_id, params):
    print "Job ID: %d" % job_id
    border = 2
    n_hidden_layer = params['hidden']
    metric_recorder = MetricRecorder(config_dir_path='./config.json',
                                     job_id=job_id)
    C = {
        'X_dirpath' : '../../../data/onetext_train_small/*',
        'X_valid_dirpath' : '../../../data/onetext_valid_small/*',
        'y_dirpath' : '../../../data/train_cleaned/',
        'batchsize' : 2000000,
        'limit' : None,
        'epochs' : 15,
        'patience' : 70000,
        'patience_increase' : 2,
        'improvement_threshold' : 0.995,
        'validation_frequency' : 2,
        'lmbda' : 0.0,
        'dropout' : 0.0,
        'training_size' : None,
        'validation_size' : None,
        'algorithm' : 'RMSProp',
        'eta' : float(params['eta'][0]),
        'eta_min': float(params['eta_min'][0]),
        'eta_pre' : float(params['eta_pre'][0]),
        'corruption_level' : float(params['corruption_level'][0]),
        'border' : 2,
        'hidden' : int(params['hidden'][0]),
        'mini_batch_size': 500
    }

    training_data = BatchProcessor(
        X_dirpath=C['X_dirpath'],
        y_dirpath=C['y_dirpath'],
        batchsize=C['batchsize'],
        border=C['border'],
        limit=C['limit'],
        random=True,
        random_mode='fully',
        dtype=theano.config.floatX,
        rnd=rnd)

    validation_data = BatchProcessor(
        X_dirpath=C['X_valid_dirpath'],
        y_dirpath=C['y_dirpath'],
        batchsize=C['batchsize'],
        border=C['border'],
        limit=C['limit'],
        random=False,
        dtype=theano.config.floatX,
        rnd=rnd)

    pretrain_data = BatchProcessor(
        X_dirpath='../../../data/onetext_pretrain_small/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=50000, border=border, limit=None,
        random=True, random_mode='fully', rnd=rnd,
        dtype=theano.config.floatX)

    C['training_size'] = training_data.size()
    C['validation_size'] = validation_data.size()
    print "Training size: %d" % C['training_size']
    print "Validation size: %d" % C['validation_size']

    metric_recorder.add_experiment_metainfo(constants=C)
    metric_recorder.start()

    n_in = (2*border+1)**2
    net = Network([
        AutoencoderLayer(n_in=n_in, n_hidden=C['hidden'], rnd=rnd,
          corruption_level=C['corruption_level']),
        FullyConnectedLayer(n_in=C['hidden'], n_out=1, rnd=rnd)],
        C['mini_batch_size'])

    print '...start pretraining'
    net.pretrain_autoencoders(training_data=pretrain_data,
        mbs=C['mini_batch_size'], eta=C['eta_pre'], epochs=15,
        metric_recorder=metric_recorder)

    result = net.train(tdata=training_data, epochs=C['epochs'],
                     mbs=C['mini_batch_size'], eta=C['eta'],
                     eta_min=C['eta_min'],
                     vdata=validation_data, lmbda=C['lmbda'],
                     momentum=None,
                     patience_increase=C['patience_increase'],
                     improvement_threshold=C['improvement_threshold'],
                     validation_frequency=C['validation_frequency'],
                     metric_recorder=metric_recorder,
                     save_dir='./models/%d_' % metric_recorder.job_id,
                     early_stoping=False)

    print 'Time = %f' % metric_recorder.stop()
    print 'Result = %f' % result
    return float(result)
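# Hypothetical driver call (not part of the original snippet). The
# params['...'][0] indexing suggests the caller passes array-valued
# parameters, as a hyperparameter-search tool such as Spearmint would;
# the dict below only mimics that shape with illustrative values.
if __name__ == '__main__':
    params = {'eta': [0.05], 'eta_min': [0.001], 'eta_pre': [0.05],
              'corruption_level': [0.1], 'hidden': [100]}
    print train(job_id=1, params=params)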
Example #5
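# Example #5: like Example #4 but without the pretraining stage; the L2
# penalty (lmbda) and a dropout value are read from params and recorded with
# the experiment constants.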
def train(job_id, params):
    print "Job ID: %d" % job_id
    eta = params["eta"]
    border = 2
    n_hidden_layer = params["hidden"]
    metric_recorder = MetricRecorder(config_dir_path="./config.json", job_id=job_id)
    C = {
        "X_dirpath": "../../../data/onetext_train_small/*",
        "X_valid_dirpath": "../../../data/onetext_valid_small/*",
        "y_dirpath": "../../../data/train_cleaned/",
        "batchsize": 500000,
        "limit": None,
        "epochs": 4,
        "patience": 70000,
        "patience_increase": 2,
        "improvement_threshold": 0.995,
        "validation_frequency": 20,
        "lmbda": float(params["l2"][0]),
        "dropout": float(params["dropout"][0]),
        "training_size": None,
        "validation_size": None,
        "algorithm": "RMSProp",
        "eta": float(params["eta"][0]),
        "eta_min": params["eta_min"][0],
        "border": 2,
        "hidden": int(params["hidden"][0]),
        "mini_batch_size": 500,
    }

    training_data = BatchProcessor(
        X_dirpath=C["X_dirpath"],
        y_dirpath=C["y_dirpath"],
        batchsize=C["batchsize"],
        border=C["border"],
        limit=C["limit"],
        random=True,
        random_mode="fully",
        dtype=theano.config.floatX,
        rnd=rnd,
    )

    validation_data = BatchProcessor(
        X_dirpath=C["X_valid_dirpath"],
        y_dirpath=C["y_dirpath"],
        batchsize=C["batchsize"],
        border=C["border"],
        limit=C["limit"],
        random=False,
        dtype=theano.config.floatX,
        rnd=rnd,
    )

    C["training_size"] = training_data.size()
    C["validation_size"] = validation_data.size()
    print "Training size: %d" % C["training_size"]
    print "Validation size: %d" % C["validation_size"]

    metric_recorder.add_experiment_metainfo(constants=C)
    metric_recorder.start()

    n_in = (2 * border + 1) ** 2
    net = Network(
        [
            FullyConnectedLayer(n_in=n_in, n_out=C["hidden"], rnd=rnd),
            FullyConnectedLayer(n_in=C["hidden"], n_out=1, rnd=rnd),
        ],
        C["mini_batch_size"],
    )

    result = net.train(
        tdata=training_data,
        epochs=C["epochs"],
        mbs=C["mini_batch_size"],
        eta=C["eta"],
        eta_min=C["eta_min"],
        vdata=validation_data,
        lmbda=C["lmbda"],
        momentum=None,
        patience_increase=C["patience_increase"],
        improvement_threshold=C["improvement_threshold"],
        validation_frequency=C["validation_frequency"],
        metric_recorder=metric_recorder,
        save_dir="./models/%d_" % metric_recorder.job_id,
        early_stoping=False,
    )

    print "Time = %f" % metric_recorder.stop()
    print "Result = %f" % result
    return float(result)
Example #6
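# Example #6: script-style variant that builds the batch processors at module
# level instead of inside a train() function.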
import numpy as np
import theano
import sys, os
from timeit import default_timer as timer

# add own libs to path
lib_path = os.path.abspath(os.path.join('../../src'))
sys.path.append(lib_path)

# own libs
from network import Network, FullyConnectedLayer
from preprocessor import BatchProcessor
from metric import MetricRecorder

rnd = np.random.RandomState()

mr = MetricRecorder(config_dir_path='./simple.json')
mr.start()

border = 2

training_data = BatchProcessor(
    X_dirpath='../../../data/train_all/*',
    y_dirpath='../../../data/train_cleaned/',
    batchsize=1000000, border=border, limit=None,
    random=True, random_mode='fully',
    dtype=theano.config.floatX, rnd=rnd)

validation_data = BatchProcessor(
    X_dirpath='../../../data/train_all/*',
    y_dirpath='../../../data/train_cleaned/',
    batchsize=1000000, border=border, limit=None,