Example #1
def test_networks():
    # create the testbed
    # bed = TestBed("../data/lane.180000.3.xml", r=2, d=1)
    bed = TestBed("../../data/cross3ltl_full_3/lane.129600.1.xml",
                  r=2,
                  d=1,
                  debug_level=0)
    n_input = bed.get_n_input()
    n_output = bed.get_n_outupt()

    # build the networks for the experiment
    exp1 = theanets.Experiment(theanets.feedforward.Regressor,
                               layers=(n_input,
                                       dict(size=100,
                                            activation='linear'), n_output),
                               optimize='sgd',
                               activation='linear')
    exp2 = theanets.Experiment(theanets.feedforward.Regressor,
                               layers=(n_input,
                                       dict(size=100, activation='linear'),
                                       dict(size=100,
                                            activation='linear'), n_output),
                               optimize='sgd',
                               activation='linear')
    exp3 = theanets.Experiment(theanets.feedforward.Regressor,
                               layers=(n_input,
                                       dict(size=100, activation='linear'),
                                       dict(size=100, activation='linear'),
                                       dict(size=100,
                                            activation='linear'), n_output),
                               optimize='sgd',
                               activation='linear')
    # exp4 = theanets.Experiment(
    #     theanets.feedforward.Regressor,
    #     layers=(
    #         n_input,
    #         dict(size=400, activation='linear'),
    #         dict(size=400, activation='linear'),
    #         dict(size=400, activation='linear'),
    #         n_output
    #     ),
    #     optimize='sgd',
    #     activation='linear'
    # )

    # run the experiments
    bed.test(exp1, 20)
    bed.test(exp2, 20)
    bed.test(exp3, 20, block=True)
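For context, a minimal self-contained sketch of the same Experiment pattern on synthetic data (shapes and names here are illustrative, not taken from the TestBed code; depending on the theanets release the training keyword is spelled `optimize`, `algo`, or `algorithm`, as the examples below show):

import numpy as np
import theanets

X = np.random.randn(200, 8).astype('f')   # illustrative inputs
Y = np.random.randn(200, 2).astype('f')   # illustrative targets

exp = theanets.Experiment(theanets.feedforward.Regressor,
                          layers=(8, dict(size=100, activation='linear'), 2))
exp.train([X, Y], optimize='sgd')
print(exp.network.predict(X[:5]))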
Example #2
def do_regression():
    climate.enable_default_logging()

    train, test = create_datasets()
    x_train = train[:, 0:6]
    y_train = train[:, 6]
    y_train = np.reshape(y_train, (y_train.shape[0], 1))
    exp = theanets.Experiment(theanets.Regressor, layers=(6, 6, 1))
    exp.train([x_train, y_train])

    # do the testing
    x_test = test[:, 0:6]
    y_test = test[:, 6]

    yp = exp.network.predict(x_test)

    xi = [(i + 1) for i in range(x_test.shape[0])]

    pb.scatter(xi, y_test, color="red")
    pb.scatter(xi, yp, color="blue")

    pb.show()

    return
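A quick numeric check that could sit alongside the scatter plot inside do_regression (a sketch; `yp` from predict has shape [n, 1] here, so it is flattened before comparing against `y_test`):

rmse = np.sqrt(np.mean((yp.ravel() - y_test) ** 2))
print("test RMSE: %.4f" % rmse)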
Example #3
def train_strategy(stock, ratio=0.8, min_improvement=0.001):

    train, valid = load_dataset(stock)
    n, n_input = train[0].shape

    exp = theanets.Experiment(
        theanets.Classifier,
        layers=(n_input, n_input * 2, 2),
    )
    exp.train(train,
              valid,
              min_improvement=min_improvement,
              algo='sgd',
              learning_rate=0.01,
              momentum=0.5,
              hidden_l1=0.001,
              weight_l2=0.001,
              num_updates=100)
    print('training:')
    evaluate(exp, train)

    print('validation:')
    evaluate(exp, valid)

    exp.save('%s.nn' % stock)
    return exp
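Example #13 below shows the matching load path: it rebuilds the same layer geometry and then restores the weights with `exp.load('%s.nn' % name)`.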
Example #4
    def partial_fit(self, X, y, sample_weight=None, keep_trainer=True, **trainer):
        """
        Train the regressor by continuing training of the existing network.

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: values - array-like of shape [n_samples] (or [n_samples, n_targets])
        :param bool keep_trainer: True if the trainer is not stored in self.trainers
        :param dict trainer: parameters of the training algorithm we want to use now
        :return: self

        .. note:: if `trainer['optimize'] == 'pretrain'` (unsupervised training),
            `y` can be any vector; only the number of targets, `numpy.shape(y)`, is used.
        """
        allow_multiple_targets = len(numpy.shape(y)) > 1
        X, y = self._prepare_for_partial_fit(X, y, allow_multiple_targets=allow_multiple_targets,
                                             keep_trainer=keep_trainer, **trainer)
        if self.exp is None:
            layers = self._construct_layers(X.shape[1], 1 if len(numpy.shape(y)) == 1 else numpy.shape(y)[1])
            self.exp = tnt.Experiment(tnt.Regressor, layers=layers,
                                      rng=self._reproducibilize(), **self._prepare_network_params())
        self._reproducibilize()
        if len(numpy.shape(y)) == 1:
            y = y.reshape(len(y), 1)
        if trainer.get('optimize') == 'pretrain':
            self.exp.train([X.astype(numpy.float32)], **trainer)
        else:
            self.exp.train([X.astype(numpy.float32), y], **trainer)
        return self
Example #5
    def partial_fit(self, X, y, keep_trainer=True, **trainer):
        """
        Train the classifier by continuing training of the existing network.

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: values - array-like of shape [n_samples]
        :param bool keep_trainer: True if the trainer is not stored in self.trainers
        :param dict trainer: parameters of the training algorithm we want to use now
        :return: self

        .. note:: if `trainer['optimize'] == 'pretrain'` (unsupervised training),
            `y` can be any vector carrying the label set, i.e. `numpy.unique(y) == classes`.
        """
        X, y = self._prepare_for_partial_fit(X, y, keep_trainer=keep_trainer, **trainer)
        if self.exp is None:
            self._set_classes(y)
            layers = self._construct_layers(X.shape[1], len(self.classes_))
            self.exp = tnt.Experiment(tnt.Classifier, layers=layers,
                                      rng=self._reproducibilize(), **self._prepare_network_params())
        self._reproducibilize()
        if trainer.get('optimize', None) == 'pretrain':
            self.exp.train([X.astype(numpy.float32)], **trainer)
        else:
            self.exp.train([X.astype(numpy.float32), y.astype(numpy.int32)], **trainer)
        return self
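Typical usage of this method, sketched under the assumption that `clf` is an instance of the wrapper class the `partial_fit` above belongs to: an unsupervised pre-training pass followed by a supervised pass.

clf.partial_fit(X, y, optimize='pretrain')                # unsupervised: only X is used
clf.partial_fit(X, y, optimize='nag', learning_rate=0.1)  # supervised pass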
Example #6
 def partial_fit(self,
                 X,
                 y,
                 sample_weight=None,
                 keep_trainer=True,
                 **trainer):
     X, y, sample_weight = self._prepare_for_partial_fit(
         X,
         y,
         sample_weight=sample_weight,
         keep_trainer=keep_trainer,
         **trainer)
     if self.exp is None:
         self._set_classes(y)
         layers = self._construct_layers(X.shape[1], len(self.classes_))
         self.exp = tnt.Experiment(tnt.Classifier,
                                   layers=layers,
                                   weighted=True)
     params = self._prepare_network_params()
     params.update(**trainer)
     if trainer.get('algo', None) == 'pretrain':
         self.exp.train([X.astype(numpy.float32)], **params)
     else:
         self.exp.train([
             X.astype(numpy.float32),
             y.astype(numpy.int32),
             sample_weight.astype(numpy.float32)
         ], **params)
     return self
Example #7
def main(args):
    train, valid, _ = load_cifar()

    whiten, color = pca(train)

    feat = args.features or int(np.sqrt(4 * K))
    e = theanets.Experiment(
        theanets.Autoencoder,
        layers=(K, feat**2, K),
    )

    e.train(whiten(train), whiten(valid), input_noise=1)

    plot_layers([
        color(e.network.find(1, 0).get_value().T).T,
        color(e.network.find('out', 0).get_value()),
    ], channels=3)
    plt.tight_layout()
    plt.show()

    valid = whiten(valid[:100])
    plot_images(color(valid), 121, 'Sample data', channels=3)
    plot_images(color(e.network.predict(valid)),
                122,
                'Reconstructed data',
                channels=3)
    plt.tight_layout()
    plt.show()
Example #8
def do_classification():

    climate.enable_default_logging()
    df = pd.read_csv("glass.csv")
    # recode the class variable to go from 0 through 5
    df["GT"] = df["GT"].map(recode)
    train, valid = train_test_split(df, test_size=0.3)
    train_X = train.iloc[:, 0:9].values
    train_Y = train.iloc[:, 9].values
    valid_X = valid.iloc[:, 0:9].values
    valid_Y = valid.iloc[:, 9].values
    train_X = train_X.astype('f')
    train_Y = train_Y.astype('i')
    valid_X = valid_X.astype('f')
    valid_Y = valid_Y.astype('i')
    t0 = (train_X, train_Y)
    t1 = (valid_X, valid_Y)

    exp = theanets.Experiment(theanets.Classifier, layers=(9, 18, 18, 6))
    exp.train(t0, t1, algorithm='sgd',
              learning_rate=1e-4, momentum=0.1,
              hidden_l1=0.001, weight_l2=0.001)

    cm = confusion_matrix(valid_Y, exp.network.predict(valid_X))

    return cm
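Overall accuracy falls out of the confusion matrix directly (a sketch using the `cm` returned above, assuming NumPy imported as np):

accuracy = np.trace(cm) / float(cm.sum())
print("validation accuracy: %.3f" % accuracy)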
Example #9
 def test_sgd(self):
     self.exp = theanets.Experiment(theanets.recurrent.Classifier,
                                    layers=(self.NUM_INPUTS, (10, 'rnn'),
                                            self.NUM_CLASSES),
                                    weighted=True)
     self.assert_progress('sgd',
                          [self.INPUTS, self.CLASSES, self.CLASS_WEIGHTS])
Example #10
 def test_sgd(self):
     self.exp = theanets.Experiment(theanets.recurrent.Regressor,
                                    layers=(self.NUM_INPUTS, (10, 'rnn'),
                                            self.NUM_OUTPUTS),
                                    weighted=True)
     self.assert_progress('sgd',
                          [self.INPUTS, self.OUTPUTS, self.OUTPUT_WEIGHTS])
Example #11
def theanets_load(key, theanets_type):
    file_name_network = os.path.join(c["CACHE_DIR"], key + ".network.pkl")
    file_name_network_structure = os.path.join(c["CACHE_DIR"], key + ".network.structure.pkl")
    e = None
    with open(file_name_network_structure, "rb") as f:
        e = theanets.Experiment(theanets_type, **pickle.load(f))
    e.load(file_name_network)
    return e
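The matching save helper is not shown; a plausible counterpart (a hypothetical `theanets_save`, mirroring the paths and pickle usage above) would store the constructor kwargs and the network separately:

def theanets_save(key, e, experiment_kwargs):
    # hypothetical: `experiment_kwargs` are the kwargs originally passed to
    # theanets.Experiment, pickled so theanets_load can rebuild the object
    file_name_network = os.path.join(c["CACHE_DIR"], key + ".network.pkl")
    file_name_network_structure = os.path.join(c["CACHE_DIR"], key + ".network.structure.pkl")
    with open(file_name_network_structure, "wb") as f:
        pickle.dump(experiment_kwargs, f)
    e.save(file_name_network)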
Example #12
def main():
    training_data, validation_data, test_data, std_scale = load_training_data()
    climate.enable_default_logging()

    targets = ['esgd','layerwise','rmsprop','nag','rprop','sgd','sample','adadelta']
    layers = [(93,
               dict(size=512, sparsity=0.2, activation='relu'),
               dict(size=512, sparsity=0.2, activation='relu'),
               dict(size=512, sparsity=0.2, activation='relu'),
               9)]

    for l in layers:
        for t in targets:
            exp = theanets.Experiment(
                theanets.Classifier,
                layers=l,
                weighted=True,
                output_activation='softmax'
            )

            # train three rounds with the same optimizer
            exp.train(training_data, validation_data, optimize=t)
            exp.train(training_data, validation_data, optimize=t)
            exp.train(training_data, validation_data, optimize=t)

            # estimate accuracy on the held-out test data
            test_results = exp.network.predict(test_data[0])
            loss = multiclass_log_loss(test_data[1], test_results)

            print('Test multiclass log loss:', loss)

            out_file = 'results/' + str(loss) + t
            exp.save(out_file + '.pkl')


            # save the Kaggle results
            kaggle_test_features = load_test_data(std_scale)
            results = exp.network.predict(kaggle_test_features)
            save_results(out_file + '.csv', kaggle_test_features, results)
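The `multiclass_log_loss` helper is not defined in this snippet; a common implementation (an assumption, following the usual Kaggle formulation, with NumPy as np) looks like:

def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    # clip and renormalize the predicted probabilities, then average the
    # negative log-probability assigned to each true class
    predictions = np.clip(y_pred, eps, 1 - eps)
    predictions /= predictions.sum(axis=1)[:, np.newaxis]
    rows = np.arange(len(y_true))
    return -np.mean(np.log(predictions[rows, y_true]))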
Example #13
def load_strategy(name, verbose=False):
    print("loading %s trained strategy" % name)
    train, valid = load_dataset(name)
    n, n_input = train[0].shape
    exp = theanets.Experiment(
        theanets.Classifier,
        layers=(n_input, n_input * 2, 2),
    )
    exp.load('%s.nn' % name)
    if verbose:
        print('training:')
        evaluate(exp, train)
        print('validation:')
        evaluate(exp, valid)
    return exp
Example #14
 def test_save_load(self):
     exp = theanets.Experiment(theanets.Autoencoder, layers=(10, 3, 4, 10))
     net = exp.network
     f, p = tempfile.mkstemp(suffix='pkl')
     os.close(f)
     os.unlink(p)
     try:
         exp.save(p)
         assert os.path.isfile(p)
         exp.load(p)
         assert exp.network is not net
         assert exp.network.layers == (10, 3, 4, 10)
     finally:
         if os.path.exists(p):
             os.unlink(p)
Example #15
def main(args):
    train, valid, _ = load_mnist()

    e = theanets.Experiment(theanets.Autoencoder,
                            layers=(784, args.features**2, 784))

    e.train(train, valid)

    plot_layers([e.network.find(1, 0), e.network.find(2, 0)])
    plt.tight_layout()
    plt.show()

    v = valid[:100]
    plot_images(v, 121, 'Sample data')
    plot_images(e.network.predict(v), 122, 'Reconstructed data')
    plt.tight_layout()
    plt.show()
Example #16
 def test_save_load(self):
     exp = theanets.Experiment(theanets.Autoencoder, layers=(10, 3, 4, 10))
     net = exp.network
     f, p = tempfile.mkstemp(suffix='pkl')
     os.close(f)
     os.unlink(p)
     try:
         exp.save(p)
         assert os.path.isfile(p)
         exp.load(p)
         assert exp.network is not net
         for lo, ln in zip(net.layers, exp.network.layers):
             assert lo.name == ln.name
             assert lo.inputs == ln.inputs
             assert lo.size == ln.size
     finally:
         if os.path.exists(p):
             os.unlink(p)
Example #17
    def __setstate__(self, dictionary):
        """
        Required for pickle.load to work, because theanets objects can't be unpickled by default.

        :param dict dictionary: the structure representing a TheanetsClassifier
        """
        self.__dict__ = dictionary
        if dictionary['dumped_exp'] is None:
            self.exp = None
        else:
            with tempfile.NamedTemporaryFile() as dump:
                with open(dump.name, 'wb') as dumpfile:
                    dumpfile.write(dictionary['dumped_exp'])
                assert os.path.exists(dump.name), 'there is no such file: {}'.format(dump.name)
                layers = [1] + self.layers + [1]
                self.exp = tnt.Experiment(tnt.Classifier, layers=layers, rng=self._reproducibilize(),
                                          **self.network_params)
                self.exp.load(dump.name)
        del dictionary['dumped_exp']
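The `dumped_exp` bytes consumed above are presumably produced by a matching `__getstate__`; a sketch of that counterpart (an assumption, mirroring the temp-file round trip used in `__setstate__`):

    def __getstate__(self):
        # hypothetical counterpart: serialize the theanets experiment to bytes
        result = self.__dict__.copy()
        if self.exp is None:
            result['dumped_exp'] = None
        else:
            with tempfile.NamedTemporaryFile() as dump:
                self.exp.save(dump.name)
                with open(dump.name, 'rb') as dumpfile:
                    result['dumped_exp'] = dumpfile.read()
        del result['exp']
        return result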
Example #18
    def partial_fit(self, X, y, sample_weight=None, new_trainer=True, **trainer):
        """
        Train the regressor by continuing training of the existing network.

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: values - array-like of shape [n_samples]
        :param bool new_trainer: True if the trainer is not stored in self.trainers
        :param dict trainer: parameters of the training algorithm we want to use now
        :return: self
        """
        X, y = self._prepare_for_partial_fit(X, y, new_trainer, **trainer)

        if self.exp is None:
            layers = self._construct_layers(X.shape[1], 1)
            print(layers)
            self.exp = tnt.Experiment(tnt.feedforward.Regressor, layers=layers,
                                      rng=self._reproducibilize(), **self.network_params)
        self._reproducibilize()
        self.exp.train([X.astype(numpy.float32), y.reshape(len(y), 1)], **trainer)
        return self
Example #19
def main(args):
    train, valid, _ = load_cifar()

    e = theanets.Experiment(theanets.Autoencoder,
                            layers=(3072, args.features**2, 3072))

    e.train(train, valid)

    plot_layers(e.network.weights, channels=3)
    plt.tight_layout()
    plt.show()

    valid = valid[:100]
    plot_images(valid, 121, 'Sample data', channels=3)
    plot_images(e.network.predict(valid),
                122,
                'Reconstructed data',
                channels=3)
    plt.tight_layout()
    plt.show()
Example #20
 def test_save_load(self):
     exp = theanets.Experiment(theanets.Autoencoder, layers=(10, 3, 4, 10))
     net = exp.network
     f, p = tempfile.mkstemp(suffix='pkl')
     os.close(f)
     os.unlink(p)
     try:
         exp.save(p)
         assert os.path.isfile(p)
         exp.load(p)
         assert exp.network is not net
         for lo, ln in zip(net.layers, exp.network.layers):
             assert lo.name == ln.name
             assert lo._input_shapes == ln._input_shapes
         for po, pn in zip(net.params, exp.network.params):
             assert po.name == pn.name
             assert np.allclose(po.get_value(), pn.get_value())
     finally:
         if os.path.exists(p):
             os.unlink(p)
Example #21
    def __setstate__(self, dictionary):
        """
        Required for pickle.load to work, because theanets objects can't be unpickled by default.

        :param dict dictionary: the structure representing a TheanetsClassifier or TheanetsRegressor
        """
        dumped_exp = dictionary.pop('dumped_exp')
        self.__dict__ = dictionary.copy()
        if dumped_exp is None:
            self.exp = None
        else:
            with tempfile.NamedTemporaryFile() as dump:
                with open(dump.name, 'wb') as dumpfile:
                    dumpfile.write(dumped_exp)
                assert os.path.exists(dump.name), 'something strange in unpickling: there is no such file: {}'.format(
                    dump.name)
                dummy_layers = [1] + self.layers + [1]
                estimator_object = tnt.Classifier if self._model_type == 'classification' else tnt.Regressor
                self.exp = tnt.Experiment(estimator_object, layers=dummy_layers, rng=self._reproducibilize(),
                                          **self._prepare_network_params())
                self.exp.load(dump.name)
Example #22
    def partial_fit(self, X, y, new_trainer=True, **trainer):
        """
        Train the classifier by continuing training of the existing network.

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param y: values - array-like of shape [n_samples]
        :param bool new_trainer: True if the trainer is not stored in self.trainers
        :param dict trainer: parameters of the training algorithm we want to use now
        :return: self
        """
        X, y = self._prepare_for_partial_fit(X, y, new_trainer, **trainer)
        self.classes_ = numpy.unique(y)
        if self.exp is None:
            layers = self._construct_layers(X.shape[1], len(self.classes_))
            print(layers)
            self.exp = tnt.Experiment(tnt.Classifier, layers=layers,
                                      rng=self._reproducibilize(), **self.network_params)
        self._reproducibilize()
        self.exp.train((X.astype(numpy.float32), y.astype(numpy.int32)),
                       **trainer)
        return self
Example #23
def test_networks():
    # create the experiment
    bed = Experiment(r=2, d=1, debug_level=2)

    # prepare the data
    bed.setTrainData("../data/cross3ltl_full_3/lane.129600.1.xml")
    bed.setTestData("../data/cross3ltl_full_3/lane.129600.2.xml")

    # prepare to build the network
    n_input = bed.get_n_input()
    n_output = bed.get_n_outupt()

    # build the network for the experiment
    exp1 = theanets.Experiment(
        theanets.feedforward.Regressor,
        layers=(
            n_input,
            dict(size=120, activation='relu'),
            dict(size=120, activation='relu'),
            dict(size=120, activation='relu'),
            n_output
        ),
        optimize='sgd',
        activation='sigmoid'
    )

    # pretraining
    bed.pretrain(exp1)

    # train and evaluate
    bed.train(exp1, 1)
    bed.test(exp1, False)

    bed.train(exp1, 1)
    bed.test(exp1, False)

    bed.train(exp1, 1)
    bed.test(exp1, True)
Example #24
    def train(self):
        architecture = self.__architecture
        dataset = self.__dataset

        # select 90% of the data for training, 10% for validation
        cut = int(0.9 * len(dataset))
        idx = list(range(len(dataset)))  # list() so shuffle works on Python 3
        np.random.shuffle(idx)

        train = idx[:cut]
        train_set = [dataset[train, :-1], dataset[train, -1:]]
        valid = idx[cut:]
        valid_set = [dataset[valid, :-1], dataset[valid, -1:]]

        e = theanets.Experiment(theanets.feedforward.Regressor,
                                layers=architecture,
                                optimize='sgd',
                                hidden_activation='tanh',
                                output_activation='linear',
                                learning_rate=0.01)

        e.train(train_set, valid_set)
        self.e = e
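A prediction helper would then read straight off the stored experiment (a sketch; `predict` and `new_data` are illustrative, not part of the original class):

    def predict(self, new_data):
        # hypothetical convenience wrapper around the experiment trained above
        return self.e.network.predict(new_data)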
Example #25
def main(args):
    # load up the MNIST digit dataset.
    train, valid, _ = load_mnist()

    e = theanets.Experiment(theanets.Autoencoder,
                            layers=(784, args.features**2, 784))

    e.train(train,
            valid,
            input_noise=0.1,
            weight_l2=0.0001,
            algorithm='rmsprop',
            momentum=0.9,
            min_improvement=0.1)

    plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
    plt.tight_layout()
    plt.show()

    v = valid[:100]
    plot_images(v, 121, 'Sample data')
    plot_images(e.network.predict(v), 122, 'Reconstructed data')
    plt.tight_layout()
    plt.show()
Example #26
 def partial_fit(self,
                 X,
                 y,
                 sample_weight=None,
                 keep_trainer=True,
                 **trainer):
     allow_multiple_targets = len(numpy.shape(y)) > 1
     X, y, sample_weight = self._prepare_for_partial_fit(
         X,
         y,
         sample_weight=sample_weight,
         allow_multiple_targets=allow_multiple_targets,
         keep_trainer=keep_trainer,
         **trainer)
     if self.exp is None:
         layers = self._construct_layers(
             X.shape[1],
             1 if len(numpy.shape(y)) == 1 else numpy.shape(y)[1])
         self.exp = tnt.Experiment(tnt.Regressor,
                                   layers=layers,
                                   weighted=True)
     params = self._prepare_network_params()
     params.update(**trainer)
     if len(numpy.shape(y)) == 1:
         y = y.reshape(len(y), 1)
     if len(numpy.shape(sample_weight)) == 1:
         sample_weight = numpy.repeat(sample_weight, y.shape[1])
         sample_weight = sample_weight.reshape(y.shape)
     if trainer.get('algo') == 'pretrain':
         self.exp.train([X.astype(numpy.float32)], **params)
     else:
         self.exp.train([
             X.astype(numpy.float32), y,
             sample_weight.astype(numpy.float32)
         ], **params)
     return self
Example #27
def whiten(x):
    return np.dot(x, np.dot(vecs, np.diag(1. / vals)))


def color(z):
    return np.dot(z, np.dot(np.diag(vals), vecs.T))


# now train our model on the whitened dataset.

N = 16

e = theanets.Experiment(
    RICA,
    layers=(K, N * N, K),
    activation='linear',
    hidden_l1=0.2,
    no_learn_biases=True,
    tied_weights=True,
    train_batches=100,
    weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))

# color the network weights so they are viewable as digits.
plot_layers([color(e.network.weights[0].get_value().T).T], tied_weights=True)
plt.tight_layout()
plt.show()

plot_images(valid[:N * N], 121, 'Sample data')
plot_images(color(e.network.predict(whiten(valid[:N * N]))), 122,
            'Reconstructed data')
plt.tight_layout()
plt.show()
Example #28
import climate
import logging
import matplotlib.pyplot as plt
import numpy as np
import theanets

climate.enable_default_logging()

TIME = 10
BITS = 3
BATCH_SIZE = 32

mask = np.ones((TIME, BATCH_SIZE, 1), bool)
mask[:TIME - BITS] = 0

e = theanets.Experiment(theanets.recurrent.Regressor,
                        layers=(1, ('rnn', 10), 1),
                        weighted=True)


def generate():
    s, t = np.random.randn(2, TIME, BATCH_SIZE, 1).astype('f')
    s[:BITS] = t[-BITS:] = np.random.randn(BITS, BATCH_SIZE, 1)
    return s, t, mask


src, tgt, msk = generate()
logging.info('data batches: %s -> %s @ %s', src.shape, tgt.shape, msk.shape)

e.train(generate, batch_size=BATCH_SIZE)

predict = e.network.predict(src)[:, :, 0]
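A quick look at how well the network copies the bits (a sketch; it plots the first sequence in the batch against its target):

plt.plot(tgt[:, 0, 0], label='target')
plt.plot(predict[:, 0], label='prediction')
plt.legend()
plt.show()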
Example #29
#!/usr/bin/env python

import matplotlib.pyplot as plt
import theanets

from utils import load_mnist, plot_layers, plot_images


train, valid, _ = load_mnist()

e = theanets.Experiment(
    theanets.Autoencoder,
    layers=(784, 256, 100, 64, ('tied', 100), ('tied', 256), ('tied', 784)),
)
e.train(train, valid,
        algorithm='layerwise',
        patience=1,
        min_improvement=0.05,
        train_batches=100)
e.train(train, valid, min_improvement=0.01, train_batches=100)

plot_layers([e.network.find(i, 'w') for i in (1, 2, 3)], tied_weights=True)
plt.tight_layout()
plt.show()

valid = valid[:16*16]
plot_images(valid, 121, 'Sample data')
plot_images(e.network.predict(valid), 122, 'Reconstructed data')
plt.tight_layout()
plt.show()
Example #30
def train_nn(speech_data, speech_alignment):
    vta = MLFMFCCOnlineAlignedArray(usec0=usec0,
                                    n_last_frames=last_frames,
                                    usedelta=usedelta,
                                    useacc=useacc,
                                    mel_banks_only=mel_banks_only)
    sil_count = 0
    speech_count = 0
    for sd, sa in zip(speech_data, speech_alignment):
        mlf_speech = load_mlf(sa, max_files, max_frames_per_segment)
        vta.append_mlf(mlf_speech)
        vta.append_trn(sd)

        sil_count += mlf_speech.count_length('sil')
        speech_count += mlf_speech.count_length('speech')

    print "The length of sil segments:    ", sil_count
    print "The length of speech segments: ", speech_count

    mfcc = next(iter(vta))

    print("MFCC length:", len(mfcc[0]))
    input_size = len(mfcc[0])

    e = theanets.Experiment(
        theanets.Classifier,
        layers=(input_size, hidden_units, hidden_units, hidden_units,
                hidden_units, 2),
        optimize="hf",
        num_updates=30,
        validate=1,
        initial_lambda=0.1,
        preconditioner=bool(preconditioner),
        hidden_dropouts=hidden_dropouts,
        weight_l2=weight_l2,
        batch_size=500,
    )

    random.seed(0)
    print "Generating the cross-validation and train MFCC features"
    crossvalid_x = []
    crossvalid_y = []
    train_x = []
    train_y = []
    i = 0
    for frame, label in vta:
        frame = frame - (10.0 if mel_banks_only else 0.0)

        if i % (max_frames // 10) == 0:
            print("Already processed: %.2f%% of data" % (100.0 * i / max_frames))

        if i > max_frames:
            break

        if random.random() < float(crossvalid_frames) / max_frames:
            # sample validation (test) data
            crossvalid_x.append(frame)
            if label == "sil":
                crossvalid_y.append(0)
            else:
                crossvalid_y.append(1)
        else:
            train_x.append(frame)
            if label == "sil":
                train_y.append(0)
            else:
                train_y.append(1)

        i += 1

    crossvalid = [
        np.array(crossvalid_x),
        np.array(crossvalid_y).astype('int32')
    ]
    train = [np.array(train_x), np.array(train_y).astype('int32')]

    print()
    print("The length of training data: ", len(train_x))
    print("The length of test data:     ", len(crossvalid_x))
    print()

    dc_acc = deque(maxlen=20)
    dt_acc = deque(maxlen=20)

    epoch = 0
    while True:

        predictions_y = e.network.predict(crossvalid_x)
        c_acc, c_sil = get_accuracy(crossvalid_y, predictions_y)
        predictions_y = e.network.predict(train_x)
        t_acc, t_sil = get_accuracy(train_y, predictions_y)

        print()
        print("max_frames, max_files, max_frames_per_segment, trim_segments, max_epoch, hidden_units, last_frames, crossvalid_frames, usec0, usedelta, useacc, mel_banks_only, preconditioner, hidden_dropouts, weight_l2")
        print(max_frames, max_files, max_frames_per_segment, trim_segments, max_epoch, hidden_units, last_frames, crossvalid_frames, usec0, usedelta, useacc, mel_banks_only, preconditioner, hidden_dropouts, weight_l2)
        print("Epoch: %d" % (epoch, ))
        print()
        print("Cross-validation stats")
        print("------------------------")
        print("Epoch predictive accuracy:  %0.2f" % c_acc)
        print("Last epoch accs:", ["%.2f" % x for x in dc_acc])
        print("Epoch sil bias: %0.2f" % c_sil)
        print()
        print("Training stats")
        print("------------------------")
        print("Epoch predictive accuracy:  %0.2f" % t_acc)
        print("Last epoch accs:", ["%.2f" % x for x in dt_acc])
        print("Epoch sil bias: %0.2f" % t_sil)

        if epoch == max_epoch:
            break
        epoch += 1

        e.run(train, crossvalid)

        dc_acc.append(c_acc)
        dt_acc.append(t_acc)

        nn = ffnn.FFNN()
        for w, b in zip(e.network.weights, e.network.biases):
            nn.add_layer(w.get_value(), b.get_value())
        nn.save(file_name="model_voip/vad_sds_mfcc_is%d_hu%d_lf%d_mfr%d_mfl%d_mfps%d_ts%d_usec0%d_usedelta%d_useacc%d_mbo%d.nn" %
                          (input_size, hidden_units, last_frames, max_frames, max_files, max_frames_per_segment, trim_segments, usec0, usedelta, useacc, mel_banks_only))