Example No. 1
    def test_LearnBeginningMode(self):
        """ testing learn_beginning mode.
        """
        print("\nDyBMTestCase.testLearnFirstMode")

        length = 300
        period = 60
        std = 0
        dim = 1
        data = NoisySin(length, period, std, dim).to_list()

        print("\nlearn_beginning = True (Default mode)")
        for d in xrange(10, 20):
            model = BatchLinearDyBM(dim, delay=d, learn_beginning=True)
            model.fit(data)
            result = model.get_predictions(data)
            rmse = RMSE(result[d:], data[d:])
            print("Training RMSE", rmse)
            self.assertLessEqual(rmse, 0.1)

        print("\nlearn_beginning = False")
        for d in xrange(10, 20):
            model = BatchLinearDyBM(dim, delay=d, learn_beginning=False)
            model.fit(data)
            result = model.get_predictions(data)
            rmse = RMSE(result[d:], data[d:])
            print("Training RMSE", rmse)
            self.assertLessEqual(rmse, 0.1)
Example No. 2
    def testRMSE(self):
        np.random.seed(0)
        y = np.random.random((self.L, self.N))
        z = y + 1.0
        err = RMSE(y, y)
        self.assertAlmostEqual(err, 0)
        err = RMSE(y, z)
        self.assertAlmostEqual(err, np.sqrt(self.N))
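The assertion RMSE(y, y + 1.0) == sqrt(self.N) pins down the error convention used throughout these examples: at each time step the squared error is summed over the N dimensions, the per-step values are averaged over the L steps, and the square root is taken last. A minimal NumPy sketch of that convention (illustration only, assuming this definition; the library's own RMSE should be used in practice):

import numpy as np

def rmse_sketch(y_true, y_pred):
    # Sum squared error over the feature axis, average over time, then sqrt.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    se_per_step = ((y_true - y_pred) ** 2).sum(axis=-1)
    return np.sqrt(se_per_step.mean())

# A constant offset of 1.0 in every one of N dimensions contributes N to the
# squared error of each step, so the result is sqrt(N), as the test expects.
y = np.random.random((300, 4))   # L = 300 steps, N = 4 dimensions
assert np.isclose(rmse_sketch(y, y + 1.0), np.sqrt(4))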
Example No. 3
def test_sin(model, max_repeat=30):

    random = amath.random.RandomState(0)

    dim = model.get_input_dimension()
    # phase = np.zeros(dim)
    # phase = 2 * np.pi * np.arange(dim) / dim
    phase = 2 * amath.pi * random.uniform(size=dim)

    std = 1.
    period = 100
    length = period

    print("Testing with noisy sin wave")
    print(" dimension: %d" % dim)
    print(" period: %d" % period)
    print(" standard deviation: %f" % std)

    wave = NoisySin(length, period, std, dim, phase=phase)

    y_pred = model.get_predictions(wave)
    wave.reset()
    y_true = [w for w in wave]
    init_error = RMSE(y_true, y_pred)
    print(" 0 Error: %f" % init_error)

    for i in xrange(max_repeat):
        wave.reset()
        result = model.learn(wave)
        y_true = result["actual"]
        y_pred = result["prediction"]
        error = RMSE(y_true, y_pred)
        print("%2d Error: %f" % (i + 1, error))

    reduction = 100 * (init_error - error) / init_error
    assert reduction > 0, \
        "Error increased from %f to %f" % (init_error, error)
    print("Error is reduced from %f to %f by %f percents in %d iterations" %
          (init_error, error, reduction, max_repeat))
Example No. 4
    def test_LearnMultiSequences(self):
        """ testing learn_beginning mode.
        """
        print("\nDyBMTestCase.testLearnMultiSequences")

        seqs = list()
        for i in xrange(10):
            length = 300
            period = 60
            std = 0.1 * i
            dim = 1
            data = NoisySin(length, period, std, dim).to_list()
            seqs.append(data)

        for d in xrange(10, 20):
            model = BatchLinearDyBM(dim, delay=d, learn_beginning=True)
            model.fit_multi_seqs(seqs)
            result = model.get_predictions(seqs[0])
            rmse = RMSE(result[d:], seqs[0][d:])
            print("Training RMSE", rmse)
            self.assertLessEqual(rmse, 0.2)
Example No. 5
def experiment(period,
               std,
               delay,
               decay,
               Nh,
               repeat,
               bidirectional,
               sigma=0.01):
    """
    A run of experiment

    Parameters
    ----------
    period : int
        period of the wave
    std : float
        standard deviation of noise
    delay : int
        delay
    decay : list
        list of decay rates
    Nh : int
        number of hidden units
    repeat : int
        number of iterations of training
    bidirectional : boolean
        whether to train bidirectionally
    sigma : float
        std of random initialization
        0.01 is recommended in
        https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf
    """
    """
    Prepare data generators
    """

    dim = 1  # dimension of the wave
    phase = amath.zeros(dim)  # initial phase of the wave

    # forward sequence
    wave = NoisySawtooth(0, period, std, dim, phase, False)
    wave.reset(seed=0)

    # backward sequence
    revwave = NoisySawtooth(0, period, std, dim, phase, True)
    revwave.reset(seed=1)
    """
    Prepare a Gaussian Bernoulli DyBM
    """

    Nv = dim  # number of visible units

    sgd = AdaGrad()
    dybm = GaussianBernoulliDyBM([delay, delay], [decay, decay], [Nv, Nh],
                                 [sgd, deepcopy(sgd)],
                                 sigma=sigma,
                                 insert_to_etrace="w_delay")
    dybm.layers[0].layers[1].SGD.set_learning_rate(0)
    dybm.layers[1].layers[1].SGD.set_learning_rate(0)
    """
    Learn
    """
    error = list()  # list of numpy array
    bi_end = 0.5
    bi_factor = 2
    for i in range(repeat):
        # update internal states by reading forward sequence
        wave.add_length(period)
        dybm.get_predictions(wave)

        if bidirectional and i % (bi_factor + 1) == 0 and bi_factor > 0 \
           and i < repeat * bi_end:
            # make a time-reversed DyBM
            dybm._time_reversal()

            # update internal states by reading backward sequence
            revwave.add_length(period)
            dybm.get_predictions(revwave)

            # learn backward sequence for one period
            revwave.add_length(period)
            dybm.learn(revwave, get_result=False)

            # make a non time-reversed DyBM
            dybm._time_reversal()
        else:
            # update internal states by reading forward sequence
            wave.add_length(period)
            dybm.get_predictions(wave)

            # learn forward sequence
            wave.add_length(period)
            result = dybm.learn(wave, get_result=True)

            if i % (bi_factor + 1) == bi_factor:
                rmse = RMSE(result["actual"], result["prediction"])
                rmse = amath.to_numpy(rmse)
                error.append(rmse)

    return error, dybm, wave
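The branch conditions above interleave backward and forward training: with bidirectional=True and bi_factor=2, every third iteration (i % 3 == 0) reads and learns the reversed sequence until the fraction bi_end of the iterations has passed, and the training RMSE is recorded only on forward iterations where i % 3 == 2. A small standalone sketch that prints the resulting schedule (illustration only, mirroring the conditions in experiment above):

def show_schedule(repeat=12, bi_factor=2, bi_end=0.5, bidirectional=True):
    # Mirrors the branch conditions of the training loop in experiment().
    for i in range(repeat):
        backward = (bidirectional and bi_factor > 0
                    and i % (bi_factor + 1) == 0
                    and i < repeat * bi_end)
        evaluate = (not backward) and i % (bi_factor + 1) == bi_factor
        print(i, "backward" if backward else "forward",
              "(RMSE recorded)" if evaluate else "")

show_schedule()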
Example No. 6
def experiment(dataset, delay, Nh, repeat, bi_factor, bi_end, sigma, rate):
    """
    A run of experiment

    Parameters
    ----------
    delay : int
        delay
    Nh : int
        number of hidden units
    repeat : int
        number of iterations of training
    bi_factor : boolean
        amount of bidirectional training
    rate : float
        initial learning rate
    sigma : float
        standard deviation of noise
    """

    Nv = dataset.train.get_dim()  # number of visible units
    decay = [0.0]

    # Prepare a Gaussian Bernoulli DyBM

    dybm = GaussianBernoulliDyBM([delay, delay], [decay, decay], [Nv, Nh],
                                 sigma=sigma)

    if len(dybm.layers[0].layers[0].variables["W"]) > 0:
        dybm.layers[0].layers[0].variables["W"][0] += amath.eye(Nv)
    for i in range(2):
        dybm.layers[i].layers[0].SGD.set_learning_rate(rate)
        dybm.layers[i].layers[1].SGD.set_learning_rate(0)

    # Learn

    train_rmse = list()
    test_rmse = list()
    step = list()

    dybm.init_state()
    dataset.warmup.reset()
    dataset.train.reset()
    dybm.get_predictions(dataset.warmup)
    prediction = dybm.get_predictions(dataset.train)
    rmse = RMSE(dataset.train.to_list(), prediction)
    rmse = amath.to_numpy(rmse)
    train_rmse.append(rmse)

    print("init", rmse)

    dybm.init_state()
    dataset.cooldown.reset()
    dataset.test.reset()
    dybm.get_predictions(dataset.cooldown)
    prediction = dybm.get_predictions(dataset.test)
    rmse = RMSE(dataset.test_seq, prediction)
    rmse = amath.to_numpy(rmse)

    print(rmse)

    test_rmse.append(rmse)

    step.append(0)

    for i in range(repeat):

        dybm.init_state()
        dataset.warmup.reset()
        dataset.train.reset()
        dataset.cooldown.reset()

        if i % (bi_factor + 1) == 0 and bi_factor > 0 and i < repeat * bi_end:
            print("backward")
            # make a time-reversed DyBM and dataset
            dybm._time_reversal()
            dataset.warmup.reverse()
            dataset.train.reverse()
            dataset.cooldown.reverse()

            # update internal states by reading backward sequence
            dybm.get_predictions(dataset.cooldown)

            # learn backward sequence
            dybm.learn(dataset.train, get_result=False)
            dybm.learn(dataset.warmup, get_result=False)

            # make a non time-reversed DyBM
            dybm._time_reversal()
            dataset.warmup.reverse()
            dataset.train.reverse()
            dataset.cooldown.reverse()
        else:
            print("forward")
            # update internal states by reading forward sequence
            dybm.get_predictions(dataset.warmup)

            # learn forward sequence
            dybm.learn(dataset.train, get_result=False)
            dybm.learn(dataset.cooldown, get_result=False)

        if i % (bi_factor + 1) == bi_factor:
            print("evaluate")

            dybm.init_state()
            dataset.warmup.reset()
            dataset.train.reset()
            dybm.get_predictions(dataset.warmup)
            prediction = dybm.get_predictions(dataset.train)
            rmse = RMSE(dataset.train.to_list(), prediction)
            rmse = amath.to_numpy(rmse)
            train_rmse.append(rmse)

            print(i, rmse)

            dybm.init_state()
            dataset.cooldown.reset()
            dataset.test.reset()
            dybm.get_predictions(dataset.cooldown)
            prediction = dybm.get_predictions(dataset.test)
            rmse = RMSE(dataset.test_seq, prediction)
            rmse = amath.to_numpy(rmse)
            test_rmse.append(rmse)

            print(rmse)

            step.append(i + 1)

    return (train_rmse, test_rmse, step, dybm)
Example No. 7
def get_init_rate(dataset, delay, Nh, sigma, epochs):
    Nv = dataset.train.get_dim()  # number of visible units
    decay = [0.0]

    dybm = GaussianBernoulliDyBM([delay, delay], [decay, decay], [Nv, Nh],
                                 sigma=sigma)
    if len(dybm.layers[0].layers[0].variables["W"]) > 0:
        dybm.layers[0].layers[0].variables["W"][0] += amath.eye(Nv)

    dataset.train.reset()
    prediction = dybm.get_predictions(dataset.train)
    init_train_rmse = RMSE(dataset.train.to_list(), prediction)

    print("init train rmse", init_train_rmse)

    best_rate = 1.0
    rate = 1.0
    best_train_rmse = init_train_rmse
    while rate > 0:
        # prepare a dybm
        dybm = GaussianBernoulliDyBM([delay, delay], [decay, decay], [Nv, Nh],
                                     sigma=sigma)

        if len(dybm.layers[0].layers[0].variables["W"]) > 0:
            dybm.layers[0].layers[0].variables["W"][0] += amath.eye(Nv)
        for i in range(2):
            dybm.layers[i].layers[0].SGD.set_learning_rate(rate)
            dybm.layers[i].layers[1].SGD.set_learning_rate(0)

        # train epochs
        for i in range(epochs):
            dataset.warmup.reset()
            dataset.train.reset()
            dybm.get_predictions(dataset.warmup)
            dybm.learn(dataset.train, get_result=False)

        dybm.init_state()
        dataset.warmup.reset()
        dataset.train.reset()
        dybm.get_predictions(dataset.warmup)
        prediction = dybm.get_predictions(dataset.train)
        train_rmse = RMSE(dataset.train.to_list(), prediction)

        print("rate", rate, "train_rmse", train_rmse)

        if train_rmse < best_train_rmse:
            best_train_rmse = train_rmse

        if train_rmse > best_train_rmse and train_rmse < init_train_rmse:
            best_rate = rate * 2
            break

        rate = rate * 0.5

    if best_rate == 1.0:
        rate = 2.0
        while rate < amath.inf:
            # prepare a dybm
            dybm = GaussianBernoulliDyBM([delay, delay], [decay, decay],
                                         [Nv, Nh],
                                         sigma=sigma)

            if len(dybm.layers[0].layers[0].variables["W"]) > 0:
                dybm.layers[0].layers[0].variables["W"][0] += amath.eye(Nv)
            for i in range(2):
                dybm.layers[i].layers[0].SGD.set_learning_rate(rate)
                dybm.layers[i].layers[1].SGD.set_learning_rate(0)

            # train one epoch
            dataset.warmup.reset()
            dataset.train.reset()
            dybm.get_predictions(dataset.warmup)
            dybm.learn(dataset.train, get_result=False)

            dybm.init_state()
            dataset.warmup.reset()
            dataset.train.reset()
            dybm.get_predictions(dataset.warmup)
            prediction = dybm.get_predictions(dataset.train)
            train_rmse = RMSE(dataset.train.to_list(), prediction)

            print("rate", rate, "train_rmse", train_rmse)

            if train_rmse > init_train_rmse:
                best_rate = rate / 2.
                rate = rate / 2.
                break

            rate = rate * 2

    print("best initial learning rate", best_rate)

    return best_rate
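The search above proceeds by halving the rate from 1.0: each candidate trains a fresh DyBM for a few epochs, and the loop stops once the training RMSE becomes worse than the best value seen so far while still beating the untrained baseline, returning the previous (larger) rate. If halving never produces such a point, the rate is doubled upward instead until the RMSE exceeds the baseline. The same strategy, abstracted over a hypothetical evaluate(rate) callback that returns the training RMSE of a freshly trained model (names are illustrative and not part of the library):

import math

def search_init_rate(evaluate, init_rmse):
    # Phase 1: halve from 1.0 until RMSE degrades but still beats the baseline.
    best_rate, rate, best_rmse = 1.0, 1.0, init_rmse
    while rate > 0:
        rmse = evaluate(rate)
        best_rmse = min(best_rmse, rmse)
        if rmse > best_rmse and rmse < init_rmse:
            best_rate = rate * 2       # the previous (larger) rate was better
            break
        rate *= 0.5
    # Phase 2: if halving never helped, double upward until RMSE overshoots.
    if best_rate == 1.0:
        rate = 2.0
        while rate < math.inf:
            rmse = evaluate(rate)
            if rmse > init_rmse:
                best_rate = rate / 2.0
                break
            rate *= 2.0
    return best_rate

# Toy U-shaped RMSE curve in log2(rate), minimised near rate = 0.125.
print(search_init_rate(lambda r: abs(math.log2(r) + 3) + 0.5, init_rmse=5.0))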
Example No. 8
def test_binary_model(model, max_repeat=1000, generator=False):
    """
    minimal test with learning a simple cyclic one-hot binary sequence
    """

    in_dim = model.get_input_dimension()
    out_dim = model.get_target_dimension()
    batch = in_dim
    eps = 1e-2

    in_seq = amath.array([[i % in_dim == j % in_dim for j in range(in_dim)]
                          for i in range(batch)],
                         dtype=int)

    if in_dim == out_dim:
        print("Testing generative learning")
        out_seq = in_seq
    else:
        print("Testing discriminative learning")
        out_seq = amath.array(
            [[i % out_dim == j % out_dim for j in range(out_dim)]
             for i in range(batch)],
            dtype=int)

    in_gen = SequenceGenerator(in_seq)
    out_gen = SequenceGenerator(out_seq)

    i = 0
    for i in xrange(max_repeat):
        if in_dim == out_dim:
            if generator:
                in_gen.reset()
                model.learn(in_gen)
            else:
                model._learn_sequence(in_seq)
        else:
            if generator:
                in_gen.reset()
                out_gen.reset()
                model.learn(in_gen, out_gen)
            else:
                model._learn_sequence(in_seq, out_seq)

        if i % 1000 == 0:
            if generator:
                in_gen.reset()
                predictions = model.get_predictions(in_gen)
            else:
                predictions = model.get_predictions(in_seq)
            """
            diffs = predictions - out_seq
            SE = [np.dot(diff, diff) for diff in diffs]
            RMSE2 = np.sqrt(np.mean(SE))
            """

            rmse = RMSE(predictions, out_seq)

            print("%d\t%1.3f" % (i, rmse))
            if rmse < eps * out_dim:
                print("Successfully completed in %d iterations with RMSE: %f" %
                      (i + 1, rmse))
                break

    if in_dim == out_dim:
        LL = model.get_LL_sequence(in_seq)
    else:
        LL = model.get_LL_sequence(in_seq, out_seq)
    print("LL: %f" % amath.mean(LL))

    return i + 1
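The nested comprehension that builds in_seq yields a cyclic one-hot pattern: row i has a 1 at column i % in_dim and 0 elsewhere, which for batch = in_dim is just the identity matrix. A quick standalone check with plain NumPy standing in for amath (illustration only):

import numpy as np

in_dim = 3
batch = in_dim
in_seq = np.array([[i % in_dim == j % in_dim for j in range(in_dim)]
                   for i in range(batch)],
                  dtype=int)
print(in_seq)
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]]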
Example No. 9
def test_real_model(model, max_repeat=1000, generator=False):
    """
    minimal test with learning a constant with noise
    """

    in_dim = model.get_input_dimension()
    out_dim = model.get_target_dimension()
    batch = 3
    in_mean = 1.0
    out_mean = 2.0
    d = 0.001

    random = amath.random.RandomState(0)
    in_seq = random.uniform(low=in_mean - d,
                            high=in_mean + d,
                            size=(batch, in_dim))
    in_gen = Uniform(length=batch,
                     low=in_mean - d,
                     high=in_mean + d,
                     dim=in_dim)

    if in_dim == out_dim:
        print("Testing generative learning for a real model")
        out_seq = in_seq
    elif out_dim.__class__ is list:
        random = amath.random.RandomState(0)
        out_seq = list()
        for i in xrange(batch):
            patterns = [
                random.uniform(low=out_mean - d, high=out_mean + d, size=dim)
                for dim in out_dim
            ]
            out_seq.append(patterns)
        out_gens = [
            Uniform(length=batch, low=out_mean - d, high=out_mean + d, dim=dim)
            for dim in out_dim
        ]
        out_gen = ListGenerator(out_gens)
    else:
        print("Testing discriminative learning for a real model")
        random = amath.random.RandomState(0)
        out_seq = random.uniform(low=out_mean - d,
                                 high=out_mean + d,
                                 size=(batch, out_dim))
        out_gen = Uniform(length=batch,
                          low=out_mean - d,
                          high=out_mean + d,
                          dim=out_dim)

    print("Input dimension: %d" % in_dim)
    if out_dim.__class__ is list:
        print("Target dimension: " + str(out_dim))
    else:
        print("Target dimension: %d" % out_dim)

    i = 0
    for i in xrange(max_repeat):
        if in_dim == out_dim:
            if generator:
                # print("Predicting with input generator of length: %d"
                # % in_gen.limit)
                in_gen.reset()
                model.learn(in_gen)
                in_gen.reset()
                predictions = model.get_predictions(in_gen)
            else:
                # print("Predicting with input sequence")
                model._learn_sequence(in_seq)
                predictions = model.get_predictions(in_seq)
        else:
            if generator:
                # print("Predicting with input generator and target generator")
                in_gen.reset()
                out_gen.reset()
                model.learn(in_gen, out_gen)
            else:
                # print("Predicting with input sequence and target sequence")
                model._learn_sequence(in_seq, out_seq)
            predictions = model.get_predictions(in_seq)
        """
        diffs = predictions - out_seq
        SE = [amath.dot(diff, diff) for diff in diffs]
        rmse = amath.sqrt(amath.mean(SE))
        """
        if out_dim.__class__ is list:
            predictions = [amath.concatenate(pred) for pred in predictions]
            rmse = RMSE(predictions,
                        [amath.concatenate(pat) for pat in out_seq])
        else:
            rmse = RMSE(predictions, out_seq)

        if i % 1000 == 0:
            print("%d\t%1.4f" % (i, rmse))
        if rmse < d:
            print("Successfully completed in %d iterations with RMSE: %f" %
                  (i + 1, rmse))
            break

    op = getattr(model, "get_LL_sequence", None)
    if callable(op):
        if in_dim == out_dim:
            LL = model.get_LL_sequence(in_seq)
        else:
            LL = model.get_LL_sequence(in_seq, out_seq)
        if LL is not None:
            print("LL: %f" % amath.mean(LL))

    return i + 1