Example #1
def anneal_learning_rate(initial_learning_rate, normaliser, iteration):
    return sfloatX(initial_learning_rate / (1 + (iteration / normaliser)))
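anneal_learning_rate implements a simple hyperbolic decay: the rate halves after normaliser iterations and keeps shrinking from there. A quick sanity check with made-up values (this snippet is an illustration, not part of the original file):

for iteration in (0, 5, 10, 20):
    print(iteration, anneal_learning_rate(1e-3, normaliser=5, iteration=iteration))
# roughly: 0.001, 0.0005, 0.00033, 0.0002
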
import theano

from neuralnilm.objectives import mdn_nll
from neuralnilm.utils import sfloatX
from neuralnilm.updates import anneal_learning_rate
from neuralnilm.plot import MDNPlotter


# Number of units in the hidden (recurrent) layer
N_HIDDEN_LAYERS = 2
N_UNITS_PER_LAYER = 25
N_COMPONENTS = 2
# Number of training sequences in each batch
N_SEQ_PER_BATCH = 16
SEQ_LENGTH = 256
SHAPE = (N_SEQ_PER_BATCH, SEQ_LENGTH, 1)
# SGD learning rate
INITIAL_LEARNING_RATE = sfloatX(5e-4)
# LEARNING_RATE_NORMALISER = 5
learning_rate = theano.shared(INITIAL_LEARNING_RATE, name='learning_rate')
LEARNING_RATE_CHANGES = {
    500: 1e-04, 
    1000: 5e-05, 
    2000: 1e-05, 
    3000: 5e-06,
    4000: 1e-06,
    10000: 5e-07,
    50000: 1e-07
}

# Number of iterations to train the net
N_ITERATIONS = 100
VALIDATE = False
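LEARNING_RATE_CHANGES and anneal_learning_rate are two alternative schedules: the dictionary steps the rate down at fixed iteration counts, while the function decays it smoothly. A minimal sketch of how the step schedule could be applied to the shared learning_rate variable inside a training loop (the loop and the training step are assumptions, not shown in this excerpt):

for iteration in range(N_ITERATIONS):
    # When the iteration number matches a key, push the new rate into the
    # Theano shared variable so subsequent updates use it.
    if iteration in LEARNING_RATE_CHANGES:
        learning_rate.set_value(sfloatX(LEARNING_RATE_CHANGES[iteration]))
    # Smooth alternative (needs LEARNING_RATE_NORMALISER, commented out above):
    # learning_rate.set_value(anneal_learning_rate(
    #     INITIAL_LEARNING_RATE, LEARNING_RATE_NORMALISER, iteration))
    # ... run one training step with the current learning rate ...
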
Example #3
    # error and t are assumed to have shape (n_seq_per_batch, seq_length, 1);
    # THRESHOLD is defined elsewhere in the source file.
    n_seq_per_batch = error.shape[0]
    seq_length = error.shape[1]
    for seq_i in range(n_seq_per_batch):
        elements_above_thresh = t[seq_i, :, 0] > THRESHOLD
        n_above_thresh = elements_above_thresh.sum()
        if n_above_thresh == 0:
            error[seq_i, :, :] = 0
        else:
            # Give equal total weight (0.5 each) to the elements above and
            # below the threshold within each sequence.
            error[seq_i, elements_above_thresh, 0] *= 0.5 / n_above_thresh
            n_below_thresh = seq_length - n_above_thresh
            error[seq_i, ~elements_above_thresh, 0] *= 0.5 / n_below_thresh

    return error.sum()
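
The loop above rescales the per-element error so that, within every sequence, the elements above THRESHOLD and the elements below it each contribute half of the total weight. A quick NumPy check of that property (the threshold value, shapes, and the weights array are made up for illustration):

import numpy as np

THRESHOLD = 0.1
t = np.random.rand(16, 256, 1)     # targets: (n_seq_per_batch, seq_length, 1)
weights = np.ones_like(t)          # stand-in for the per-element error

for seq_i in range(t.shape[0]):
    above = t[seq_i, :, 0] > THRESHOLD
    n_above = above.sum()
    if n_above == 0:
        weights[seq_i, :, :] = 0
    else:
        weights[seq_i, above, 0] *= 0.5 / n_above
        weights[seq_i, ~above, 0] *= 0.5 / (t.shape[1] - n_above)

print(weights.sum(axis=(1, 2)))    # ~1.0 per sequence: 0.5 above + 0.5 below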


TWO_PI = sfloatX(2 * np.pi)

def mdn_nll(theta, t):
    """Computes the mean of negative log likelihood for P(t|theta) for
    Mixture Density Network output layers.

    :parameters:
        - theta : Output of the net. Contains mu, sigma, mixing
        - t : targets. Shape = (minibatch_size, output_size)

    :returns:
        - NLL per output
    """
    # Adapted from NLL() in
    # github.com/aalmah/ift6266amjad/blob/master/experiments/mdn.py
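The body of mdn_nll is cut off in this excerpt. For reference, the negative log likelihood it describes is that of a univariate Gaussian mixture, -log(sum_k mixing_k * N(t | mu_k, sigma_k)), averaged over the minibatch. A NumPy illustration of that formula follows; the function name and shapes are assumptions, and the library's own version is written in Theano:

import numpy as np

def mdn_nll_numpy(mu, sigma, mixing, t):
    # mu, sigma, mixing: (minibatch_size, n_components); t: (minibatch_size, 1)
    pdf = np.exp(-0.5 * ((t - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    likelihood = (mixing * pdf).sum(axis=1)   # mixture density per sample
    return -np.log(likelihood).mean()         # mean NLL over the minibatch
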
Example #5
import theano

from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import mdn_nll
from neuralnilm.utils import sfloatX
from neuralnilm.updates import anneal_learning_rate
from neuralnilm.plot import MDNPlotter

# Number of units in the hidden (recurrent) layer
N_HIDDEN_LAYERS = 2
N_UNITS_PER_LAYER = 25
N_COMPONENTS = 2
# Number of training sequences in each batch
N_SEQ_PER_BATCH = 16
SEQ_LENGTH = 256
SHAPE = (N_SEQ_PER_BATCH, SEQ_LENGTH, 1)
# SGD learning rate
INITIAL_LEARNING_RATE = sfloatX(5e-4)
# LEARNING_RATE_NORMALISER = 5
learning_rate = theano.shared(INITIAL_LEARNING_RATE, name='learning_rate')
LEARNING_RATE_CHANGES = {
    500: 1e-04,
    1000: 5e-05,
    2000: 1e-05,
    3000: 5e-06,
    4000: 1e-06,
    10000: 5e-07,
    50000: 1e-07
}

# Number of iterations to train the net
N_ITERATIONS = 100
VALIDATE = False