Esempio n. 1
0
def load_models(model_path=save_path,
                in_size=len(input_columns),
                out_size=len(output_columns) -
                1 if cost_mode == 'RL-MDN' else len(output_columns),
                hidden_size=hidden_size,
                num_recurrent_layers=num_recurrent_layers,
                model=layer_models[0]):
    """Load a trained Blocks/Theano model from disk and build its
    prediction function.

    Returns a tuple ``(predict_func, initials, encoder, code_size)``:
    ``predict_func`` maps a 'features' tensor3 to the collected recurrent
    state variables plus ``y_hat``; ``initials`` holds the INITIAL_STATE
    parameters of each recurrent brick; ``encoder`` and ``code_size`` come
    from ``load_encoder()``.

    NOTE: all defaults are evaluated once at import time from module-level
    config globals.
    NOTE(review): the ``in_size`` parameter is overwritten below from the
    encoder's latent size, so any passed-in value is ignored — confirm
    whether that is intentional.
    """
    initials = []
    if not os.path.isfile(model_path):
        print('Could not find model file.')
        sys.exit(0)
    print('Loading model from {0}...'.format(model_path))
    x = tensor.tensor3('features', dtype=theano.config.floatX)
    # Theano resolves the 'floatX' dtype string to config.floatX, so this
    # matches x's dtype.
    y = tensor.tensor3('targets', dtype='floatX')
    train_flag = [theano.shared(0)]
    # FIX: load the encoder once and reuse it; the original called
    # load_encoder() a second time at the end, re-loading the same model
    # from disk.
    encoder, code_size = load_encoder()
    # Inputs are the encoder's latent code plus the raw input columns.
    in_size = code_size + len(input_columns)
    y_hat, cost, cells = nn_fprop(x, y, in_size, out_size, hidden_size,
                                  num_recurrent_layers, train_flag)
    # Dummy MainLoop: no algorithm or data stream — its only job is to run
    # the saveload.Load extension, which restores the pickled parameters
    # into the freshly built graph.
    main_loop = MainLoop(algorithm=None,
                         data_stream=None,
                         model=Model(cost),
                         extensions=[saveload.Load(model_path)])
    for extension in main_loop.extensions:
        extension.main_loop = main_loop
    # Firing 'before_training' triggers the Load extension's restore.
    main_loop._run_extensions('before_training')
    bin_model = main_loop.model
    print('Model loaded. Building prediction function...')
    hiddens = []
    for i in range(num_recurrent_layers):
        # Bricks are named "<layer_model><index>" (e.g. 'lstm0').
        brick = [
            b for b in bin_model.get_top_bricks()
            if b.name == layer_models[i] + str(i)
        ][0]
        # Order matters: the compiled function's outputs follow the order
        # in which variables are appended to `hiddens`.
        hiddens.extend(
            VariableFilter(theano_name=brick.name + '_apply_states')(
                bin_model.variables))
        hiddens.extend(
            VariableFilter(theano_name=brick.name + '_apply_cells')(cells))
        initials.extend(
            VariableFilter(roles=[roles.INITIAL_STATE])(brick.parameters))
    predict_func = theano.function([x], hiddens + [y_hat])
    return predict_func, initials, encoder, code_size
Esempio n. 2
0
from blocks_extras.extensions.plot import Plot

from utils import get_stream, track_best, MainLoop, get_seed, make_wav, rescale
from model import nn_fprop
from config import config
# Load config parameters
# NOTE: locals() is globals() at module scope, so every config key becomes
# a module-level name (hdf5_file, frame_length, hidden_size, ...).
locals().update(config)

# Set up model and prediction function
x = tensor.tensor3('inputs', dtype='float64')
y = tensor.tensor3('targets', dtype='float64')

model = 'bs'  # placeholder; replaced by the unpickled model below
# FIX: pickle files are binary — open with 'rb'. Text mode ('r') corrupts
# the stream on Windows under Python 2 and fails outright under Python 3.
with open('gru_best.pkl', 'rb') as picklefile:
    model = load(picklefile)
y_hat, cost, cells = nn_fprop(x, y, frame_length, hidden_size, num_layers, model)
predict_fn = theano.function([x], y_hat)

# Generate
print("generating audio...")
seed = get_seed(hdf5_file, [seed_index])
sec = 16000  # samples per second (16 kHz)
samples_to_generate = sec*secs_to_generate
# FIX: explicit floor division — same result as '/' on Python 2 ints, and
# keeps the frame count an integer (usable by range()) on Python 3.
# NOTE(review): seq_length is *added* despite the "don't include seed"
# comment — confirm the intended frame count.
num_frames_to_generate = samples_to_generate//frame_length + seq_length  # don't include seed
predictions = []
prev_input = seed
for i in range(num_frames_to_generate):
    prediction = predict_fn(prev_input)
    predictions.append(prediction)
    # Running min/max over all predictions so far, recomputed each
    # iteration — presumably used for rescaling in the loop body, which
    # continues beyond this excerpt.
    pred_min = numpy.min(predictions)
    pred_max = numpy.max(predictions)
Esempio n. 3
0
from model import nn_fprop
from config import config

# Load config parameters
# NOTE: locals() is globals() at module scope, so this injects every config
# key (hdf5_file, batch_size, hidden_size, ...) as a module-level name.
locals().update(config)

# DATA
# Vocabulary metadata (index<->character maps and vocabulary size, judging
# by the names) plus train/dev data streams from the HDF5 dataset.
ix_to_char, char_to_ix, vocab_size = get_metadata(hdf5_file)
train_stream = get_stream(hdf5_file, 'train', batch_size)
dev_stream = get_stream(hdf5_file, 'dev', batch_size)


# MODEL
# uint8 index matrices; nn_fprop builds the network and returns the
# prediction variable and the training cost.
x = tensor.matrix('features', dtype='uint8')
y = tensor.matrix('targets', dtype='uint8')
y_hat, cost = nn_fprop(x, y, vocab_size, hidden_size, num_layers, model)

# COST
cg = ComputationGraph(cost)

if dropout > 0:
    # Apply dropout only to the non-recurrent inputs (Zaremba et al. 2015)
    inputs = VariableFilter(theano_name_regex=r'.*apply_input.*')(cg.variables)
    cg = apply_dropout(cg, inputs, dropout)
    # From here on, use the cost from the dropped-out graph.
    cost = cg.outputs[0]

# Learning algorithm
# RMSProp followed by step (gradient) clipping, composed in order.
step_rules = [RMSProp(learning_rate=learning_rate, decay_rate=decay_rate),
              StepClipping(step_clipping)]
algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                            step_rule=CompositeRule(step_rules))
Esempio n. 4
0
from model import nn_fprop
from config import config

# Load config parameters
# NOTE: locals() is globals() at module scope, so this injects every config
# key (hdf5_file, batch_size, hidden_size, ...) as a module-level name.
locals().update(config)

# DATA
# Vocabulary metadata (index<->character maps and vocabulary size, judging
# by the names) plus train/dev data streams from the HDF5 dataset.
ix_to_char, char_to_ix, vocab_size = get_metadata(hdf5_file)
train_stream = get_stream(hdf5_file, 'train', batch_size)
dev_stream = get_stream(hdf5_file, 'dev', batch_size)


# MODEL
# uint8 index matrices; this nn_fprop variant also returns the recurrent
# cell state variables (unused in the code visible here).
x = tensor.matrix('features', dtype='uint8')
y = tensor.matrix('targets', dtype='uint8')
y_hat, cost, cells = nn_fprop(x, y, vocab_size, hidden_size, num_layers, model)

# COST
cg = ComputationGraph(cost)

if dropout > 0:
    # Apply dropout only to the non-recurrent inputs (Zaremba et al. 2015)
    inputs = VariableFilter(theano_name_regex=r'.*apply_input.*')(cg.variables)
    cg = apply_dropout(cg, inputs, dropout)
    # From here on, use the cost from the dropped-out graph.
    cost = cg.outputs[0]

# Learning algorithm
# RMSProp followed by step (gradient) clipping, composed in order.
step_rules = [RMSProp(learning_rate=learning_rate, decay_rate=decay_rate),
              StepClipping(step_clipping)]
algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                            step_rule=CompositeRule(step_rules))
Esempio n. 5
0
# MODEL
# x: rank-3 float tensor built via TensorType (broadcastable pattern
# [False] * 3 is the same rank as tensor3); Theano resolves the 'floatX'
# dtype string to config.floatX.
x = T.TensorType('floatX', [False] * 3)('features')
y = T.tensor3('targets', dtype='floatX')
# Shared integer flag passed into nn_fprop, initialised to 0 — presumably
# "not training"; confirm against nn_fprop's use of it.
train_flag = [theano.shared(0)]
# Swap the first two axes — assumes a batch-major/time-major conversion;
# TODO confirm against the data stream layout.
x = x.swapaxes(0, 1)
y = y.swapaxes(0, 1)
# RL-MDN cost mode drops one output column; otherwise all are used.
out_size = len(output_columns) - 1 if cost_mode == 'RL-MDN' else len(
    output_columns)
_, latent_size = load_encoder()
# Input width is sized for the encoder's latent code plus the raw input
# columns (cf. the commented-out reparameterisation code below).
in_size = latent_size + len(input_columns)
# mean = x[:,:,0:latent_size]
# var = T.clip(T.exp(x[:,:,latent_size:latent_size*2]), .0001, 1000)
# rrng = MRG_RandomStreams(seed)
# rand = rrng.normal(var.shape, 0, 1, dtype=theano.config.floatX)
# x  = ifelse(T.lt(train_flag[0], .5), T.concatenate([mean , x[:,:,latent_size*2:]], axis=2) , T.concatenate([mean + (var * rand), x[:,:,latent_size*2:]], axis=2))
y_hat, cost, cells = nn_fprop(x, y, in_size, out_size, hidden_size,
                              num_recurrent_layers, train_flag)

# COST
cg = ComputationGraph(cost)
extra_updates = []

# Learning optimizer
# Optimizer selected from config; each choice is followed by step
# (gradient) clipping. (This statement continues past this excerpt.)
if training_optimizer == 'Adam':
    step_rules = [
        Adam(learning_rate=learning_rate),
        StepClipping(step_clipping)
    ]  # , VariableClipping(threshold=max_norm_threshold)
elif training_optimizer == 'RMSProp':
    step_rules = [
        RMSProp(learning_rate=learning_rate, decay_rate=decay_rate),
        StepClipping(step_clipping)
Esempio n. 6
0
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args = parser.parse_args()

    print('Loading model from {0}...'.format(save_path[network_mode]))

    # Symbolic sequence tensors; Theano resolves the 'floatX' dtype string
    # to config.floatX.
    x = tensor.tensor3('features', dtype='floatX')
    y = tensor.tensor3('targets', dtype='floatX')
    # Swap the first two axes — presumably a batch-major/time-major
    # conversion; confirm against the training data layout.
    x = x.swapaxes(0, 1)
    y = y.swapaxes(0, 1)
    in_size = num_features
    out_size = num_features
    # Rebuild the network graph ('MDN' output mode, training=False) so the
    # saved parameters can be loaded into it below.
    y_hat, cost, cells = nn_fprop(x,
                                  y,
                                  in_size,
                                  out_size,
                                  hidden_size[network_mode],
                                  num_layers,
                                  layer_models[network_mode][0],
                                  'MDN',
                                  training=False)
    # Dummy MainLoop: no algorithm or data stream — its only job is to run
    # the saveload.Load extension, which restores the pickled parameters.
    main_loop = MainLoop(algorithm=None,
                         data_stream=None,
                         model=Model(cost),
                         extensions=[saveload.Load(save_path[network_mode])])

    for extension in main_loop.extensions:
        extension.main_loop = main_loop
    # Firing 'before_training' triggers the Load extension's restore.
    main_loop._run_extensions('before_training')
    bin_model = main_loop.model
    print 'Model loaded. Building prediction function...'
    hiddens = []
Esempio n. 7
0
def load_models(
        models=hierarchy_models,
        in_size=len(hierarchy_input_columns[level_number_in_hierarchy]),
        out_size=len(hierarchy_output_columns[level_number_in_hierarchy]),
        hidden_size=hidden_size,
        num_layers=num_layers,
        model=layer_models[0]):
    """Load every saved model of a hierarchy and build prediction functions.

    For each saved model path in ``models`` the network graph is rebuilt,
    the saved parameters are restored through a dummy Blocks ``MainLoop``
    carrying a ``saveload.Load`` extension, and one Theano function is
    compiled per output head.

    Returns:
        predict_funcs: list (one entry per hierarchy level) of lists (one
            entry per task head) of Theano functions mapping 'features' to
            the collected hidden/cell state variables plus ``y_hat[task]``.
        initials: list (one entry per hierarchy level) of the
            INITIAL_STATE parameters of the recurrent bricks.

    NOTE: all defaults are evaluated once at import time from module-level
    config globals.
    """
    predict_funcs = []
    initials = []
    for hierarchy_index in range(len(models)):
        saved_model = models[hierarchy_index]
        print 'Loading model from {0}...'.format(models[hierarchy_index])
        x = tensor.tensor3('features', dtype=theano.config.floatX)
        y = tensor.tensor3('targets', dtype=theano.config.floatX)
        # Rebuild the graph (training=False) so parameters can be loaded.
        y_hat, cost, cells = nn_fprop(x,
                                      y,
                                      in_size,
                                      out_size,
                                      hidden_size,
                                      num_layers,
                                      model,
                                      training=False)
        # Dummy MainLoop: no algorithm or data stream — its only job is to
        # run the saveload.Load extension.
        main_loop = MainLoop(algorithm=None,
                             data_stream=None,
                             model=Model(cost),
                             extensions=[saveload.Load(saved_model)])
        for extension in main_loop.extensions:
            extension.main_loop = main_loop
        # Firing 'before_training' triggers the Load extension's restore.
        main_loop._run_extensions('before_training')
        bin_model = main_loop.model
        print 'Model loaded. Building prediction function...'
        hiddens = []
        initials.append([])
        # Shared (non-specialized) recurrent layers: bricks are named
        # "<layer_model><i>-<task>", with task -1 apparently marking the
        # shared layers.
        for i in range(num_layers - specialized_layer_num):
            brick = [
                b for b in bin_model.get_top_bricks()
                if b.name == layer_models[i] + str(i) + '-' + str(-1)
            ][0]
            # Order matters: the compiled functions' outputs follow the
            # order in which variables are appended to `hiddens`.
            hiddens.extend(
                VariableFilter(theano_name=brick.name + '_apply_states')(
                    bin_model.variables))
            hiddens.extend(
                VariableFilter(theano_name=brick.name + '_apply_cells')(cells))
            initials[hierarchy_index].extend(
                VariableFilter(roles=[roles.INITIAL_STATE])(brick.parameters))
        # Task-specialized top layers: one stack per game task, only when
        # task specialization is enabled.
        specialized_count = len(game_tasks) if task_specialized else 0
        for task in range(specialized_count):
            for i in range(num_layers - specialized_layer_num, num_layers):
                brick = [
                    b for b in bin_model.get_top_bricks()
                    if b.name == layer_models[i] + str(i) + '-' + str(task)
                ][0]
                hiddens.extend(
                    VariableFilter(theano_name=brick.name + '_apply_states')(
                        bin_model.variables))
                hiddens.extend(
                    VariableFilter(theano_name=brick.name +
                                   '_apply_cells')(cells))
                initials[hierarchy_index].extend(
                    VariableFilter(roles=[roles.INITIAL_STATE])(
                        brick.parameters))
        # One prediction function per task head (a single head when not
        # task-specialized), each returning all collected state variables
        # plus that head's prediction.
        output_count = len(game_tasks) if task_specialized else 1
        predict_funcs.append([])
        for task in range(output_count):
            predict_funcs[hierarchy_index].append(
                theano.function([x], hiddens + [y_hat[task]]))
    return predict_funcs, initials
Esempio n. 8
0
test_stream = get_stream(hdf5_file, 'test', batch_size)

# MODEL
# Targets are uint8 class indices under the Softmax cost mode, floats
# otherwise.
x = tensor.tensor3('features', dtype='floatX')
y = tensor.tensor3('targets',
                   dtype='uint8' if cost_mode == 'Softmax' else 'floatX')
out_size = len(hierarchy_output_columns[level_number_in_hierarchy])
# When a helper model feeds this level, the input width also accounts for
# the helper's hidden states (hidden size x number of helper layers) —
# presumably concatenated with this level's own input columns.
if use_helper_model:
    in_size = helper_hidden_size * helper_num_layers + len(
        hierarchy_input_columns[level_number_in_hierarchy])
else:
    in_size = len(hierarchy_input_columns[level_number_in_hierarchy])
y_hat, cost, cells = nn_fprop(x,
                              y,
                              in_size,
                              out_size,
                              hidden_size,
                              num_layers,
                              layer_models[0],
                              training=True)

# COST
cg = ComputationGraph(cost)

if dropout > 0:
    # Apply dropout only to the non-recurrent inputs (Zaremba et al. 2015)
    inputs = VariableFilter(theano_name_regex=r'.*apply_input.*')(cg.variables)
    print inputs
    cg = apply_dropout(cg, inputs, dropout)
    # From here on, use the cost from the dropped-out graph.
    cost = cg.outputs[0]

# Learning algorithm
Esempio n. 9
0
                          batch_size[network_mode])
test_stream = get_stream(hdf5_file[network_mode], 'test',
                         batch_size[network_mode])

# MODEL
# Symbolic sequence tensors; Theano resolves the 'floatX' dtype string to
# config.floatX.
x = T.tensor3('features', dtype='floatX')
y = T.tensor3('targets', dtype='floatX')
# Swap the first two axes — presumably a batch-major/time-major
# conversion; confirm against the data stream layout.
x = x.swapaxes(0, 1)
y = y.swapaxes(0, 1)
in_size = num_features
out_size = num_features
# Build the training graph ('MDN' output mode, training=True).
linear_output, cost, cells = nn_fprop(x,
                                      y,
                                      in_size,
                                      out_size,
                                      hidden_size[network_mode],
                                      num_layers,
                                      layer_models[network_mode][0],
                                      'MDN',
                                      training=True)
# COST
cg = ComputationGraph(cost)

if dropout[network_mode] > 0:
    # Apply dropout only to the non-recurrent inputs (Zaremba et al. 2015)
    inputs = VariableFilter(theano_name_regex=r'.*apply_input.*')(cg.variables)
    print inputs
    cg = apply_dropout(cg, inputs, dropout[network_mode])
    # From here on, use the cost from the dropped-out graph.
    cost = cg.outputs[0]

# Learning algorithm