Example #1
def _compare_rnn_layer(rnn_layers,
                       x_train,
                       y_train,
                       epochs=40,
                       batch_size=5,
                       input_shape=(499, 1),
                       memory_length=15,
                       activation='tanh',
                       batch_norm=False,
                       optimizer='adam'):
    """Builds and trains models with different types of RNN layers."""
    histories = []
    names = []

    for rnn_layer in rnn_layers:
        if rnn_layer.lower() not in ['lstm', 'gru', 'simplernn']:
            raise ValueError(
                '{} is not a valid RNN layer. Only LSTM, GRU, or SimpleRNN '
                'layers are accepted.'.format(rnn_layer))

        rnn_model = rnn.RNN(input_shape, memory_length, rnn_layer, activation,
                            batch_norm, optimizer)

        rnn_model_history = rnn_model.train_rnn_model(x_train, y_train,
                                                      batch_size, epochs)

        histories.append(rnn_model_history)
        names.append(rnn_layer)
    return histories, names
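
A minimal usage sketch for the helper above, assuming the data comes from a loader like `load_rnn_data.load_data` (used in Example #23) and that `train_rnn_model` returns a Keras History object; neither assumption is confirmed by this snippet alone.

import load_rnn_data

# Hypothetical driver: compare the three supported layer types on one dataset.
(x_train, y_train), _ = load_rnn_data.load_data('data.npy',
                                                normalize_data=True,
                                                train_split=.66)
histories, names = _compare_rnn_layer(['LSTM', 'GRU', 'SimpleRNN'],
                                      x_train, y_train, epochs=10)
for name, history in zip(names, histories):
    print('{}: final training loss {:.4f}'.format(name, history.history['loss'][-1]))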
Example #2
def test_getitem_and_setitem_work():
    nn = rnn.RNN(n_a=4, n_x=1)
    for i in range(len(nn)):
        nn[i] = float(i)

    for i in range(len(nn)):
        assert nn[i] == float(i)
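
The test above treats the network as a flat, indexable parameter vector via `__len__`, `__getitem__`, and `__setitem__`. Below is a minimal sketch of what such an interface could look like; it is an illustrative assumption, not the project's actual `rnn.RNN` implementation.

import numpy as np

class FlatParamRNN:
    """Toy recurrent cell whose weights are exposed as one flat, indexable vector."""

    def __init__(self, n_a, n_x):
        # Wax (input weights), Waa (recurrent weights), ba (bias) of a vanilla RNN cell.
        self._params = [np.zeros((n_a, n_x)), np.zeros((n_a, n_a)), np.zeros((n_a, 1))]

    def __len__(self):
        return sum(p.size for p in self._params)

    def __getitem__(self, i):
        for p in self._params:
            if i < p.size:
                return p.flat[i]
            i -= p.size
        raise IndexError(i)

    def __setitem__(self, i, value):
        for p in self._params:
            if i < p.size:
                p.flat[i] = value
                return
            i -= p.size
        raise IndexError(i)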
Example #3
def init_rnn(m):

    X, char_to_ind = dr.read_data()
    K = len(X)
    RNN = rnn.RNN(K, m=m)

    return RNN, X, char_to_ind
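
A hedged call sketch; `m` is assumed to be the hidden-state size that `rnn.RNN(K, m=m)` expects, and `dr.read_data()` is assumed to return the text data together with its character-to-index mapping.

# Hypothetical usage of init_rnn with a 100-unit hidden state.
RNN, X, char_to_ind = init_rnn(m=100)
print('distinct characters:', len(char_to_ind))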
Example #4
def _compare_batch_norm(batch_norms,
                        x_train,
                        y_train,
                        epochs=40,
                        batch_size=5,
                        input_shape=(499, 1),
                        memory_length=15,
                        rnn_layer='LSTM',
                        activation='tanh',
                        optimizer='adam'):
    """Builds and trains models with and without Batch Normalization."""
    histories = []
    names = []

    for batch_norm in batch_norms:
        if not isinstance(batch_norm, bool):
            raise ValueError(
                '{} is not a boolean value. Only boolean values are '
                'accepted.'.format(batch_norm))

        rnn_model = rnn.RNN(input_shape, memory_length, rnn_layer, activation,
                            batch_norm, optimizer)

        rnn_model_history = rnn_model.train_rnn_model(x_train, y_train,
                                                      batch_size, epochs)

        histories.append(rnn_model_history)
        if batch_norm:
            names.append('With Batch Normalization')
        else:
            names.append('Without Batch Normalization')
    return histories, names
Example #5
    def initialize_model(self, use_pretrained_embedding=True):
        """ Get tree data and initialize a model

        #data: a dictionary; key-value example: "train"-(tree_list, ner_list)
        data: a dictionary; key-value example:
            "train"-{"tree_pyramid_list": tree_pyramid_list, "ner_list": ner_list}
        tree_pyramid_list: a list of (tree, pyramid) tuples
        ner_list: a list of dictionaries; key-value example: (3,5)-"PERSON"
        ne_list: a list of distinct string labels, e.g. "PERSON"
        """
        if self.model and self.model.init:
            return

        # Load data and determine dataset related hyperparameters
        config = rnn.Config()

        config.alphabet_size = self.character_length
        config.pos_dimension = self.pos_length
        config.output_dimension = self.entities_length
        config.lexicons = self.lexicon_length
        config.vocabulary_size = len(self.word_list)

        # Initialize a model
        self.model = rnn.RNN(config)
        self.model.sess = tf.Session()
        self.model.sess.run(tf.global_variables_initializer())

        if use_pretrained_embedding: self.load_embedding()
Example #6
    def __init__(self, rng, input_data, dim, n_feature_maps, window_sizes,
                 n_hidden, n_out, h_prev, y_prev):

        self.cnn = cnn.CNN(input_data=input_data,
                           rng=rng,
                           dim=dim,
                           n_feature_maps=n_feature_maps,
                           window_sizes=window_sizes)
        self.rnn = rnn.RNN(input_data=self.cnn.output,
                           rng=rng,
                           n_in=n_feature_maps * len(window_sizes),
                           n_hidden=n_hidden,
                           n_out=n_out,
                           h_prev=h_prev,
                           y_prev=y_prev,
                           activation=T.nnet.sigmoid)

        self.h = self.rnn.h
        self.window_sizes = window_sizes
        self.dim = dim
        self.n_out = n_out
        self.n_hidden = self.rnn.n_hidden
        self.params = self.cnn.params + self.rnn.params
        self.output = self.rnn.output
        self.loss = self.rnn.loss
        self.error = self.rnn.error
        return
Example #7
def new_agent(i):
    #if i == 0:
    #    print("New 0")
    #nets[i] = net.Net(input_sz, hidden_sz, output_sz, genes[i])
    nets[i] = rnn.RNN(input_sz, state_sz, genes[i])
    pos[i] = [random.random(), random.random()]
    set_mass(i, starting_mass)
    alive[i] = True
Example #8
def rnnMethod(area):
    global rnn_model
    if rnn_model is None:
        rnn_model = rnn.RNN()

    ranked_scores = rnn_model.train(False, area)
    ranked_scores = ranked_scores[1:]  #delete the first element
    return ranked_scores
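
The function relies on module-level state so the model is built only once per process. Below is a minimal sketch of that surrounding state plus a hypothetical call; the area value is a placeholder, not one taken from the source.

import rnn

# Lazily-initialized cache implied by the `global rnn_model` statement above.
rnn_model = None

ranked = rnnMethod('campus')  # 'campus' is a placeholder area identifier
print(ranked[:5])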
Example #9
    def testBuildRNN_inferenceMode(self):
        batch_size = 10
        seq_len = 5
        dict_size = 20
        model = rnn.RNN(rnn.RNNConfig(),
                        batch_size=batch_size,
                        dict_size=dict_size)
        init_inputs = tf.zeros(shape=(batch_size, ), dtype=tf.int32)
        init_state = model.zero_state(batch_size)
        finished_fn = lambda step, outputs: tf.constant(True,
                                                        shape=(batch_size, ))
        t_outputs, t_state = model(init_inputs, init_state, finished_fn,
                                   seq_len)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs, state = sess.run([t_outputs, t_state])
        self.assertEqual((batch_size, seq_len, dict_size), outputs.shape)
Example #10
def load_data_and_initialize_model(dataset,
                                   split_list=["train", "validate", "test"],
                                   use_pretrained_embedding=True):
    """ Get tree data and initialize a model
    
    #data: a dictionary; key-value example: "train"-(tree_list, ner_list)
    data: a dictionary; key-value example:
        "train"-{"tree_pyramid_list": tree_pyramid_list, "ner_list": ner_list}
    tree_pyramid_list: a list of (tree, pyramid) tuples
    ner_list: a list of dictionaries; key-value example: (3,5)-"PERSON"
    ne_list: a list of distinct string labels, e.g. "PERSON"
    """
    # Select the implementation of loading data according to dataset
    if dataset == "ontonotes":
        import ontonotes as data_utils
    elif dataset == "ontochinese":
        import ontochinese as data_utils
    elif dataset == "conll2003":
        import conll2003 as data_utils
    elif dataset == "conll2003dep":
        import conll2003dep as data_utils

    # Load data and determine dataset related hyperparameters
    config = rnn.Config()
    (data, word_list, ne_list, config.alphabet_size, config.pos_dimension,
     config.output_dimension,
     config.lexicons) = data_utils.read_dataset(split_list)
    config.vocabulary_size = len(word_list)

    # Initialize a model
    model = rnn.RNN(config)
    """
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    model.sess = tf.Session(config=tf_config)
    """
    model.sess = tf.Session()
    model.sess.run(tf.global_variables_initializer())
    if use_pretrained_embedding: load_embedding(model, word_list, dataset)
    return data, ne_list, model
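
A hedged invocation sketch, using one of the dataset names the branch above handles; the split list simply restates the default.

# Hypothetical call: load CoNLL-2003 and build a model on top of it.
data, ne_list, model = load_data_and_initialize_model(
    'conll2003', split_list=['train', 'validate', 'test'])
print('named-entity labels:', ne_list)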
Example #11
def main():
    dataset_filepath = '../../data/datasets/ngsim_feature_trajectories.h5'
    binedges = [10,15,25,50]
    max_len = 100
    data = utils.load_ngsim_trajectory_data(
        dataset_filepath,
        binedges=binedges,
        max_len=max_len,
        max_samples=None,
        train_ratio=.9,
        target_keys=['lidar_10']
    )

    exp_dir = '../../data/experiments/imputation'
    utils.maybe_mkdir(exp_dir)

    model = rnn.RNN(
        name='supervised_imputation',
        input_dim=data['train_x'].shape[2],
        hidden_dim=256,
        max_len=max_len,
        output_dim=len(binedges),
        batch_size=500,
        learning_rate=.0005,
        dropout_keep_prob=.75
    )
    writer = tf.summary.FileWriter(os.path.join(exp_dir, 'train'))
    val_writer = tf.summary.FileWriter(os.path.join(exp_dir, 'val'))

    utils.write_baseline_summary(data['train_lengths'], data['train_y'], writer)
    utils.write_baseline_summary(data['val_lengths'], data['val_y'], val_writer)
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        model.train(
            data, 
            n_epochs=1000,
            writer=writer,
            val_writer=val_writer
        )
Example #12
def lambda_handler(event, context):
    language = event['language']
    start_letters = event['start_letters']

    model_parameter_object_key = event['model_parameter_object_key']  # example : rnn_params.pkl
    model_object_key = event['model_object_key']  # example : rnn_model.pth
    model_bucket = event['model_bucket']

    # Load pre-processing parameters
    # Check if model parameters are available
    parameter_path = tmp + model_parameter_object_key
    if not os.path.isfile(parameter_path):
        s3_client.download_file(model_bucket, model_parameter_object_key, parameter_path)

    with open(parameter_path, 'rb') as pkl:
        params = pickle.load(pkl)

    all_categories = params['all_categories']
    n_categories = params['n_categories']
    all_letters = params['all_letters']
    n_letters = params['n_letters']

    # Check if models are available
    # Download model from S3 if model is not already present
    model_path = tmp + model_object_key
    if not os.path.isfile(model_path):
        s3_client.download_file(model_bucket, model_object_key, model_path)

    rnn_model = rnn.RNN(n_letters, 128, n_letters, all_categories, n_categories, all_letters, n_letters)
    rnn_model.load_state_dict(torch.load(model_path))
    rnn_model.eval()

    start = time()
    output_names = list(rnn_model.samples(language, start_letters))
    latency = time() - start

    return {'latency': latency, 'predict': output_names}
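
A hedged example of the event payload this handler expects; the parameter and model keys repeat the examples given in the comments above, while the bucket name, language, and start letters are placeholders.

# Hypothetical invocation of lambda_handler with a minimal event.
event = {
    'language': 'Spanish',
    'start_letters': 'ABC',
    'model_parameter_object_key': 'rnn_params.pkl',
    'model_object_key': 'rnn_model.pth',
    'model_bucket': 'my-model-bucket',
}
result = lambda_handler(event, context=None)
print(result['latency'], result['predict'])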
Example #13
def testReal():
    tgt_path = './train/'
    test_cls_name = 'T'
    trandir = tgt_path + test_cls_name + '/'
    testdir = tgt_path + test_cls_name + '/'

    n1 = rf.RNN(trandir, 'REAL')
    n2 = lgl.FFNN(trandir)

    filecount = 0
    for filename in os.listdir(testdir):
        filecount += 1

    signals = np.zeros((filecount, cfg.INPUT_SIZE), int)  # np.int was removed from NumPy; built-in int is equivalent
    index = 0
    for filename in os.listdir(testdir):
        signal = np.fromfile(testdir + filename, dtype=int)
        #if  len(signal) != cfg.INPUT_SIZE:
        #    signal = utl.resizeDim(signal,cfg.DIM)
        signal = signal.reshape((cfg.INPUT_SIZE, ))
        signals[index, :] = signal[:]
        index += 1

    refined = n1.test(signals)
    for i in range(filecount):
        inp = cv2.resize(
            utl.arr2img(signals[i].reshape((cfg.DIM, cfg.FEATURE_SIZE))),
            (200, 200))
        out = cv2.resize(
            utl.arr2img(refined[i].reshape((cfg.DIM, cfg.FEATURE_SIZE))),
            (200, 200))
        cv2.imshow('in' + str(i), inp)
        cv2.imshow('out' + str(i), out)
    cv2.waitKey(100000)
    final = n2.getresult(refined)
    print(final)
Example #14
def main():
    
    # loading data
    X, y = DataLoader.load_data()
    text, X_test, y_test = DataLoader.load_test_data()

    # initializing the network
    network = rnn.RNN()

    network.fit(X, y, ephocs=3000)

    # sample predictions
    label_names = ['ham', 'spam']
    tag = np.random.randint(0,10)
    print('\n\n[main]: Test prediction:',
          '\nSMS: ',text[tag] ,
          '\n\nactual: ',label_names[np.argmax(y_test[tag])],
          '\nprediction: ',label_names[np.argmax(network.predict(X_test[tag]))])
    
    tag = np.random.randint(0,10)
    print('\n\n[main]: Test prediction:',
          '\nSMS: ',text[tag] ,
          '\n\nactual: ',label_names[np.argmax(y_test[tag])],
          '\nprediction: ',label_names[np.argmax(network.predict(X_test[tag]))])
Example #15
def fs():
    input_length = 100
    hidden_cnt = 50
    data = get_test_data(input_length)

    rnn_nn = nn.NeuralNetwork(nn=rnn.RNN(input_length, hidden_cnt,
                                         data.x.shape[2], data.y.shape[1]),
                              validation_split=0.2,
                              batch_size=256,
                              nb_epoch=10,
                              show_accuracy=True)

    features, results = rnn_nn.feature_selection(data)
    print("Selected features: {0}".format(features))
    print(results)

    feature_selection = {
        "features": features,
        "results": results,
        "count": data.x.shape[2]
    }
    output = open('../../results/RNN_features', 'wb')
    pickle.dump(feature_selection, output)
    output.close()
Example #16
def _compare_optimizer(optimizers,
                       x_train,
                       y_train,
                       epochs=40,
                       batch_size=5,
                       input_shape=(499, 1),
                       memory_length=15,
                       rnn_layer='LSTM',
                       activation='tanh',
                       batch_norm=False):
    """Builds and trains models with varying optimizer values."""
    histories = []
    names = []

    for optimizer in optimizers:
        rnn_model = rnn.RNN(input_shape, memory_length, rnn_layer, activation,
                            batch_norm, optimizer)

        rnn_model_history = rnn_model.train_rnn_model(x_train, y_train,
                                                      batch_size, epochs)

        histories.append(rnn_model_history)
        names.append('{}'.format(optimizer))
    return histories, names
Example #17
def _compare_memory_length(memory_lengths,
                           x_train,
                           y_train,
                           epochs=40,
                           batch_size=5,
                           input_shape=(499, 1),
                           rnn_layer='LSTM',
                           activation='tanh',
                           batch_norm=False,
                           optimizer='adam'):
    """Builds and trains models with varying memory length values."""
    histories = []
    names = []

    for memory_length in memory_lengths:
        rnn_model = rnn.RNN(input_shape, memory_length, rnn_layer, activation,
                            batch_norm, optimizer)

        rnn_model_history = rnn_model.train_rnn_model(x_train, y_train,
                                                      batch_size, epochs)

        histories.append(rnn_model_history)
        names.append('Memory Length: {}'.format(memory_length))
    return histories, names
Example #18
def main(print=print):
    np.random.seed(1)

    nn = rnn.RNN(n_a=2, n_x=1)

    print(f"Training a RNN with {nn.size} parameters on a stupid example set.\n")

    avg_loss = 0.0
    avg_acc = 0.0

    for i in range(10000):
        inputs, outputs = create_stupid_example(9, m=16)
        loss, acc = nn.calculate_loss_and_accuracy(inputs, outputs)
        avg_loss = 0.5 * avg_loss + 0.5 * loss
        avg_acc = 0.5 * avg_acc + 0.5 * acc
        if i % 50 == 0:
            print(f"At iteration {i}, average accuracy is {int(avg_acc * 100)}% (loss is {avg_loss}).")
            if avg_acc > 0.99:
                break
        grad = nn.calculate_gradient_very_slowly(inputs, outputs)
        nn.learn_very_slowly(grad)

    print("\nDone training!\n")

    inputs, outputs = create_stupid_example(10)

    print(f"Example input  : {inputs.ravel()}")
    print(f"Expected output: {outputs.ravel()}\n")
    print(f"Actual output:")

    pred = [pred_y for pred_y in nn.forward_prop_seq(inputs)]

    for i in range(len(outputs)):
        print(f"  At index {i}, we expected {outputs[i][0][0]} and predicted {pred[i][0][0]}.")

    return (avg_loss, avg_acc)
Example #19
def train_rnn(dataset, net_settings, train_optimizer=tf.train.AdamOptimizer):
    batch_size = FLAGS.batch_size
    num_hidden_last = net_settings[-1]['num_hidden']

    input_placeholder = tf.placeholder(tf.float32,
                                       shape=[batch_size, net_settings[0]['dim_size']],
                                       name="input_placeholder")
    labels_placeholder = tf.placeholder(tf.int64, shape=[batch_size], name="labels_placeholder")
    optimizer = train_optimizer(FLAGS.learning_rate)

    rnn_model = rnn.RNN(net_settings)

    with tf.name_scope("LSTM"):
        net = rnn_model.fit_layers(input_placeholder)

    with tf.variable_scope("Dense1"):
        dense = tf.reshape(net, [batch_size, -1])
        weights = tf.get_variable(name="weights", shape=[num_hidden_last, FLAGS.num_classes],
                                  initializer=tf.truncated_normal_initializer())
        bias = tf.get_variable(name="bias", shape=[FLAGS.num_classes],
                               initializer=tf.truncated_normal_initializer())

        logits = tf.matmul(dense, weights) + bias

    loss = compute_loss(logits=logits, labels=labels_placeholder)
    train_op = optimizer.minimize(loss)

    accuracy = compute_accuracy(logits=logits, labels=labels_placeholder)

    saver = tf.train.Saver(tf.trainable_variables())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        current_exec = str(time.time())
        train_dir = FLAGS.checkpoint_path
        model_save_dir = os.path.join(train_dir, current_exec)

        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)

        model_filename = os.path.join(model_save_dir, "lstm_no_spark.model")

        with open(os.path.join(model_save_dir, "params_settings"), "w+") as f:
            f.write(params_str)

        if os.path.isfile(model_filename) and FLAGS.use_pretrained_model:
            saver.restore(sess, model_filename)

        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(model_save_dir, "train"), sess.graph)
        # test_writer = tf.summary.FileWriter(os.path.join(model_save_dir, "test"), sess.graph)

        train_x, train_y = process_batch(dataset)
        # val_x, val_y = process_batch(dataset[1])

        train_batches = next_batch(train_x, train_y, batch_size=FLAGS.batch_size)
        # val_batches = next_batch(val_x, val_y, batch_size=FLAGS.batch_size)

        batch_size = FLAGS.batch_size if FLAGS.batch_size else 1
        max_steps = FLAGS.epochs * batch_size
        total_steps = trange(max_steps)

        start = time.time()
        t_acc, v_acc, t_loss, v_loss = 0., 0., 0., 0.
        for step in total_steps:
            train_input, train_labels = next(train_batches)
            print(train_input, type(train_input))
            _, t_loss = sess.run([train_op, loss], feed_dict={
                input_placeholder: train_input,
                labels_placeholder: train_labels
            })

            t_loss = np.mean(t_loss)
            total_steps.set_description('Loss: {:.4f} - t_acc {:.3f}'
                                        .format(t_loss, t_acc))

            if step % FLAGS.evaluate_every == 0 or (step + 1) == max_steps:
                saver.save(sess, os.path.join(model_save_dir, 'lstm_no_spark'), global_step=step)

                summary, t_loss, t_acc = sess.run([merged, loss, accuracy], feed_dict={
                    input_placeholder: train_input,
                    labels_placeholder: train_labels
                })
                train_writer.add_summary(summary, step)
                # t_loss = np.mean(t_loss)

                # val_input, val_labels = val_batches.next()
                # summary, v_loss, v_acc = sess.run([merged, loss, accuracy], feed_dict={
                #     input_placeholder: val_input,
                #     labels_placeholder: val_labels
                # })
                # test_writer.add_summary(summary, step)
                # v_loss = np.mean(v_loss)

                total_steps.set_description('Loss: {:.4f} - t_acc {:.3f}'
                                            .format(t_loss, t_acc))

    print('RNN-LSTM - Time: {}'.format(time.time() - start))
    return []
Example #20
def main(videoPath):
    global t_clip
    global t_feature2rnn

    # global t_pred_vec
    global t_cnn_out_avg
    global t_pred_avg
    global t_rnn_pred
    global cnn_model
    global rnn_model

    # build graph
    cnn_graph = tf.Graph()
    with cnn_graph.as_default():
        t_clip = tf.placeholder(tf.float32, [None, 250, 250, 3],
                                name='clip_input')
        with tf.name_scope('CNN'):
            cnn_model = cnn.CNN(data_format='NHWC')
            cnn_out = cnn_model(t_clip)  # (batch, 2)
            print(cnn_out.shape)
            # pred_vec = tf.argmax(cnn_out, axis=1, output_type=tf.int32) # (batch,)
            # t_cnn_out_avg = tf.reduce_sum(cnn_out, axis=0)  # (classes,) sum over the batch dimension
            t_cnn_out_avg = tf.reduce_mean(cnn_out,
                                           axis=0)  # (classes,) mean over the batch dimension
            # print(cnn_out_avg.shape)
            t_pred_avg = tf.nn.softmax(t_cnn_out_avg)
            # print(t_pred_avg.shape) #(2,)
            # print(t_pred_avg.dtype) <class 'float'>
            # return
            # t_pred_vec = tf.argmax(cnn_out, axis=1, output_type=tf.float32) # (batch,)
            # t_pred_avg = tf.reduce_mean(t_pred_vec)

    rnn_graph = tf.Graph()
    with rnn_graph.as_default():
        t_feature2rnn = tf.placeholder(tf.float32, [1, None, 128],
                                       name='rnn_input')
        with tf.name_scope('RNN'):
            rnn_model = rnn.RNN('GRU', n_hidden=10, n_classes=2)
            logits = rnn_model(t_feature2rnn, batch_sz=1)
            t_rnn_pred = tf.nn.softmax(logits)  # confidence scores, shape (1, 2)
            # 0:a, 1:1-b, 2:1-a-b

    # create sess & restore param
    sess_conf = tf.ConfigProto()
    sess_conf.gpu_options.allow_growth = True
    # sess_conf.gpu_options.per_process_gpu_memory_fraction = 0.75
    cnn_sess = tf.Session(graph=cnn_graph, config=sess_conf)
    rnn_sess = tf.Session(graph=rnn_graph, config=sess_conf)

    with cnn_graph.as_default():
        saver = tf.train.Saver()
        saver.restore(cnn_sess, cnn_ckpt)
    with rnn_graph.as_default():
        saver = tf.train.Saver()
        saver.restore(rnn_sess, rnn_ckpt)
    # saver.restore(cnn_sess, cnn_ckpt)
    # saver.restore(rnn_sess, rnn_ckpt)

    # work on video
    videoPath = r'D:\Lab408\cnn_rnn\src_dir\1.mp4'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\2.mp4'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\3.mp4'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\4.mp4'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\5.mp4'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\fire-smoke-small(13).avi'
    # videoPath = r'D:\Lab408\cnn_rnn\src_dir\NIST Re-creation of The Station Night Club fire   without sprinklers (1).mp4'
    handelVideo(cnn_sess, rnn_sess, videoPath, videoPath + '.log')

    cnn_sess.close()
    rnn_sess.close()
    return
Example #21
    def create_agents(self):
        """ create agents, may use parameter sharing
        when self.training = False, this function will load the saved models
        """
        input_shape = self.env.observation_space
        output_shape_NO = self.env.action_space_NO
        output_shape_VNO = self.env.action_space_VNO

        if self.training:
            # ! create the first unique agent
            # create NO agent
            if self.dueling:
                model = rnn.DuelingRNN(input_shape, output_shape_NO)
                target_model = rnn.DuelingRNN(input_shape, output_shape_NO)
            else:
                model = rnn.RNN(input_shape, output_shape_NO)
                target_model = rnn.RNN(input_shape, output_shape_NO)
            target_model.load_state_dict(model.state_dict())
            self.agent_NO = SingleNoAgent.SingleNoAgent(
                model,
                target_model,
                self.env.action_space_NO,
                self.batch_size,
                self.learning_rate,
                self.gamma,
                self.epsilon,
                self.hysteretic,
                training=self.training)

            # create VNO agent
            if self.dueling:
                model = rnn.DuelingRNN(input_shape, output_shape_VNO)
                target_model = rnn.DuelingRNN(input_shape, output_shape_VNO)
            else:
                model = rnn.RNN(input_shape, output_shape_VNO)
                target_model = rnn.RNN(input_shape, output_shape_VNO)
            target_model.load_state_dict(model.state_dict())
            self.agent_VNO = SingleVnoAgent.SingleVnoAgent(
                model,
                target_model,
                self.env.action_space_VNO,
                self.batch_size,
                self.learning_rate,
                self.gamma,
                self.epsilon,
                self.hysteretic,
                training=self.training)
        else:
            # ! load the saved (trained) models
            # load NO agent
            if self.dueling:
                model = rnn.DuelingRNN(input_shape, output_shape_NO)
            else:
                model = rnn.RNN(input_shape, output_shape_NO)
            target_model = None
            path = 'individual_model/agent_{}/model'.format(0)
            path = os.path.join(self.model_path, path)
            model.load_state_dict(torch.load(path))
            model.eval()
            self.agent_NO = SingleNoAgent.SingleNoAgent(
                model,
                target_model,
                self.env.action_space_NO,
                self.batch_size,
                self.learning_rate,
                self.gamma,
                self.epsilon,
                self.hysteretic,
                training=self.training)

            # load VNO agent
            if self.dueling:
                model = rnn.DuelingRNN(input_shape, output_shape_VNO)
            else:
                model = rnn.RNN(input_shape, output_shape_VNO)
            target_model = None
            path = 'individual_model/agent_{}/model'.format(1)
            path = os.path.join(self.model_path, path)
            model.load_state_dict(torch.load(path))
            model.eval()
            self.agent_VNO = SingleVnoAgent.SingleVnoAgent(
                model,
                target_model,
                self.env.action_space_VNO,
                self.batch_size,
                self.learning_rate,
                self.gamma,
                self.epsilon,
                self.hysteretic,
                training=self.training)
Example #22
import wawProcessor as ww
import rnn as r

import os
import tensorflow as tf
from tensorflow.python.keras.optimizers import Adadelta

processor = ww.Processor()
RNN = r.RNN()
model = RNN.getRNN()

totoOriginal = processor.openWave(
    "C:\\Users\\NKF786\\PycharmProjects\\musicEncoding\\originals" + os.sep + "africa-toto-8bit.wav",
    'rb')

totoCover = processor.openWave(
    "C:\\Users\\NKF786\\PycharmProjects\\musicEncoding\\covers" + os.sep + "toto-metal-cover-cut-8bit.wav",
    'rb')

x_train, y_train = processor.getRNNTrainSequences(totoOriginal, totoCover, 50)
# print(x_train.shape)
# print(y_train.shape)
originalSequences = x_train

x_train, y_train, x_test, y_test = processor.getValidationSet(x_train, y_train)

RNN.fitRNN(model, x_train, y_train, (x_test, y_test))
processor.writeRNNCover(originalSequences, model)

totoOriginal.close()
totoCover.close()
Example #23
#                                    optimizer_title)

# Based on the above comparisons, an optimal RNN would use the following hyperparameters:
epochs = 40
batch_size = 3

memory_length = 20
input_shape = (499, 1)
rnn_layer = 'GRU'
activation = 'tanh'
batch_norm = False
optimizer = 'nadam'

# Load the training and testing data
(x_train, y_train), (x_test,
                     y_test) = load_rnn_data.load_data('data.npy',
                                                       normalize_data=True,
                                                       train_split=.66)

# Build and train the model
test_rnn = rnn.RNN(input_shape, memory_length, rnn_layer, activation,
                   batch_norm, optimizer)

rnn_model_history = test_rnn.train_rnn_model(x_train, y_train, batch_size,
                                             epochs)

# Evaluate the final model
test_mse = test_rnn.evaluate_rnn_model(x_test, y_test, batch_size)

print('Test MSE: {}'.format(test_mse))
Example #24
        model = gan_rnn.GAN_RNN(g_input_step, g_input_size, g_hidden_size,
                                g_output_step, g_batch_size, g_rate, g_epochs,
                                d_input_step, d_input_size, d_hidden_size,
                                d_batch_size, d_rate, d_epochs, num_epochs,
                                print_interval, num_epochs_test,
                                args.attention, args.wgan, args.w_clip, gumbel,
                                data_file)
    else:
        if args.feature == 0:
            model = gan_rnn_gcn.GAN_RNN_GCN(
                g_input_step, g_input_size, g_hidden_size, g_output_step,
                g_batch_size, g_rate, g_epochs, d_input_step, d_input_size,
                d_hidden_size, d_batch_size, d_rate, d_epochs, num_epochs,
                print_interval, num_epochs_test, args.attention, args.wgan,
                args.w_clip, num_support, gumbel, graph_file, data_file)
        else:
            model = gan_rnn_gcn_feature.GAN_RNN_GCN_Feature(
                g_input_step, g_input_size, g_hidden_size, g_output_step,
                g_batch_size, g_rate, g_epochs, d_input_step, d_input_size,
                d_hidden_size, d_batch_size, d_rate, d_epochs, num_epochs,
                print_interval, num_epochs_test, args.attention, args.wgan,
                args.w_clip, num_support, gumbel, graph_file, data_file)

    if args.baseline == 1:
        model = rnn.RNN(g_input_step, g_input_size, g_hidden_size,
                        g_output_step, g_batch_size, g_rate, num_epochs,
                        print_interval, num_epochs_test, args.attention,
                        data_file)
    model.build_model()
    model.train()
Example #25
File: model.py Project: Avmb/lrn
    def __init__(self, rnn_type, ntoken, ninp, nhid, nhidlast, nlayers, 
                 dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, 
                 tie_weights=False, ldropout=0.5, n_experts=10, ndistilstudents=0, 
                 unigram_prob_on_zero=False, unigram_frequencies=None,
                 rnd_models=None):
        super(RNNModel, self).__init__()
        self.ndistilstudents=ndistilstudents
        self.unigram_prob_on_zero=unigram_prob_on_zero
        self.unigram_frequencies=unigram_frequencies
        self.rnd_models=rnd_models
        self.use_dropout = True
        self.lockdrop = LockedDropout()
        self.encoder = nn.Embedding(ntoken, ninp)
        
        rnn_type = rnn_type.lower()
        self.rnns = [rnn.RNN(rnn_type, ninp if l == 0 else nhid, nhid if l != nlayers - 1 else nhidlast, 1, dropout=0, n_students=ndistilstudents) for l in range(nlayers)]
        if wdrop:
            self.rnns = [WeightDrop(rnn, ['_W', '_U'], dropout=wdrop if self.use_dropout else 0) for rnn in self.rnns]
        self.rnns = torch.nn.ModuleList(self.rnns)
        
        self.prior = nn.Linear(nhidlast, n_experts, bias=False)
        latent_linear = nn.Linear(nhidlast, n_experts*ninp, bias=not unigram_prob_on_zero)
        #latent_linear = nn.Linear(nhidlast, n_experts*ninp, bias=True)
        self.latent = nn.Sequential(latent_linear, nn.Tanh())
        self.decoder = nn.Linear(ninp, ntoken)
        self.decoder.bias.data[:] = torch.zeros_like(self.decoder.bias.data)
        self.decoder_gain = nn.Parameter(torch.ones(ninp), requires_grad=False)
        #self.decoder_gain = nn.Parameter(torch.scalar_tensor(1.0), requires_grad=True)
        #self.decoder_gain = nn.Parameter(torch.zeros(ninp))

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            #if nhid != ninp:
            #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        if unigram_prob_on_zero:
            self.decoder.bias.requires_grad=False
            pass
            
        self.init_weights()

        self.rnn_type = rnn_type
        self.ninp = ninp
        self.nhid = nhid
        self.nhidlast = nhidlast
        self.nlayers = nlayers
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropouth = dropouth
        self.dropoute = dropoute
        self.ldropout = ldropout
        self.dropoutl = ldropout
        self.n_experts = n_experts
        self.ntoken = ntoken

        size = 0
        for p in self.parameters():
            size += p.nelement()
        print('param size: {}'.format(size))
Example #26
taxonomy_infile = "/dev/shm/b/wiki_taxonomy_lemmatized.pkl" #pickle 2
parent_taxonomy_infile = "data/parent_taxonomy.pkl"
vector_model_infile = "/dev/shm/a/wiki_text_20161201_1to4_200d.model"
mag_infile = "/dev/shm/b/mag2.pkl"
ccs_infile = "/dev/shm/b/acm_ccs.pkl"
"""

taxonomy_infile = "/dev/shm/a/wiki_taxonomy_lemmatized.pkl"  #pickle 3
parent_taxonomy_infile = "data/parent_taxonomy.pkl"
vector_model_infile = "/dev/shm/a/wiki_text_20161201_1to4_200d.model"
mag_infile = "/dev/shm/a/mag2.pkl"
ccs_infile = "/dev/shm/a/acm_ccs.pkl"
mlp_infile = "data/mlp_model.h5"
expert_filter_infile = "data/expert_annotation.json"

#mag_fos_infile = "data/fos_levelname.csv"

taxonomy = load_taxonomy(taxonomy_infile)
taxonomy = preprocessTaxonomy(taxonomy)
w2v_model = load_vector_model(vector_model_infile)
mag = get_data_from_pickle(mag_infile)
ccs = get_data_from_pickle(ccs_infile)
parent_taxonomy = load_parent_taxonomy(parent_taxonomy_infile)

expert_filter = load_json(expert_filter_infile)

if os.path.exists(mlp_infile):
    mlp_model = load_model(mlp_infile)

rnn_model = rnn.RNN()
Example #27
def print_emissions(net, fname, i2voc):
  o = open("Emissions.{}.txt".format(fname),'w')
  e_list = net.emissions_list()
  for i in range(net.hidden_dim):
    listed = [(float(e_list[j][i]), str(i2voc[j])) for j in range(net.vocab_size)]
    listed.sort()
    listed.reverse()
    o.write("\n%d\n" % i)
    for prob, word in listed[:50]:
      o.write("   {:10.8f}  {:10s}\n".format(100*prob, str(word)))
  o.close()

vocab_size = len(corpus.dict)
print("Vocab size: %d" % vocab_size)

net = rnn.RNN(vocab_size, args).to(device)
net.corpus = corpus  # HACK

def repackage_hidden(h):
  """Wraps hidden states in new Tensors, to detach them from their history."""
  if isinstance(h, torch.Tensor):
    return h.detach()
  else:
    return tuple(repackage_hidden(v) for v in h)

def get_batch(source, i, tag_source=None):
  seq_len = min(args.max_len, source.size()[1] - 1 - i)
  data = source[:,i:i+seq_len+1]
  if tag_source is not None:
    tags = tag_source[:,i:i+seq_len+1]
    return data, tags
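
A minimal, self-contained sketch of how `repackage_hidden` is typically used across consecutive truncated-BPTT windows. It uses `torch.nn.RNN` as a stand-in model, since this snippet's own training loop is not shown; the shapes and loss are illustrative only.

import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.RNN(input_size=8, hidden_size=16, batch_first=True)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

hidden = torch.zeros(1, 4, 16)              # (num_layers, batch, hidden)
for _ in range(3):                          # three consecutive BPTT windows
    inputs = torch.randn(4, 10, 8)          # (batch, seq_len, features)
    targets = torch.randn(4, 10, 16)
    hidden = repackage_hidden(hidden)       # detach: gradients stop at the window boundary
    optimizer.zero_grad()
    output, hidden = model(inputs, hidden)
    loss = criterion(output, targets)
    loss.backward()
    optimizer.step()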
Example #28
            lines, kmer, 1, rnn_model_name)
        vocab_len = len(indexed_word) + 1

        # get feature and labels
        features, labels = tk.feature_label_extractor(tokenized_sequence,
                                                      vocab_len)

        # split training and test sets
        split = int(0.8 * len(features))
        x_train = features[:split]
        x_test = features[split:]
        y_train = labels[:split]
        y_test = labels[split:]

        # initialize the models
        rnn_model = rnn.RNN(vocab_len, 1, rnn_model_name)

        # train and save outputs
        rnn_history = rnn_model.train(x_train,
                                      y_train,
                                      x_test,
                                      y_test,
                                      e=epochs)
        pp.save_and_plot(rnn_history, rnn_model_name)
        print('Trained RNN saved, outputs generated and saved.')

        # CNN
        print('Training CNN')
        cnn_model_name = str(kmer) + '_kmer_CNN'
        # shuffle the data
        random.shuffle(lines)
Example #29
    def __init__(self,
                 rnn_type,
                 ntoken,
                 ninp,
                 nhid,
                 nhidlast,
                 nlayers,
                 dropout=0.5,
                 dropouth=0.5,
                 dropouti=0.5,
                 dropoute=0.1,
                 wdrop=0,
                 tie_weights=False,
                 ldropout=0.5,
                 n_experts=10):
        super(RNNModel, self).__init__()
        self.use_dropout = True
        self.lockdrop = LockedDropout()
        self.encoder = nn.Embedding(ntoken, ninp)

        self.rnns = [
            rnn.RNN(rnn_type,
                    ninp if l == 0 else nhid,
                    nhid if l != nlayers - 1 else nhidlast,
                    1,
                    dropout=0) for l in range(nlayers)
        ]
        if wdrop:
            self.rnns = [
                WeightDrop(rnn, ['_W', '_U'],
                           dropout=wdrop if self.use_dropout else 0)
                for rnn in self.rnns
            ]
        self.rnns = torch.nn.ModuleList(self.rnns)

        self.prior = nn.Linear(nhidlast, n_experts, bias=False)
        self.latent = nn.Sequential(nn.Linear(nhidlast, n_experts * ninp),
                                    nn.Tanh())
        self.decoder = nn.Linear(ninp, ntoken)

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            #if nhid != ninp:
            #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()

        self.rnn_type = rnn_type
        self.ninp = ninp
        self.nhid = nhid
        self.nhidlast = nhidlast
        self.nlayers = nlayers
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropouth = dropouth
        self.dropoute = dropoute
        self.ldropout = ldropout
        self.dropoutl = ldropout
        self.n_experts = n_experts
        self.ntoken = ntoken

        size = 0
        for p in self.parameters():
            size += p.nelement()
        print('param size: {}'.format(size))
Example #30
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))

# prepare the dataset of input to output pairs encoded as integers
seq_length = 1
dataX = []
dataY = []
for i in range(0, len(alphabet) - seq_length, 1):
    seq_in = alphabet[i:i + seq_length]
    seq_out = alphabet[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
    print(seq_in, '->', seq_out)

# reshape X to be [samples, time steps, features]
X = np.reshape(dataX, (len(dataX), seq_length, 1))
# normalize
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
print(dataX)
print(y)

vocabulary_size = len(char_to_int)

model = rnn.RNN(vocabulary_size)
o, s = model.forward(X[10])
print(o.shape)
print(o)
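
A short, hedged continuation: decoding the forward pass into a predicted letter. It assumes each row of `o` is a distribution over the 26 characters, which the snippet itself does not state.

# Assumed: the last row of o corresponds to the final (only) time step.
predicted_index = int(np.argmax(o[-1]))
print('input:', int_to_char[dataX[10][0]], '-> predicted:', int_to_char[predicted_index])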