Code example #1
def test(fileName=None):
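    # Rebuild the Seq2SeqModel, load an existing checkpoint, and re-save it in
    # Saver V1 format under ./output, timing model creation and checkpoint loading.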
    # training parameters
    hidden_units = 128
    num_layers = 2
    training_batch_size = 1 #100
    num_val_batches = 1
    model='rnn'
    # data generator
    data_generator = DataGenerator(training_batch_size,fileName)
    seq_length = data_generator.seq_length
    input_length = data_generator.encoder.size_x()
    output_length = data_generator.encoder.size_y()
    model_create_time = 0
    model_load_time = 0
    with tf.Session(config=config.get_tf_config()) as sess:
        #print "building model"
        start = time.time()
        model = Seq2SeqModel(session=sess,
                hidden_units=hidden_units,
                model=model,
                num_layers=num_layers,
                seq_length=seq_length,
                input_length=input_length,
                output_length=output_length,
                batch_size=training_batch_size,
                scope="model")
        end = time.time()
        model_create_time = end-start
        model.load('vrep/version1/model.ckpt-967')
        start = time.time()
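        # 'end' was taken just before load(), so start - end below is the checkpoint load time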
        model_load_time = start-end
        saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)
        save_path = saver.save(sess, "{}/model.ckpt".format("output"), global_step=967)
Code example #2
def train(training_data_version, output_dir):
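    # Train a Seq2SeqModel on the given training data version; the defaults below
    # can be overridden via ./output/<output_dir>/params.yaml, and training output
    # is written under output/<output_dir>.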
    # training parameters
    hidden_units = 128
    num_layers = 2
    num_epochs = 1000
    training_batch_size = 20
    num_val_batches = 1
    keep_prob = 1.0
    learning_rate = 0.1
    #model='lstm'
    model = 'rnn'
    # data generator

    model_config_file = './output/' + output_dir + "/params.yaml"
    import yaml
    with open(model_config_file, 'r') as stream:
        model_params = yaml.safe_load(stream)  # safe_load is sufficient for a plain parameter file
    if 'model' in model_params.keys():
        model = model_params['model']
    if 'hidden_units' in model_params.keys():
        hidden_units = model_params['hidden_units']
    if 'num_layers' in model_params.keys():
        num_layers = model_params['num_layers']
    if 'training_batch_size' in model_params.keys():
        training_batch_size = model_params['training_batch_size']

    data_generator = DataGenerator(training_batch_size, training_data_version)
    print len(data_generator.xseqs)
    print data_generator.seq_length
    print 'data number of batches', data_generator.num_batches
    batches_per_epoch = data_generator.num_batches
    seq_length = data_generator.seq_length
    input_length = data_generator.encoder.size_x()
    output_length = data_generator.encoder.size_y()

    with tf.Session(config=config.get_tf_config()) as sess:
        print "building model"
        model = Seq2SeqModel(session=sess,
                             hidden_units=hidden_units,
                             model=model,
                             num_layers=num_layers,
                             seq_length=seq_length,
                             input_length=input_length,
                             output_length=output_length,
                             keep_prob=keep_prob,
                             learning_rate=learning_rate,
                             batch_size=training_batch_size,
                             scope="model")
        #summary_writer = tf.train.SummaryWriter('output', sess.graph)
        model.init_variables()

        print "finished building model"
        model.fit(data_generator,
                  num_epochs=num_epochs,
                  batches_per_epoch=batches_per_epoch,
                  num_val_batches=num_val_batches,
                  output_dir="output/" + output_dir)
        #model.debug(data_generator)
        print "finished training"
Code example #3
def launch_learning_server(model_name, random_id):
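    # Start a ROS node that loads the model into the global h_to_a_model and
    # serves prediction requests through the LearningServer service.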
    import rospy
    from learning_ros_server.srv import LearningServer

    server_name = get_learning_server_name(random_id)
    rospy.init_node(server_name)

    global h_to_a_model

    with tf.Session(config=config.get_tf_config()) as sess:
        h_to_a_model = load_model(model_name, sess)
        rospy.Service(server_name, LearningServer,
                      handle_learning_server_request)
        rospy.spin()
Code example #4
def get_learning_model_output(model_name, raw_input, rnn_state, batch_size):
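    # Run one batch built from raw_input through the loaded model and return the
    # action probabilities, RNN outputs, updated RNN state, targets and inputs.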
    data_generator = rnn_model.DataGenerator(batch_size, raw_input)
    seq_length = data_generator.seq_length
    print len(data_generator.xseqs)
    print data_generator.seq_length
    print 'data number of batches', data_generator.num_batches
    with tf.Session(config=config.get_tf_config()) as sess:
        h_to_a_model = load_model(model_name, sess, seq_length,
                                  data_generator.batch_size)
        x, y = data_generator.next_batch()
        target = np.argmax(y, axis=2)  #target  = batch size*seq length *1
        probs, outputs, new_rnn_state, image_summary = h_to_a_model.predict_and_return_state(
            x, rnn_state,
            summary=False)  #output = seqlength*batch size* hiddenunits
        return (probs, outputs, new_rnn_state, target, x)
Code example #5
def train(model_name, output_dir, model_input):
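    # Run the trained RNN over model_input, split its hidden-state outputs by
    # whether the action prediction was correct, and fit one SVM on each set.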

    data_generator = rnn_model.DataGenerator(1, model_input)
    num_val_batches = data_generator.num_batches
    print len(data_generator.xseqs)
    print data_generator.seq_length
    print 'data number of batches', data_generator.num_batches
    #num_val_batches = 1
    seq_length = data_generator.seq_length
    #generate training data for two svms
    with tf.Session(config=config.get_tf_config()) as sess:
        h_to_a_model = load_model(model_name, sess, seq_length)
        #summary_writer = tf.summary.FileWriter('output', sess.graph, seq_length)

        correct_prediction_outputs = []
        wrong_prediction_outputs = []
        for _ in xrange(num_val_batches):
            x, y = data_generator.next_batch(
            )  # x/y = batch size*seq length*input_length/output length
            target = np.argmax(y, axis=2)  #target  = batch size*seq length *1
            probs, outputs, rnn_state, image_summary = h_to_a_model.predict_and_return_state(
                x, None,
                summary=False)  #output = seqlength*batch size* hiddenunits
            #summary_writer.add_summary(image_summary)
            prediction = np.argmax(
                probs, axis=2)  # prediction = batch size*seq length * 1
            #print data_generator.xseqs
            #print y
            #print target[0]
            #print prediction[0]
            #print outputs[0:2]
            correct_prediction = target == prediction  #batch size *seq length * 1
            for i in xrange(len(outputs)):
                if (not rnn_model.is_stump(x[0][i])) and (not rnn_model.is_pad(
                        x[0][i])):  # not stump and not pad
                    if correct_prediction[0][i]:
                        correct_prediction_outputs.append(outputs[i][0])
                    else:
                        wrong_prediction_outputs.append(outputs[i][0])

        print 'num correct prediction traces', len(correct_prediction_outputs)
        print 'num wrong prediction traces', len(wrong_prediction_outputs)
        correct_prediction_svm = compute_svm(correct_prediction_outputs,
                                             output_dir, 'correct_prediction')
        #y_correct_predict = correct_prediction_svm.predict(correct_prediction_outputs)
        wrong_prediction_svm = compute_svm(wrong_prediction_outputs,
                                           output_dir, 'wrong_prediction')
Code example #6
def train():
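    # Train a Seq2SeqModel with the fixed hyperparameters below on data supplied
    # by DataGenerator.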
    # training parameters
    hidden_units = 128
    num_layers = 2
    num_epochs = 1000
    training_batch_size = 20
    num_val_batches = 1
    keep_prob = 1.0
    learning_rate = 0.1
    #model='lstm'
    model='rnn'
    # data generator
    data_generator = DataGenerator(training_batch_size)
    print len(data_generator.xseqs)
    print data_generator.seq_length
    print 'data number of batches', data_generator.num_batches
    batches_per_epoch = data_generator.num_batches
    seq_length = data_generator.seq_length
    input_length = data_generator.encoder.size_x()
    output_length = data_generator.encoder.size_y()

    with tf.Session(config=config.get_tf_config()) as sess:
        print "building model"
        model = Seq2SeqModel(session=sess,
                hidden_units=hidden_units,
                model=model,
                num_layers=num_layers,
                seq_length=seq_length,
                input_length=input_length,
                output_length=output_length,
                keep_prob=keep_prob,
                learning_rate=learning_rate,
                batch_size=training_batch_size,
                scope="model")
        #summary_writer = tf.train.SummaryWriter('output', sess.graph)
        model.init_variables()

        print "finished building model"
        model.fit(data_generator,
                num_epochs=num_epochs,
                batches_per_epoch=batches_per_epoch,
                num_val_batches=num_val_batches)
        #model.debug(data_generator)
        print "finished training"
Code example #7
def test(fileName=None, model_name=None):
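    # Load the named checkpoint; with no fileName report validation loss over all
    # batches, otherwise predict on the sequence read from fileName and print the
    # chosen action, its probabilities and timing information.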
    # training parameters
    hidden_units = 128
    num_layers = 2
    training_batch_size = 1  #100
    num_val_batches = 1
    model = 'rnn'
    # data generator
    data_generator = DataGenerator(training_batch_size, fileName)
    seq_length = data_generator.seq_length
    input_length = data_generator.encoder.size_x()
    output_length = data_generator.encoder.size_y()
    model_create_time = 0
    model_load_time = 0
    with tf.Session(config=config.get_tf_config()) as sess:
        #print "building model"
        start = time.time()
        model = Seq2SeqModel(session=sess,
                             hidden_units=hidden_units,
                             model=model,
                             num_layers=num_layers,
                             seq_length=seq_length,
                             input_length=input_length,
                             output_length=output_length,
                             batch_size=training_batch_size,
                             scope="model")
        end = time.time()
        model_create_time = end - start
        #model.load('vrep/version1/model.ckpt-967')
        model.load(model_name)
        start = time.time()
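        # 'end' was taken just before load(), so start - end below is the checkpoint load time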
        model_load_time = start - end

        #print "finished building and loading model"
        if fileName is None:
            num_val_batches = data_generator.num_batches
            for _ in xrange(num_val_batches):
                val_set = [data_generator.next_batch(validation=True)]
                start = time.time()
                validate_loss = model.validate(val_set)
                end = time.time()
            print 'validate loss: ', validate_loss, ' time/sequence: {:.5f}'.format(
                (end - start) / num_val_batches / training_batch_size)

        else:
            x, y = data_generator.next_batch(validation=True)
            start = time.time()
            probs = model.predict(x)
            end = time.time()
            #print probs.shape
            probs_without_dummy_actions = [i[:-2] for i in probs[0]]
            prediction = np.argmax([probs_without_dummy_actions], axis=2)
            #prob_prediction = get_prob_prediction(probs_without_dummy_actions)

            #print prediction[0]
            print prediction[0][-2]
            #if prob_prediction == -1:
            #    print prediction[0][-2]
            #else:
            #    print prob_prediction
            print ' '.join(str(p) for p in probs_without_dummy_actions[-2])
            #print prob_prediction
            print prediction[0][-2]
            print prediction[0]

            #print data_generator.seqs
            #print data_generator.xseqs
            print data_generator.yseqs
            print model_name
            print 'model create time : {:.5f}'.format(model_create_time)
            print 'model load time : {:.5f}'.format(model_load_time)
            print ' time to predict: {:.5f}'.format((end - start))

            #print np.argmax(y, axis=2)[0]
Code example #8
def test(model_name, svm_model_prefix, model_input, action='test'):
    # run the RNN over model_input, collect its hidden-state outputs, and classify
    # them with the pre-trained correct/wrong-prediction SVMs: 'test' scores only
    # the last step, 'testBatch' reports seen/unseen counts over all steps
    data_generator = rnn_model.DataGenerator(1, model_input)
    num_val_batches = data_generator.num_batches
    if action == 'testBatch':
        print len(data_generator.xseqs)
        print data_generator.seq_length
        print 'data number of batches', data_generator.num_batches
    #num_val_batches = 1
    seq_length = data_generator.seq_length
    with tf.Session(config=config.get_tf_config()) as sess:
        h_to_a_model = load_model(model_name, sess, seq_length)
        #summary_writer = tf.summary.FileWriter('output', sess.graph)

        prediction_outputs = []
        num_seen_predictions_correct_svm = 0
        num_unseen_prediction_correct_svm = 0
        num_seen_predictions_wrong_svm = 0
        num_unseen_prediction_wrong_svm = 0

        for _ in xrange(num_val_batches):
            x, y = data_generator.next_batch(
            )  # x/y = batch size*seq length*input_length/output length
            #target = np.argmax(y, axis=2) #target  = batch size*seq length *1
            probs, outputs, image_summary = h_to_a_model.predict_and_return_state(
                x, summary=False)  #output = seqlength*batch size* hiddenunits
            #summary_writer.add_summary(image_summary)
            prediction = np.argmax(
                probs, axis=2)  # prediction = batch size*seq length * 1
            #print data_generator.xseqs
            #print y
            #print target[0]
            #print prediction[0]
            #print outputs[0:2]

            for i in xrange(len(outputs)):
                if (not rnn_model.is_stump(x[0][i])) and (not rnn_model.is_pad(
                        x[0][i])):  # not stump and not pad
                    prediction_outputs.append(outputs[i][0])

        if len(prediction_outputs) == 0:  #Initial stump
            print 1
            print -1
        else:
            correct_prediction_svm = joblib.load(svm_model_prefix +
                                                 'correct_prediction_svm.pkl')
            wrong_prediction_svm = joblib.load(svm_model_prefix +
                                               'wrong_prediction_svm.pkl')
            if action == 'test':
                y_correct_predict = correct_prediction_svm.predict(
                    [prediction_outputs[-1]])
                y_wrong_predict = wrong_prediction_svm.predict(
                    [prediction_outputs[-1]])

                print y_correct_predict[-1]
                print y_wrong_predict[-1]
                #if(y_correct_predict[-1] == 1) and (y_wrong_predict[-1] == -1):
                #    print 1
                #else:
                #    print 0
                print y_correct_predict
                print y_wrong_predict
                #print data_generator.xseqs
            if action == 'testBatch':
                y_correct_predict = correct_prediction_svm.predict(
                    prediction_outputs)
                y_wrong_predict = wrong_prediction_svm.predict(
                    prediction_outputs)
                # count how many outputs each SVM labels as seen (+1) or unseen (-1)
                num_seen_predictions_correct_svm = num_seen_predictions_correct_svm + sum(
                    1 for xx in y_correct_predict if xx == 1)
                num_unseen_prediction_correct_svm = num_unseen_prediction_correct_svm + sum(
                    1 for xx in y_correct_predict if xx == -1)
                num_seen_predictions_wrong_svm = num_seen_predictions_wrong_svm + sum(
                    1 for xx in y_wrong_predict if xx == 1)
                num_unseen_prediction_wrong_svm = num_unseen_prediction_wrong_svm + sum(
                    1 for xx in y_wrong_predict if xx == -1)
        if action == 'test':
            print data_generator.yseqs
            print prediction[0]
        if action == 'testBatch':
            print "Num seen predictions (correct svm, wrong svm):" + repr(
                num_seen_predictions_correct_svm) + "," + repr(
                    num_seen_predictions_wrong_svm)
            print "Num unseen predictions (correct svm, wrong svm):" + repr(
                num_unseen_prediction_correct_svm) + "," + repr(
                    num_unseen_prediction_wrong_svm)
Code example #9
def debug(model_name, model_input):
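    # Sanity check: feed x1 then x2 through a seq_length=1 model while carrying
    # the RNN state forward, then reload the same weights into a seq_length=2
    # model and assert that the concatenated input yields the same final state.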

    hidden_units = 128
    model = 'rnn'
    num_layers = 2
    prob_config = config.get_problem_config(rnn_model.PROBLEM_NAME)
    observation_length = prob_config['input_length']
    action_length = prob_config['output_length']
    encoder = rnn_model.Encoder(action_length, observation_length)
    input_length = encoder.size_x()
    output_length = encoder.size_y()
    batch_size = 3
    seq_length = 1
    #data_generator = rnn_model.DataGenerator(batch_size, model_input)
    #seq_length = data_generator.seq_length
    #print len(data_generator.xseqs)
    #print data_generator.seq_length
    #print 'data number of batches', data_generator.num_batches
    x1 = np.ones((batch_size, seq_length, input_length), dtype=np.int8)
    x2 = 2 * x1
    x = np.concatenate((x1, x2), axis=1)
    print x1.shape
    print x2.shape
    print x.shape

    with tf.Session(config=config.get_tf_config()) as sess:
        #h_to_a_model = load_model(model_name, sess, seq_length, data_generator.batch_size)
        print "Before creating model"
        h_to_a_model_1 = Seq2SeqModelExt(session=sess,
                                         hidden_units=hidden_units,
                                         model=model,
                                         num_layers=num_layers,
                                         seq_length=seq_length,
                                         input_length=input_length,
                                         output_length=output_length,
                                         batch_size=batch_size,
                                         scope="model")
        print "model created"
        h_to_a_model_1.init_variables()
        tf.train.Saver(max_to_keep=0).save(sess, "output/" + model_name)
        #x, y = data_generator.next_batch()
        #target = np.argmax(y, axis=2) #target  = batch size*seq length *1
        #probs, outputs, image_summary = h_to_a_model.predict_and_return_state(x, summary = False) #output = seqlength*batch size* hiddenunits
        #print probs
        state_size, rnn_state_1 = h_to_a_model_1.get_rnn_state_info(
            x1)  #output = seqlength*batch size* hiddenunits
        print np.array(rnn_state_1).shape
        print rnn_state_1
        print "Querying again"
        state_size, rnn_state_2 = h_to_a_model_1.get_rnn_state_info(
            x2, rnn_state_1)
        #print state_size
        print np.array(rnn_state_2).shape
        print rnn_state_2

    #for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model'):
    #    print i

    tf.reset_default_graph()

    #for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model'):
    #    print i

    with tf.Session(config=config.get_tf_config()) as sess:
        #h_to_a_model = load_model(model_name, sess, seq_length, data_generator.batch_size)
        print "Before creating model"
        h_to_a_model_2 = Seq2SeqModelExt(session=sess,
                                         hidden_units=hidden_units,
                                         model=model,
                                         num_layers=num_layers,
                                         seq_length=2 * seq_length,
                                         input_length=input_length,
                                         output_length=output_length,
                                         batch_size=batch_size,
                                         scope="model")
        h_to_a_model_2.load(model_name)
        state_size, rnn_state_3 = h_to_a_model_2.get_rnn_state_info(x)
        print rnn_state_3
        assert np.array_equal(rnn_state_3, rnn_state_2)
Code example #10
def get_learning_model(model_name, problem_name):
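    # Open a TF session (left open for the caller) and return the model loaded
    # for single-step, single-batch queries (seq_length=1, batch_size=1).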
    rnn_model.PROBLEM_NAME = problem_name
    sess = tf.Session(config=config.get_tf_config())
    h_to_a_model = load_model(model_name, sess, 1, 1)
    return h_to_a_model