Example No. 1
    def __init__(self,
                 sess,
                 width=256,
                 height=256,
                 channels=3,
                 action_dim=9,
                 learning_rate=0.0001,
                 model_name=None,
                 graph=None,
                 export=False):
        self.sess = sess
        self.width = width
        self.height = height
        self.channels = channels
        self.action_dim = action_dim
        self.learning_rate = learning_rate
        self.rgb, self.conf, self.vel = self.create_network('visuonet')

        if not export:
            # Ground-truth velocities fed in during training
            self.velocities = tf.placeholder(tf.float32, [None, 9])
            # Mean-squared error between the target and predicted velocities
            self.loss = tf.reduce_mean(
                tf.square(self.velocities - self.vel)
            )  #+ 0.001 * tf.reduce_mean(tf.square(self.gripper - self.vel[:,6]))
            self.optimizer = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)

            self.model = model(sess, MODELS_DIR, model_name=model_name)
            self.summary = self.model.summary(graph=graph,
                                              **dict(loss=self.loss))
            self.eval_indice = len(
                get_folders(pathname(RES_PATH, **dict(flag=1))))
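
For context, a minimal usage sketch of how a wrapper like this might be driven (illustrative only: the class name VisuoNet, the assumption that self.rgb is the image input placeholder, and the random batch are not part of the original source):

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    net = VisuoNet(sess, width=256, height=256, channels=3, action_dim=9)
    sess.run(tf.global_variables_initializer())

    # Dummy batch of 4 RGB frames and their target velocities
    batch_imgs = np.random.rand(4, 256, 256, 3).astype(np.float32)
    batch_vels = np.random.rand(4, 9).astype(np.float32)

    # One training step: minimize the MSE between predicted and target velocities
    _, loss = sess.run([net.optimizer, net.loss],
                       feed_dict={net.rgb: batch_imgs, net.velocities: batch_vels})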
Example No. 2
    def __init__(self,
                 sess,
                 width=256,
                 height=256,
                 channels=3,
                 learning_rate=0.0001,
                 z_shape=8,
                 j_shape=9,
                 model_name=None,
                 graph=None,
                 coeff=0.002):
        self.sess = sess
        self.width = width
        self.height = height
        self.channels = channels
        self.learning_rate = learning_rate
        self.z_shape = z_shape
        self.j_shape = j_shape

        self.img, self.p_t, self.p_t_1 = self.gen_network('generator')
        # self.p_t = tf.placeholder(tf.float32, shape = (None, self.j_shape))
        # self.std = tf.exp(self.logstds)
        # self.var = self.std ** 2

        # self.loss = 0.5 * tf.reduce_sum(tf.square((self.p_t - self.action_mean) / self.var)) \
        #             + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(self.p_t)[0]) \
        #             + tf.reduce_sum(self.logstds)

        # Per-dimension standard deviation; self.logstds is assumed to be created inside gen_network
        self.std = tf.exp(self.logstds)
        # self.var = tf.square(self.std)
        # Placeholder for the ground-truth values of p_t_1
        self.p_t_1_l = tf.placeholder(tf.float32, (None, 9))

        # Plain mean-squared error, reported in the summaries for monitoring
        self.mse_loss = tf.reduce_mean(tf.square(self.p_t_1 - self.p_t_1_l))
        # Negative log-likelihood of the targets under a diagonal Gaussian
        # with mean p_t_1 and standard deviation exp(logstds)
        lh_loss = 0.5 * tf.reduce_sum(tf.square((self.p_t_1_l - self.p_t_1) / self.std), axis=-1) \
               + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(self.p_t_1)[-1]) + tf.reduce_sum(self.logstds, axis=-1)
        self.lh_loss = tf.reduce_mean(lh_loss)

        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
            self.lh_loss)

        self.model = model(sess, MODELS_DIR, model_name=model_name)
        self.summary = self.model.summary(graph=graph,
                                          **dict(mse_loss=self.mse_loss,
                                                 lh_loss=self.lh_loss))
        self.eval_indice = len(get_folders(pathname(RES_PATH, **dict(flag=1))))
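
The lh_loss above is the negative log-density of the targets under a diagonal Gaussian with mean p_t_1 and standard deviation exp(logstds). A small NumPy sketch of the same formula, useful for checking the expression (the variable names only mirror the graph tensors):

import numpy as np

def gaussian_nll(x, mean, logstd):
    # Negative log-likelihood of x under N(mean, diag(exp(logstd)^2))
    std = np.exp(logstd)
    return (0.5 * np.sum(((x - mean) / std) ** 2, axis=-1)
            + 0.5 * np.log(2.0 * np.pi) * x.shape[-1]
            + np.sum(logstd, axis=-1))

x = np.array([[0.1, -0.2, 0.3]])
mean = np.zeros((1, 3))
logstd = np.zeros((1, 3))             # std = 1
print(gaussian_nll(x, mean, logstd))  # ~ 0.5*0.14 + 1.5*log(2*pi) ~ 2.83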
Example No. 3
    train_data = utils.HeadData(config.train_id_docs, np.arange(len(config.train_id_docs)))
    test_data = utils.HeadData(config.test_id_docs, np.arange(len(config.test_id_docs)))
    tf.reset_default_graph()
    tf.set_random_seed(1)
    utils.printParameters(config)

    # ---- Training ----
    config1 = tf.ConfigProto()
    config1.gpu_options.per_process_gpu_memory_fraction = 0.85
    with tf.Session(config=config1) as sess:
        # saver = tf.train.import_meta_graph('model.ckpt.meta')
        # saver.restore(sess, 'model.ckpt')
        # Copy the pretrained word vectors into a non-trainable embedding variable
        # and fetch them back as a NumPy array
        embedding_matrix = tf.get_variable('embedding_matrix', shape=config.wordvectors.shape, dtype=tf.float32,
                                           trainable=False).assign(config.wordvectors)
        emb_mtx = sess.run(embedding_matrix)
        model = tf_utils.model(config, emb_mtx, sess)
        obj, m_op, predicted_op_ner, actual_op_ner, predicted_op_rel, actual_op_rel, score_op_rel = model.run()

        train_step = model.get_train_op(obj)
        operations = tf_utils.operations(train_step, obj, m_op, predicted_op_ner, actual_op_ner, predicted_op_rel,
                                         actual_op_rel, score_op_rel)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        best_score = 0
        nepoch_no_imprv = 0  # for early stopping

        for epoch in range(config.nepochs + 1):
            model.train(train_data, operations, epoch)
            save_path = saver.save(sess, "model.ckpt")
            print("Model saved in path: %s" % save_path)
Example No. 4
from cnn_utils import load_dataset, preprocess_data
from tf_utils import model

num_classes = 6

# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Preprocess data
X_train, X_test, Y_train, Y_test = preprocess_data(X_train_orig, X_test_orig, Y_train_orig, Y_test_orig, num_classes)

# Training the parameters
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
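
A minimal sketch of what a preprocess_data helper of this kind typically does, assuming the usual treatment of the SIGNS dataset (the actual cnn_utils implementation is not shown): scale pixel values to [0, 1] and one-hot encode the labels.

import numpy as np

def preprocess_data_sketch(X_train_orig, X_test_orig, Y_train_orig, Y_test_orig, num_classes):
    # Scale pixel intensities from [0, 255] to [0, 1]
    X_train = X_train_orig / 255.0
    X_test = X_test_orig / 255.0
    # One-hot encode the integer class labels, shape (m, num_classes)
    Y_train = np.eye(num_classes)[Y_train_orig.reshape(-1)]
    Y_test = np.eye(num_classes)[Y_test_orig.reshape(-1)]
    return X_train, X_test, Y_train, Y_test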
Example No. 5
import data_service, plot_service, tf_utils

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = data_service.load_dataset()
plot_service.plot_sample(X_train_orig, Y_train_orig)
X_train, Y_train, X_test, Y_test = data_service.preprocess_data(
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig)

parameters = tf_utils.model(X_train, Y_train, X_test, Y_test)