Example #1
 def test_variable(self):
     # Bulk Tests
     with tf.Graph().as_default():
         W = tflearn.variable(name='W1', shape=[784, 256],
                  initializer='uniform_scaling',
                  regularizer='L2')
         W = tflearn.variable(name='W2', shape=[784, 256],
                  initializer='uniform_scaling',
                  regularizer='L2')
Example #2
 def test_variable(self):
     # Bulk Tests
     with tf.Graph().as_default():
         W = tflearn.variable(name='W1',
                              shape=[784, 256],
                              initializer='uniform_scaling',
                              regularizer='L2')
         W = tflearn.variable(name='W2',
                              shape=[784, 256],
                              initializer='uniform_scaling',
                              regularizer='L2')
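Both tests above create L2-regularized variables. A minimal sketch (not part of the test suite) of how the resulting penalty terms could be picked up, assuming they are registered in the conventional TF1 regularization-losses collection; check your TFLearn version if it uses a different collection key:

import tensorflow as tf
import tflearn

with tf.Graph().as_default():
    W = tflearn.variable(name='W1', shape=[784, 256],
                         initializer='uniform_scaling',
                         regularizer='L2')
    # Assumption: the L2 penalty lands in the standard TF1 collection;
    # adjust the key if your TFLearn version stores it elsewhere.
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_reg = tf.add_n(reg_losses) if reg_losses else tf.constant(0.0)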
Example #3
    def __init__(self, hidden_dim=100, state_dim=128, batch_size=1):
        """
        Instantiate an Addition Core object, with the necessary hyperparameters.
        """
        self.hidden_dim, self.state_dim, self.bsz = hidden_dim, state_dim, batch_size
        self.env_dim = CONFIG["ENVIRONMENT_ROW"] * CONFIG[
            "ENVIRONMENT_DEPTH"]  # 4 * 10 = 40
        self.arg_dim = CONFIG["ARGUMENT_NUM"] * CONFIG[
            "ARGUMENT_DEPTH"]  # 3 * 10 = 30
        self.program_dim = CONFIG["PROGRAM_EMBEDDING_SIZE"]

        # Setup Environment Input Layer
        self.env_in = tf.placeholder(tf.float32,
                                     shape=[self.bsz, self.env_dim],
                                     name="Env_Input")

        # Setup Argument Input Layer
        self.arg_in = tf.placeholder(tf.float32,
                                     shape=[self.bsz, self.arg_dim],
                                     name="Arg_Input")

        # Setup Program ID Input Layer
        self.prg_in = tf.placeholder(tf.int32,
                                     shape=[None, 1],
                                     name='Program_ID')

        # Build Environment Encoder Network (f_enc)
        self.state_encoding = self.build_encoder()

        # Build Program Matrices
        self.program_key = tflearn.variable(
            name='Program_Keys',
            shape=[CONFIG["PROGRAM_NUM"], CONFIG["PROGRAM_KEY_SIZE"]],
            initializer='truncated_normal')
        self.program_embedding = self.build_program_store()
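For readers unfamiliar with tflearn.variable, the Program_Keys definition above corresponds roughly to the raw-TF1 variable creation below. This is a sketch, assuming the 'truncated_normal' initializer string maps to tf.truncated_normal_initializer(); CONFIG is the same dictionary used in the snippet:

import tensorflow as tf

# Rough raw-TF1 equivalent of the Program_Keys matrix above (sketch only).
program_key = tf.get_variable(
    name='Program_Keys',
    shape=[CONFIG["PROGRAM_NUM"], CONFIG["PROGRAM_KEY_SIZE"]],
    initializer=tf.truncated_normal_initializer())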
Example #4
def run():
    # model variables
    X = tf.placeholder('float', [None, 784])
    Y = tf.placeholder('float', [None, 10])

    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    def dnn(x):
        # using tflearn PReLU activation ops
        x = tflearn.prelu(tf.add(tf.matmul(x, W1), b1))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tflearn.prelu(tf.add(tf.matmul(x, W2), b2))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tf.nn.softmax(tf.add(tf.matmul(x, W3), b3))
        return x

    net = dnn(X)

    # use objective ops from TFLearn
    loss = tflearn.categorical_crossentropy(net, Y)
    # use metric ops from TFLearn
    acc = tflearn.metrics.accuracy_op(net, Y)
    # use SGD Optimizer class from TFLearn
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of the lr decay, the Optimizer must first be built with the
    # step tensor that tracks the training step.
    # (Note: when using the TFLearn estimator wrappers, build is self-managed,
    # so passing the `Optimizer` instance above as the `DNN` optimizer arg is enough.)
    step = tflearn.variable('step', initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Use TFLearn Trainer
    # Define a training op for backprop
    trainop = tflearn.TrainOp(loss=loss, optimizer=optim_tensor,
                              metric=acc, batch_size=128,
                              step_tensor=step)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=3)
    trainer.fit({X: trainX, Y: trainY},
                val_feed_dicts={X: testX, Y: testY},
                n_epoch=2, show_metric=True)
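The run() example assumes trainX, trainY, testX and testY already exist. One way to obtain them, assuming the MNIST helper bundled with TFLearn, would be:

import tflearn.datasets.mnist as mnist

# Hypothetical data-loading step for the run() example above.
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)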
Example #5
    def __init__(self, obs_dim, act_dim, gamma):
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.gamma = gamma
        self.Q_max = 0

        self.weights_path = 'models/qnet'
        if not os.path.exists(self.weights_path):
            os.makedirs(self.weights_path)

        self.g = tf.Graph()
        with self.g.as_default():

            self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs')
            self.act_ph = tf.placeholder(tf.float32, (None, self.act_dim), 'act')
            self.y_ph = tf.placeholder(tf.float32, (None, 1), 'y')

            # Make policy
            self.Q = self._qnet(self.obs_ph, self.act_ph, reuse=False)

            self.lr = 1e-3
            self.tderr = tf.losses.mean_squared_error(self.Q, self.y_ph)
            self.optim = tf.train.AdamOptimizer(self.lr).minimize(self.tderr)

            # Optimization action
            self.opt_act = tfl.variable("opt_act", shape=(act_dim,))
            self.Q_opt = self._qnet(self.obs_ph, tf.expand_dims(self.opt_act, 0), reuse=True)
            self.init_act_rnd = tf.assign(self.opt_act, tf.random_normal(tf.shape(self.opt_act)))
            self.init_act_zero = tf.assign(self.opt_act, tf.zeros(tf.shape(self.opt_act)))
            self.optimize_action = tf.train.AdamOptimizer(5e-3).minimize(-self.Q_opt, var_list=[self.opt_act])

            self.init = tf.global_variables_initializer()      

        config = tf.ConfigProto(
            device_count={'GPU': 0}
        )

        self.sess = tf.Session(graph=self.g, config=config)
        self.sess.run(self.init)
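The action-optimization ops defined above (init_act_rnd, optimize_action, opt_act) could be driven at inference time roughly as in the method sketch below; the method name and step count are illustrative, not from the source, and numpy is assumed imported as np:

    def best_action(self, obs, n_steps=50):
        # Re-randomize the action variable, then ascend Q by running the
        # optimize_action op (which minimizes -Q_opt) for a few steps.
        self.sess.run(self.init_act_rnd)
        feed = {self.obs_ph: np.reshape(obs, (1, self.obs_dim))}
        for _ in range(n_steps):
            self.sess.run(self.optimize_action, feed_dict=feed)
        return self.sess.run(self.opt_act, feed_dict=feed)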
Example #6
    net = dnn(X)

    # Using objective ops from TFLearn to compute crossentropy
    loss = tflearn.categorical_crossentropy(net, Y)

    # Using metric ops from TFLearn to compute accuracy
    acc = tflearn.metrics.accuracy_op(net, Y)

    # Using TFLearn SGD Optimizer class
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of the lr decay, the Optimizer must first be built with the
    # step tensor that tracks the training step.
    # (Note: when using the TFLearn estimator wrappers, build is self-managed,
    # so passing the `Optimizer` instance above as the `DNN` optimizer arg is enough.)
    step = tflearn.variable("step", initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Using TFLearn Trainer
    # Define a training op (op for backprop, only need 1 in this model)
    trainop = tflearn.TrainOp(loss=loss,
                              optimizer=optim_tensor,
                              metric=acc,
                              batch_size=128,
                              step_tensor=step)

    # Create Trainer, providing all training ops. Tensorboard logs are stored
    # in /tmp/tflearn_logs/. It is possible to change the verbose level for more
    # detailed logs about gradients, variables, etc.
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
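As in Examples #4 and #8, launching training then only requires a fit call; trainX, trainY, testX and testY are assumed to be loaded beforehand (e.g. MNIST arrays):

    trainer.fit({X: trainX, Y: trainY},
                val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True)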
Example #7
import tflearn
from tflearn.models.dnn import DNN
from tflearn.models.generator import SequenceGenerator
from tflearn.data_utils import VocabularyProcessor
from tflearn.data_preprocessing import DataPreprocessing
from tflearn.helpers.trainer import Trainer
from tflearn.helpers.evaluator import Evaluator
from tflearn.helpers.summarizer import summarize
from tflearn.helpers.regularizer import add_weights_regularizer
from tensorflow.contrib.slim import dataset

tflearn.input_data()
tflearn.variable()
tflearn.conv_2d()
tflearn.single_unit()
tflearn.lstm()
tflearn.embedding()
tflearn.batch_normalization()
tflearn.merge()
tflearn.regression()
tflearn.tanh()
tflearn.softmax_categorical_crossentropy()
tflearn.SGD()
tflearn.initializations.uniform()
tflearn.losses.L1()
tflearn.add_weights_regularizer()
tflearn.metrics.Accuracy()
tflearn.summaries()
tflearn.ImagePreprocessing()
tflearn.ImageAugmentation()
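As a rough illustration of how a few of the ops listed above fit together (layer sizes and hyperparameters below are made up for the sketch, and fully_connected is an additional TFLearn layer not shown in the list):

net = tflearn.input_data(shape=[None, 784])
net = tflearn.fully_connected(net, 256, activation='tanh')
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='sgd',
                         loss='categorical_crossentropy')
model = DNN(net, tensorboard_verbose=0)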
Example #8
        return x

    net = dnn(X)

    # Using objective ops from TFLearn to compute crossentropy
    loss = tflearn.categorical_crossentropy(net, Y)

    # Using metric ops from TFLearn to compute accuracy
    acc = tflearn.metrics.accuracy_op(net, Y)

    # Using TFLearn SGD Optimizer class
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of the lr decay, the Optimizer must first be built with the
    # step tensor that tracks the training step.
    # (Note: when using the TFLearn estimator wrappers, build is self-managed,
    # so passing the `Optimizer` instance above as the `DNN` optimizer arg is enough.)
    step = tflearn.variable("step", initializer="zeros", shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Using TFLearn Trainer
    # Define a training op (op for backprop, only need 1 in this model)
    trainop = tflearn.TrainOp(loss=loss, optimizer=optim_tensor, metric=acc, batch_size=128, step_tensor=step)

    # Create Trainer, providing all training ops. Tensorboard logs are stored
    # in /tmp/tflearn_logs/. It is possible to change the verbose level for more
    # detailed logs about gradients, variables, etc.
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY}, n_epoch=10, show_metric=True)
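The recurring comment about building the optimizer with a step tensor reflects how the decayed learning rate depends on the current step. A rough raw-TF1 counterpart of tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200), assuming it maps onto an exponential-decay schedule:

import tensorflow as tf

# Sketch only: the decayed learning rate is a function of the step tensor,
# which is why the optimizer must be built with it before use.
step = tf.Variable(0, trainable=False, name='step')
lr = tf.train.exponential_decay(learning_rate=0.1, global_step=step,
                                decay_steps=200, decay_rate=0.96)
sgd = tf.train.GradientDescentOptimizer(lr)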
Example #9
        x = tf.nn.softmax(tf.add(tf.matmul(x, W3), b3))

        return x

    net = dnn(X)

    loss = tflearn.categorical_crossentropy(net, Y)

    acc = tflearn.metrics.accuracy_op(net, Y)

    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of the lr decay, the Optimizer must first be built with the
    # step tensor that tracks the training step.
    # (Note: when using the TFLearn estimator wrappers, build is self-managed,
    # so passing the `Optimizer` instance above as the `DNN` optimizer arg is enough.)
    step = tflearn.variable('step', initializer='zeros', shape=[])

    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Using TFLearn Trainer
    # Define a training op (op for backprop, only need 1 in this model)
    trainop = tflearn.TrainOp(loss=loss,
                              optimizer=optim_tensor,
                              metric=acc,
                              batch_size=128,
                              step_tensor=step)

    # Create Trainer, providing all training ops. Tensorboard logs are stored
    # in /tmp/tflearn_logs/. It is possible to change the verbose level for more
    # detailed logs about gradients, variables, etc.