Example #1
    def __init__(self, training, development, synthetic, config):
        self.Training = training
        self.Development = development
        self.config = config
        self.num_objs = 20  # 20 blocks + Background

        with tf.variable_scope('language'):
            self.text_embeddings = Embedding(
                self.Training.vocab_size,
                name='txt',
                one_hot=False,
                embedding_size=self.config.txt_dim)
        with tf.variable_scope('ops'):
            self.op_embeddings = Embedding(
                self.config.num_ops,
                name='ops',
                one_hot=False,
                embedding_size=self.config.pixel_dim)

        self.locs = []
        self.vision_bn_phase = tf.placeholder(tf.bool, name='train_phase')
        self.dropout = tf.placeholder(tf.float32, name='dropout')
        self.interactive_mode = tf.placeholder(tf.bool, name='interactive')

        # Create language pipeline
        with tf.variable_scope('language') as scope:
            l_phs, self.vision_loss, (pred_x, pred_y, pred_z,
                                      pred_t) = self._model()
            scope.reuse_variables()
        self.locs.extend([pred_x, pred_y, pred_z, pred_t])

        self.placeholders = l_phs

        with tf.variable_scope('lang_op'):
            self.language_op = self._language_loss()

        self.opts = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.hist_summary = tf.summary.merge_all()

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # Tensorboard
        self.train_reg_summary = tf.summary.scalar('train_reg', self.reg_loss)
        self.train_rot_summary = tf.summary.scalar('train_rot',
                                                   self.rotation_loss)
        self.val_cost_summary = tf.summary.scalar('val_cost', self.reg_loss)
        self.val_rot_summary = tf.summary.scalar('val_rot', self.rotation_loss)
        self.writer = tf.summary.FileWriter(self.config.summary_path,
                                            graph=tf.get_default_graph())
        self.saver = tf.train.Saver()

        if config.load_model is not None:
            print "Restoring ", config.load_model
            self.saver.restore(self.sess, config.load_model)
        else:
            print "Initializing"
            self.sess.run(tf.global_variables_initializer())
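
A minimal sketch of how an instance built by this constructor might be driven for one training step, assuming the object is bound to a variable model, that feed already maps the placeholders in self.placeholders to batch data, and that step is a global step counter (all three names are assumptions for illustration, not part of the original):

# Hypothetical training step (sketch): fill the control placeholders and run
# the language op together with the merged summaries.
feed[model.dropout] = 0.5              # keep probability while training
feed[model.vision_bn_phase] = True     # batch norm in training mode
feed[model.interactive_mode] = False
_, summaries = model.sess.run([model.language_op, model.hist_summary],
                              feed_dict=feed)
model.writer.add_summary(summaries, step)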
Example #2
    def __init__(self, training, development, synthetic, config):
        self.Training = training
        self.Development = development
        self.config = config
        self.num_objs = 20

        with tf.variable_scope('language'):
            self.text_embeddings = Embedding(
                self.Training.vocab_size,
                name='txt',
                one_hot=False,
                embedding_size=self.config.txt_dim)
        self.dropout = tf.placeholder(tf.float32, name='dropout')
        self.interactive_mode = tf.placeholder(tf.bool, name='interactive')

        # Create language pipeline
        with tf.variable_scope('language') as scope:
            self.placeholders, self.source_loss, self.prediction = self._model()
            scope.reuse_variables()

        with tf.variable_scope('lang_op'):
            self.language_op = self._language_loss()

        self.opts = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.hist_summary = tf.summary.merge_all()

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # Tensorboard
        self.train_summary = tf.summary.scalar('train', self.source_loss)
        self.val_summary = tf.summary.scalar('val', self.source_loss)
        self.writer = tf.summary.FileWriter(self.config.summary_path,
                                            graph=tf.get_default_graph())
        self.saver = tf.train.Saver()

        if config.load_model is not None:
            print "Restoring ", config.load_model
            self.saver.restore(self.sess, config.load_model)
        else:
            print "Initializing"
            self.sess.run(tf.global_variables_initializer())
Example #3
training, training_labels       = Sparse.matrix(train)
development, development_labels = Sparse.matrix(dev)
testing, testing_labels         = Sparse.matrix(test)

## TODO:
## MultiCellLSTM

batch_size = 128
hiddendim = 256
embeddingdim = 100
onehot = False
graph = tf.Graph()
dropout = 0.5

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)

# Input -> LSTM -> Outstate
inputs = tf.placeholder(tf.int32, [batch_size, maxlength])
labels = tf.placeholder(tf.float32, [batch_size, labelspace])
lengths = tf.placeholder(tf.int32, [batch_size])


# RNN
lstm = tf.nn.rnn_cell.LSTMCell(hiddendim,
                               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout)

# Prediction
output_layer = Layer.W(2*hiddendim, labelspace, 'Output')
output_bias  = Layer.b(labelspace, 'OutputBias')
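
This excerpt stops before the recurrence itself. Because the output layer expects 2*hiddendim features, the LSTM is presumably run bidirectionally; the lines below are a rough sketch under that assumption, not the original code.

# Sketch (assumption): bidirectional LSTM over the embedded inputs, classifying
# from the concatenated final hidden states.
cell_fw = tf.nn.rnn_cell.DropoutWrapper(
    tf.nn.rnn_cell.LSTMCell(hiddendim), output_keep_prob=dropout)
cell_bw = tf.nn.rnn_cell.DropoutWrapper(
    tf.nn.rnn_cell.LSTMCell(hiddendim), output_keep_prob=dropout)
embedded = embeddings.lookup(inputs)            # [batch, maxlength, embeddingdim]
_, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, embedded, sequence_length=lengths, dtype=tf.float32)
final_state = tf.concat([state_fw.h, state_bw.h], axis=1)  # [batch, 2*hiddendim]
logits = tf.matmul(final_state, output_layer) + output_bias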
Example #4
test, _, _ = Sparse.read("JSONReader/data/2016-NAACL/SRD/Test.mat")

## Create sparse arrays
training, training_labels = Sparse.matrix(train)
development, development_labels = Sparse.matrix(dev)
testing, testing_labels = Sparse.matrix(test)

batch_size = 128
hiddendim = 100
embeddingdim = 100
graph = tf.Graph()
onehot = True
inputdim = maxlength * vocabsize if onehot else maxlength * embeddingdim

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32,
                        shape=[batch_size, labelspace],
                        name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size, inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, 0.5)
Example #5
    ## Create sparse arrays
    t, t_l = Sparse.matrix(train)
    training[prediction] = t
    training_labels[prediction] = t_l
    d, d_l = Sparse.matrix(dev)
    development[prediction] = d
    development_labels[prediction] = d_l
    t, t_l = Sparse.matrix(test)
    testing[prediction] = t
    testing_labels[prediction] = t_l

# Define embeddings matrix
embeddings = {}
embeddings[0] = Embedding(vocabsize,
                          one_hot=onehot,
                          embedding_size=embeddingdim)
embeddings[1] = Embedding(vocabsize,
                          one_hot=onehot,
                          embedding_size=embeddingdim)
embeddings[2] = Embedding(vocabsize,
                          one_hot=onehot,
                          embedding_size=embeddingdim)

# Input -> LSTM -> Outstate
inputs = tf.placeholder(tf.int32, [batch_size, maxlength])
labels = {}
labels[0] = tf.placeholder(tf.float32, [batch_size, 20])
labels[1] = tf.placeholder(tf.float32, [batch_size, 20])
labels[2] = tf.placeholder(tf.float32, [batch_size, 9])
lengths = tf.placeholder(tf.int32, [batch_size])
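
The three label placeholders define one classification head per prediction target (plausibly source block, reference block and direction, matching the 20/20/9 class counts and the SRD data path seen in the other examples, though that reading is an assumption). A sketch of how the per-head losses could be combined, with logits standing in as a hypothetical dict of [batch_size, num_classes] tensors, one per head:

# Sketch (assumption): one softmax cross-entropy loss per prediction head,
# summed into a joint objective. `logits` is hypothetical.
losses = [
    tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels[i],
                                                logits=logits[i]))
    for i in range(3)
]
total_loss = tf.add_n(losses)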
Example #6
test, _, _          = Sparse.read("JSONReader/data/2016-NAACL/SRD/Test.mat")

## Create sparse arrays
training, training_labels       = Sparse.matrix(train)
development, development_labels = Sparse.matrix(dev)
testing, testing_labels         = Sparse.matrix(test)

batch_size = 128
hiddendim = 100
embeddingdim = 100
graph = tf.Graph()
onehot = True
inputdim = maxlength*vocabsize if onehot else maxlength*embeddingdim

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32, shape=[batch_size, labelspace], name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias  = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias  = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size,inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, 0.5)
logits = tf.matmul(dropout, output_layer) + output_bias
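
A cross-entropy loss and parameter update would normally follow the logits; the excerpt ends here, so the lines below are a sketch rather than the original continuation (the optimizer and learning rate are arbitrary choices):

# Sketch (assumption): softmax cross-entropy against the label placeholder,
# a gradient step, and a simple accuracy metric.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))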
Example #7
    'out':
    Layer.W(final_size * final_size * filters + 2 * hiddendim,
            rep_dim * rep_dim)
}

B = {
    'cb1': Layer.b(filters, init='Normal'),
    'cb2': Layer.b(filters, init='Normal'),
    'cb3': Layer.b(filters, init='Normal'),
    'cb4': Layer.b(filters, init='Normal'),
    'cb5': Layer.b(filters, init='Normal'),
    'out': Layer.b(rep_dim * rep_dim)
}

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=False, embedding_size=hiddendim)

# RNN
dropout = 0.75
lstm = tf.nn.rnn_cell.LSTMCell(
    hiddendim, initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout)

# Convolutional encoder: each VALID conv shrinks the spatial dims by 2
# (32x32 -> 22x22, or 18x18 -> 8x8, per the trailing comments below)
l1 = conv2d('l1', cur_world, W['cl1'], B['cb1'],
            padding='VALID')  # -> 32->30  18->16
l2 = conv2d('l2', l1, W['cl2'], B['cb2'], padding='VALID')  # -> 30-28   16->14
l3 = conv2d('l3', l2, W['cl3'], B['cb3'], padding='VALID')  # -> 28->26  14->12
l4 = conv2d('l4', l3, W['cl4'], B['cb4'], padding='VALID')  # -> 26->24  12->10
l5 = conv2d('l5', l4, W['cl5'], B['cb5'], padding='VALID')  # -> 24->22  10->8
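
conv2d here is a helper from the surrounding codebase rather than tf.nn.conv2d itself; judging by its arguments (name, input, weights, bias, padding) it presumably wraps the raw convolution with a bias add and a nonlinearity. A hypothetical definition, offered only as an assumption:

# Hypothetical wrapper (assumption): named convolution + bias + ReLU.
def conv2d(name, x, W, b, strides=1, padding='SAME'):
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padding)
    return tf.nn.relu(tf.nn.bias_add(conv, b), name=name)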