Example #1
    def build_model(self, is_training=True, dropout_keep_prob=0.5):
        self.inputs = tf.placeholder(real_type(self.FLAGS),
                                     [self.FLAGS.batch_size, 224, 224, 3])
        self.targets = tf.placeholder(tf.int32, [self.FLAGS.batch_size])

        with tf.variable_scope("alexnet_v2"):
            with slim.arg_scope(alexnet_v2_arg_scope()):
                net = slim.conv2d(self.inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
                net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
                net = slim.conv2d(net, 192, [5, 5], scope='conv2')
                net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
                net = slim.conv2d(net, 384, [3, 3], scope='conv3')
                net = slim.conv2d(net, 384, [3, 3], scope='conv4')
                net = slim.conv2d(net, 256, [3, 3], scope='conv5')
                net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')

                # Use conv2d with VALID padding in place of fully connected
                # layers, matching slim's alexnet_v2.
                with slim.arg_scope([slim.conv2d],
                                    weights_initializer=trunc_normal(0.005),
                                    biases_initializer=tf.constant_initializer(0.1)):
                    net = slim.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                                       scope='dropout6')
                    net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
                    net = slim.conv2d(net, self.FLAGS.num_classes, [1, 1],
                                      activation_fn=None,
                                      normalizer_fn=None,
                                      biases_initializer=tf.constant_initializer(0.0),
                                      scope='fc8')

                    logits = tf.squeeze(net, [1, 2], name='fc8/squeezed')

                    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits=logits, labels=self.targets)
                    self.cost = tf.reduce_sum(loss)
                    self.global_step = tf.contrib.framework.get_or_create_global_step()
                    self.train_op = tf.train.AdagradOptimizer(0.01).minimize(
                        self.cost, global_step=self.global_step)
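
Each of these examples calls a real_type helper that the listing never defines. A minimal sketch of what it presumably does, assuming a use_fp16 flag on FLAGS (both the flag name and the fp16 branch are assumptions, not taken from the listing):

import tensorflow as tf

def real_type(FLAGS):
    # Hypothetical helper: choose the model's floating-point dtype.
    # The use_fp16 flag is assumed; the listing does not show it.
    return tf.float16 if getattr(FLAGS, 'use_fp16', False) else tf.float32
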
Example #2
    def build_model(self):
        self.inputs = tf.placeholder(
            real_type(self.FLAGS),
            [self.FLAGS.batch_size, self.FLAGS.num_features])
        self.targets = tf.placeholder(tf.int32, [self.FLAGS.batch_size])

        with tf.variable_scope("Fully_connected"):
            with slim.arg_scope([slim.fully_connected],
                                activation_fn=tf.nn.sigmoid,
                                reuse=False):
                net = slim.fully_connected(self.inputs,
                                           self.FLAGS.hidden_size,
                                           scope='input_layer')
                net = slim.repeat(net,
                                  self.FLAGS.num_layers - 2,
                                  slim.fully_connected,
                                  self.FLAGS.hidden_size,
                                  scope='hidden_layer')
                net = slim.fully_connected(net,
                                           self.FLAGS.num_classes,
                                           activation_fn=None,
                                           scope='output_layer')
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=net, labels=self.targets)
                self.cost = tf.reduce_sum(loss)
                self.global_step = tf.contrib.framework.get_or_create_global_step()
                # Minimize the reduced scalar cost, consistent with the
                # other examples.
                self.train_op = tf.train.AdagradOptimizer(0.01).minimize(
                    self.cost, global_step=self.global_step)
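
For context, a minimal driver for this model might look like the sketch below. The FLAGS values, the Model wrapper class, and the random data are all illustrative assumptions; only build_model, inputs, targets, cost, and train_op come from the example above.

import numpy as np
import tensorflow as tf
from argparse import Namespace

# Hypothetical FLAGS; field names match what build_model reads.
FLAGS = Namespace(batch_size=32, num_features=100, hidden_size=256,
                  num_layers=4, num_classes=10)

model = Model(FLAGS)  # assumed wrapper class that defines build_model()
model.build_model()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x = np.random.rand(FLAGS.batch_size, FLAGS.num_features).astype(np.float32)
    y = np.random.randint(FLAGS.num_classes, size=FLAGS.batch_size)
    _, cost = sess.run([model.train_op, model.cost],
                       feed_dict={model.inputs: x, model.targets: y})
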
Example #3
    def build_model(self):
        # Build model. Inputs are already dense [num_steps, batch, hidden_size]
        # features, so no embedding lookup is needed here.
        self.inputs = tf.placeholder(real_type(self.FLAGS),
                                     shape=(self.FLAGS.num_steps,
                                            self.FLAGS.batch_size,
                                            self.FLAGS.hidden_size))
        self.targets = tf.placeholder(tf.int32,
                                      shape=(self.FLAGS.num_steps,
                                             self.FLAGS.batch_size))

        # Build one LSTMCell per layer: reusing a single cell object across
        # layers makes MultiRNNCell share (or clash on) its variables.
        stacked_lstm = tf.contrib.rnn.MultiRNNCell([
            tf.contrib.rnn.LSTMCell(self.FLAGS.hidden_size)
            for _ in range(self.FLAGS.num_layers)
        ])
        initial_state = state = stacked_lstm.zero_state(
            self.FLAGS.batch_size, real_type(self.FLAGS))

        outputs = []
        with tf.variable_scope("RNN"):
            for time_step in range(self.FLAGS.num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                output, state = stacked_lstm(self.inputs[time_step, :, :],
                                             state)
                outputs.append(output)

        # outputs is a time-major list of [batch_size, hidden_size] tensors;
        # concatenating along axis 0 keeps the rows in the same time-major
        # order as the flattened [num_steps, batch_size] targets below.
        output = tf.reshape(tf.concat(axis=0, values=outputs),
                            [-1, self.FLAGS.hidden_size])
        softmax_w = tf.get_variable(
            "softmax_w", [self.FLAGS.hidden_size, self.FLAGS.vocab_size],
            dtype=real_type(self.FLAGS))
        softmax_b = tf.get_variable("softmax_b", [self.FLAGS.vocab_size],
                                    dtype=real_type(self.FLAGS))
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits], [tf.reshape(self.targets, [-1])], [
                tf.ones([self.FLAGS.num_steps * self.FLAGS.batch_size],
                        dtype=real_type(self.FLAGS))
            ])
        self.cost = tf.reduce_sum(loss) / self.FLAGS.batch_size
        self.final_state = state  # expose so callers can thread state across batches

        self.global_step = tf.contrib.framework.get_or_create_global_step()

        self.train_op = tf.train.AdagradOptimizer(0.01).minimize(
            self.cost, global_step=self.global_step)
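
The manual unrolling loop above follows the classic PTB tutorial. Since the inputs here are time-major [num_steps, batch, hidden], the same computation can be written with tf.nn.dynamic_rnn (TF 1.x), which builds the loop internally; a sketch of the equivalent call:

        # Equivalent to the manual "RNN" loop: time_major=True because the
        # inputs are [num_steps, batch_size, hidden_size]; outputs come back
        # with the same time-major layout.
        outputs, final_state = tf.nn.dynamic_rnn(stacked_lstm,
                                                 self.inputs,
                                                 initial_state=initial_state,
                                                 time_major=True)
        output = tf.reshape(outputs, [-1, self.FLAGS.hidden_size])
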
Example #4
    def build_model(self, is_training=True, dropout_keep_prob=0.5):
        self.inputs = tf.placeholder(real_type(self.FLAGS),
                                     [self.FLAGS.batch_size, 299, 299, 3])
        self.targets = tf.placeholder(tf.int32, [self.FLAGS.batch_size])

        with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
            # Thread the training flags through; inception_v3 accepts both.
            logits, endpoints = inception_v3.inception_v3(
                self.inputs,
                self.FLAGS.num_classes,
                is_training=is_training,
                dropout_keep_prob=dropout_keep_prob)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=self.targets)
        self.cost = tf.reduce_sum(loss)
        self.global_step = tf.contrib.framework.get_or_create_global_step()
        self.train_op = tf.train.AdagradOptimizer(0.01).minimize(
            self.cost, global_step=self.global_step)
Example #5
    def build_model(self, is_training=True, dropout_keep_prob=0.5):
        self.inputs = tf.placeholder(real_type(self.FLAGS),
                                     [self.FLAGS.batch_size, 224, 224, 3])
        self.targets = tf.placeholder(tf.int32, [self.FLAGS.batch_size])

        with tf.variable_scope("vgg_19"):
            with slim.arg_scope(vgg_arg_scope()):
                net = slim.repeat(self.inputs,
                                  2,
                                  slim.conv2d,
                                  64, [3, 3],
                                  scope='conv1')
                net = slim.max_pool2d(net, [2, 2], scope='pool1')
                net = slim.repeat(net,
                                  2,
                                  slim.conv2d,
                                  128, [3, 3],
                                  scope='conv2')
                net = slim.max_pool2d(net, [2, 2], scope='pool2')
                net = slim.repeat(net,
                                  4,
                                  slim.conv2d,
                                  256, [3, 3],
                                  scope='conv3')
                net = slim.max_pool2d(net, [2, 2], scope='pool3')
                net = slim.repeat(net,
                                  4,
                                  slim.conv2d,
                                  512, [3, 3],
                                  scope='conv4')
                net = slim.max_pool2d(net, [2, 2], scope='pool4')
                net = slim.repeat(net,
                                  4,
                                  slim.conv2d,
                                  512, [3, 3],
                                  scope='conv5')
                net = slim.max_pool2d(net, [2, 2], scope='pool5')

                # Use conv2d instead of fully_connected layers.
                net = slim.conv2d(net,
                                  4096, [7, 7],
                                  padding='VALID',
                                  scope='fc6')
                net = slim.dropout(net,
                                   dropout_keep_prob,
                                   is_training=is_training,
                                   scope='dropout6')
                net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
                net = slim.dropout(net,
                                   dropout_keep_prob,
                                   is_training=is_training,
                                   scope='dropout7')
                net = slim.conv2d(net,
                                  self.FLAGS.num_classes, [1, 1],
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='fc8')

                logits = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=logits, labels=self.targets)
                self.cost = tf.reduce_sum(loss)
                self.global_step = tf.contrib.framework.get_or_create_global_step()
                self.train_op = tf.train.AdagradOptimizer(0.01).minimize(
                    self.cost, global_step=self.global_step)