def __define_network(self, learning_rate=0.0005):
        """Build the TF1 computation graph for a multi-label classifier.

        Constructs placeholders, a two-hidden-layer fully connected network
        (tanh hidden units, linear output logits), per-class sigmoid
        predictions, the mean sigmoid cross-entropy loss, an element-wise
        accuracy, an RMSProp training op, and a checkpoint saver.

        Args:
            learning_rate: step size for the RMSProp optimizer. Defaults to
                0.0005, the value previously hard-coded, so existing callers
                are unaffected.

        Side effects: resets the default graph and sets self.batch_size,
        self.inputs, self.targets, self.predictions, self.error,
        self.accuracy, self.train_step and self.saver.
        """
        num_classes = self.out_dim  # one independent output unit per class
        tf.reset_default_graph()

        # --- placeholders ---
        self.batch_size = tf.placeholder(tf.int32, name='batch_size')
        self.inputs = tf.placeholder(tf.float32, [None,
                                                  self.input_dim], 'inputs')
        self.targets = tf.placeholder(tf.float32, [None, num_classes], 'targets')

        # --- network: two tanh hidden layers + linear output layer ---
        nonlinearity = tf.nn.tanh
        with tf.name_scope(self.name_scope + 'fully_connected_layer1'):
            fully_connected_layer1 = fully_connected_layer(self.inputs, self.input_dim, self.hl_unit_no, nonlinearity=nonlinearity)
        with tf.name_scope(self.name_scope + 'fully_connected_layer2'):
            fully_connected_layer2 = fully_connected_layer(fully_connected_layer1, self.hl_unit_no, self.hl_unit_no, nonlinearity=nonlinearity)
        with tf.name_scope(self.name_scope + 'output-layer'):
            # tf.identity keeps the outputs as raw logits for the loss below.
            outputs = fully_connected_layer(fully_connected_layer2, self.hl_unit_no, num_classes, nonlinearity=tf.identity)

        # --- predictions: independent sigmoid per class (multi-label,
        #     not a softmax over mutually exclusive classes) ---
        with tf.name_scope(self.name_scope + 'predictions'):
            self.predictions = tf.sigmoid(outputs)

        # --- loss: mean sigmoid cross-entropy over all class outputs ---
        with tf.name_scope(self.name_scope + 'error'):
            self.error = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=outputs, labels=self.targets))

        # --- accuracy: fraction of per-class predictions, thresholded at
        #     0.5, that agree with the binary targets ---
        with tf.name_scope(self.name_scope + 'accuracy'):
            self.accuracy = tf.reduce_mean(tf.cast(
                tf.equal(tf.greater(self.predictions, 0.5),
                         tf.equal(self.targets, 1.0)), tf.float32))

        # --- training rule ---
        with tf.name_scope(self.name_scope + 'train'):
            self.train_step = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.error)

        # keep only the 3 most recent checkpoints
        self.saver = tf.train.Saver(max_to_keep=3)
# ---- Example #2 ("示例#2" — marker from the original code-sharing page; ----
# ---- what follows is a second, separate snippet)                        ----
# Commented-out remnants of a deeper conv stack (norm/conv/pool layer 4),
# kept disabled in the original.
#     norm3 = norm_layer(pool3)
# with tf.name_scope('conv-layer-4'):
#     conv4 = conv_layer(norm3,
#                        input_channel_dim=norm3.get_shape().as_list()[3],
#                        output_channel_dim=128,
#                        kernel_size=15,
#                        bias_init=0.1,
#                        name="4")
# with tf.name_scope('pool-layer-4'):
#     pool4 = pool_layer(conv4, name="4")
# put fcl on cpu because too big for gtx1050
# with tf.device('/cpu:0'):
# Fully connected head on top of the conv features.
# NOTE(review): despite its name, this first layer is produced by
# reshape_layer, presumably flattening pool3 to width 200 — confirm against
# reshape_layer's definition (not visible here).
with tf.name_scope('fully_connected_layer1'):
    fully_connected_layer1 = reshape_layer(pool3, 200, batch_size)
# 200 -> 100 fully connected layer (default nonlinearity of the helper).
with tf.name_scope('fully_connected_layer2'):
    fully_connected_layer2 = fully_connected_layer(fully_connected_layer1, 200, 100)
# 100 -> num_classes linear output layer; tf.identity keeps raw logits.
with tf.name_scope('output-layer'):
    outputs = fully_connected_layer(fully_connected_layer2, 100, train_data.num_classes, nonlinearity=tf.identity)

# Class probabilities via softmax over axis 1 (mutually exclusive classes).
with tf.name_scope('predictions'):
    predictions = tf.nn.softmax(outputs, 1)

# ------------ define error computation -------------
# Mean softmax cross-entropy plus L2 weight decay (factor 0.005) over all
# trainable variables except biases.
with tf.name_scope('error'):
    # NOTE(review): `vars` shadows the Python builtin of the same name.
    vars = tf.trainable_variables()
    error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=targets) +
                           tf.add_n([ tf.nn.l2_loss(v) for v in vars
                                     if 'bias' not in v.name]) * 0.005)
with tf.name_scope('accuracy'):
    accuracy = tf.reduce_mean(tf.cast(
            tf.equal(tf.argmax(outputs, 1), tf.argmax(targets, 1)),