Example #1
 def testModelVariables(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.cached_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     alexnet.alexnet_v2(inputs, num_classes)
     expected_names = [
         'alexnet_v2/conv1/weights',
         'alexnet_v2/conv1/biases',
         'alexnet_v2/conv2/weights',
         'alexnet_v2/conv2/biases',
         'alexnet_v2/conv3/weights',
         'alexnet_v2/conv3/biases',
         'alexnet_v2/conv4/weights',
         'alexnet_v2/conv4/biases',
         'alexnet_v2/conv5/weights',
         'alexnet_v2/conv5/biases',
         'alexnet_v2/fc6/weights',
         'alexnet_v2/fc6/biases',
         'alexnet_v2/fc7/weights',
         'alexnet_v2/fc7/biases',
         'alexnet_v2/fc8/weights',
         'alexnet_v2/fc8/biases',
     ]
     model_variables = [v.op.name for v in variables_lib.get_model_variables()]
     self.assertSetEqual(set(model_variables), set(expected_names))
Example #2
 def testModelVariables(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         alexnet.alexnet_v2(inputs, num_classes)
         expected_names = [
             'alexnet_v2/conv1/weights',
             'alexnet_v2/conv1/biases',
             'alexnet_v2/conv2/weights',
             'alexnet_v2/conv2/biases',
             'alexnet_v2/conv3/weights',
             'alexnet_v2/conv3/biases',
             'alexnet_v2/conv4/weights',
             'alexnet_v2/conv4/biases',
             'alexnet_v2/conv5/weights',
             'alexnet_v2/conv5/biases',
             'alexnet_v2/fc6/weights',
             'alexnet_v2/fc6/biases',
             'alexnet_v2/fc7/weights',
             'alexnet_v2/fc7/biases',
             'alexnet_v2/fc8/weights',
             'alexnet_v2/fc8/biases',
         ]
         model_variables = [
             v.op.name for v in variables_lib.get_model_variables()
         ]
         self.assertSetEqual(set(model_variables), set(expected_names))
Example #3
 def testForward(self):
     batch_size = 1
     height, width = 224, 224
     with self.test_session() as sess:
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = alexnet.alexnet_v2(inputs)
         sess.run(variables.global_variables_initializer())
         output = sess.run(logits)
         self.assertTrue(output.any())
Example #4
 def testForward(self):
   batch_size = 1
   height, width = 224, 224
   with self.cached_session() as sess:
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(inputs)
     sess.run(variables.global_variables_initializer())
     output = sess.run(logits)
     self.assertTrue(output.any())
Example #5
 def testForward(self):
   batch_size = 1
   height, width = 224, 224
   with self.test_session() as sess:
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(inputs)
     sess.run(tf.initialize_all_variables())
     output = sess.run(logits)
     self.assertTrue(output.any())
Example #6
 def testFullyConvolutional(self):
   batch_size = 1
   height, width = 300, 400
   num_classes = 1000
   with self.test_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, 4, 7, num_classes])
Example #7
 def testBuild(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.cached_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(inputs, num_classes)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/squeezed')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
Example #8
 def testFullyConvolutional(self):
   batch_size = 1
   height, width = 300, 400
   num_classes = 1000
   with self.cached_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
      self.assertEqual(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, 4, 7, num_classes])
Example #9
 def testBuild(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = alexnet.alexnet_v2(inputs, num_classes)
          self.assertEqual(logits.op.name, 'alexnet_v2/fc8/squeezed')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
Example #10
 def testEvaluation(self):
   batch_size = 2
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     predictions = math_ops.argmax(logits, 1)
     self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
Example #11
def save_model(model_path):

    input = tf.placeholder(tf.float32, (None, height, width, 3),
                           'input_tensor')
    logits, _ = alexnet.alexnet_v2(input, num_classes)
    output_tensor = tf.identity(logits, name='output_tensor')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        chkp = saver.save(sess, model_path)
        print('Save to ' + chkp)
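A minimal counterpart sketch (not from the original project) for loading that checkpoint back: it assumes the same module-level height, width and num_classes values, rebuilds an identical graph so the variable names match, and then calls tf.train.Saver().restore.

def load_model(model_path):
    # Rebuild the same graph as save_model so the variable names line up
    # with the checkpoint, then restore the saved weights.
    input_ = tf.placeholder(tf.float32, (None, height, width, 3),
                            'input_tensor')
    logits, _ = alexnet.alexnet_v2(input_, num_classes)
    tf.identity(logits, name='output_tensor')
    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
        print('Restored from ' + model_path)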
Example #12
 def testEvaluation(self):
   batch_size = 2
   height, width = 224, 224
   num_classes = 1000
   with self.cached_session():
     eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     predictions = math_ops.argmax(logits, 1)
     self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
Example #13
def logits(inputs,
           num_classes=notmnist_input.NUM_CLASSES,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='alexnet_v2'):
    """
    inputs: a tensor of size [batch_size, height, width, channels]
    """
    with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
        outputs, _ = alexnet.alexnet_v2(inputs)
    return outputs
Example #14
 def testTrainEvalWithReuse(self):
   train_batch_size = 2
   eval_batch_size = 1
   train_height, train_width = 224, 224
   eval_height, eval_width = 300, 400
   num_classes = 1000
   with self.test_session():
     train_inputs = random_ops.random_uniform(
         (train_batch_size, train_height, train_width, 3))
     logits, _ = alexnet.alexnet_v2(train_inputs)
     self.assertListEqual(logits.get_shape().as_list(),
                          [train_batch_size, num_classes])
     variable_scope.get_variable_scope().reuse_variables()
     eval_inputs = random_ops.random_uniform(
         (eval_batch_size, eval_height, eval_width, 3))
     logits, _ = alexnet.alexnet_v2(
         eval_inputs, is_training=False, spatial_squeeze=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [eval_batch_size, 4, 7, num_classes])
     logits = math_ops.reduce_mean(logits, [1, 2])
     predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #15
 def testTrainEvalWithReuse(self):
   train_batch_size = 2
   eval_batch_size = 1
   train_height, train_width = 224, 224
   eval_height, eval_width = 300, 400
   num_classes = 1000
   with self.cached_session():
     train_inputs = random_ops.random_uniform(
         (train_batch_size, train_height, train_width, 3))
     logits, _ = alexnet.alexnet_v2(train_inputs)
     self.assertListEqual(logits.get_shape().as_list(),
                          [train_batch_size, num_classes])
     variable_scope.get_variable_scope().reuse_variables()
     eval_inputs = random_ops.random_uniform(
         (eval_batch_size, eval_height, eval_width, 3))
     logits, _ = alexnet.alexnet_v2(
         eval_inputs, is_training=False, spatial_squeeze=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [eval_batch_size, 4, 7, num_classes])
     logits = math_ops.reduce_mean(logits, [1, 2])
     predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example #16
 def testEndPoints(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         _, end_points = alexnet.alexnet_v2(inputs, num_classes)
         expected_names = [
             'alexnet_v2/conv1', 'alexnet_v2/pool1', 'alexnet_v2/conv2',
             'alexnet_v2/pool2', 'alexnet_v2/conv3', 'alexnet_v2/conv4',
             'alexnet_v2/conv5', 'alexnet_v2/pool5', 'alexnet_v2/fc6',
             'alexnet_v2/fc7', 'alexnet_v2/fc8'
         ]
         self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example #17
 def testEndPoints(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.cached_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     _, end_points = alexnet.alexnet_v2(inputs, num_classes)
     expected_names = [
         'alexnet_v2/conv1', 'alexnet_v2/pool1', 'alexnet_v2/conv2',
         'alexnet_v2/pool2', 'alexnet_v2/conv3', 'alexnet_v2/conv4',
         'alexnet_v2/conv5', 'alexnet_v2/pool5', 'alexnet_v2/fc6',
         'alexnet_v2/fc7', 'alexnet_v2/fc8'
     ]
     self.assertSetEqual(set(end_points.keys()), set(expected_names))
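As a usage note (not from the test file), those end point names are the dictionary keys under which intermediate activations are exposed, so a hedged sketch of pulling a mid-level feature for feature extraction could look like this:

# Hypothetical feature-extraction snippet; the placeholder input and the
# chosen end point are assumptions for illustration only.
inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
_, end_points = alexnet.alexnet_v2(inputs, num_classes=1000)
fc7_features = end_points['alexnet_v2/fc7']  # [batch, 1, 1, 4096] for 224x224 inputs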
Example #18
def VAE(input_shape=[None, 784],
        output_shape=[None, 784],
        n_filters=[64, 64, 64],
        filter_sizes=[4, 4, 4],
        n_hidden=32,
        n_code=2,
        activation=tf.nn.tanh,
        dropout=False,
        denoising=False,
        convolutional=False,
        variational=False,
        softmax=False,
        classifier='alexnet_v2'):
    """(Variational) (Convolutional) (Denoising) Autoencoder.

    Uses tied weights.

    Parameters
    ----------
    input_shape : list, optional
        Shape of the input to the network. e.g. for MNIST: [None, 784].
    n_filters : list, optional
        Number of filters for each layer.
        If convolutional=True, this is the number of output filters to
        create for each convolutional layer, given as a list.
        If convolutional=False, this is the number of neurons for each
        layer of a fully connected network.
    filter_sizes : list, optional
        Only applied when convolutional=True.  This refers to the ksize (height
        and width) of each convolutional layer.
    n_hidden : int, optional
        Only applied when variational=True.  This refers to the first fully
        connected layer prior to the variational embedding, directly after
        the encoding.  After the variational embedding, another fully connected
        layer is created with the same size prior to decoding.  Set to 0 to
        not use an additional hidden layer.
    n_code : int, optional
        Only applied when variational=True.  This refers to the number of
        latent Gaussians to sample for creating the innermost encoding.
    activation : function, optional
        Activation function to apply to each layer, e.g. tf.nn.relu
    dropout : bool, optional
        Whether or not to apply dropout.  If using dropout, you must feed a
        value for 'keep_prob', as returned in the dictionary.  1.0 means no
        dropout is used.  0.0 means every connection is dropped.  Sensible
        values are between 0.5-0.8.
    denoising : bool, optional
        Whether or not to apply denoising.  If using denoising, you must feed a
        value for 'corrupt_rec', as returned in the dictionary.  1.0 means no
        corruption is used.  0.0 means every feature is corrupted.  Sensible
        values are between 0.5-0.8.
    convolutional : bool, optional
        Whether or not to use a convolutional network; otherwise a fully
        connected network is created.  This affects the meaning of the
        n_filters parameter.
    variational : bool, optional
        Whether or not to create a variational embedding layer.  This will
        create a fully connected layer after the encoding (if `n_hidden` is
        greater than 0), then a multivariate Gaussian sampling layer, then
        another fully connected layer.  The size of the fully connected
        layers is determined by `n_hidden`, and the size of the sampling
        layer by `n_code`.

    Returns
    -------
    model : dict
        {
            'cost_vae': Reconstruction (plus KL, if variational) cost to optimize.
            'cost_s': Classification cost (0 when softmax=False).
            'Ws': All weights of the encoder.
            'x': Input Placeholder
            'z': Innermost encoding Tensor (latent features)
            'y': Reconstruction of the Decoder
            'keep_prob': Amount to keep when using Dropout
            'corrupt_rec': Amount to corrupt when using Denoising
            'corrupt_cls': Amount to corrupt the classifier input when Denoising
            'train': Set to True when training; applies to Batch Normalization.
            'merged': Merged TensorBoard summaries.
        }
    """
    # network input / placeholders for train (bn) and dropout
    x = tf.placeholder(tf.float32, input_shape, 'x')
    t = tf.placeholder(tf.float32, output_shape, 't')
    label = tf.placeholder(tf.int32, [None], 'label')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    corrupt_rec = tf.placeholder(tf.float32, name='corrupt_rec')
    corrupt_cls = tf.placeholder(tf.float32, name='corrupt_cls')

    # input of the reconstruction network
    # np.tanh(2) = 0.964
    current_input1 = utils.corrupt(x)*corrupt_rec + x*(1-corrupt_rec) \
        if (denoising and phase_train is not None) else x
    current_input1.set_shape(x.get_shape())
    # 2d -> 4d if convolution
    current_input1 = utils.to_tensor(current_input1) \
        if convolutional else current_input1

    Ws = []
    shapes = []

    # Build the encoder
    for layer_i, n_output in enumerate(n_filters):
        with tf.variable_scope('encoder/{}'.format(layer_i)):
            shapes.append(current_input1.get_shape().as_list())
            if convolutional:
                h, W = utils.conv2d(x=current_input1,
                                    n_output=n_output,
                                    k_h=filter_sizes[layer_i],
                                    k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input1, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            Ws.append(W)
            current_input1 = h

    shapes.append(current_input1.get_shape().as_list())

    with tf.variable_scope('variational'):
        if variational:
            dims = current_input1.get_shape().as_list()
            flattened = utils.flatten(current_input1)

            if n_hidden:
                h = utils.linear(flattened, n_hidden, name='W_fc')[0]
                h = activation(batch_norm(h, phase_train, 'fc/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = flattened

            z_mu = utils.linear(h, n_code, name='mu')[0]
            z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]
            # modified by yidawang
            # s, u, v = tf.svd(z_log_sigma)
            # z_log_sigma = tf.matmul(
            #        tf.matmul(u, tf.diag(s)), tf.transpose(v))
            # end yidawang

            # Sample from noise distribution p(eps) ~ N(0, 1)
            epsilon = tf.random_normal(tf.stack([tf.shape(x)[0], n_code]))

            # Sample from posterior
            z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))

            if n_hidden:
                h = utils.linear(z, n_hidden, name='fc_t')[0]
                h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
                if dropout:
                    h = tf.nn.dropout(h, keep_prob)
            else:
                h = z

            size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
            h = utils.linear(h, size, name='fc_t2')[0]
            current_input1 = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
            if dropout:
                current_input1 = tf.nn.dropout(current_input1, keep_prob)

            if convolutional:
                current_input1 = tf.reshape(
                    current_input1,
                    tf.stack([
                        tf.shape(current_input1)[0], dims[1], dims[2], dims[3]
                    ]))
        else:
            z = current_input1

    shapes.reverse()
    n_filters.reverse()
    Ws.reverse()

    n_filters += [input_shape[-1]]

    # %%
    # Decoding layers
    for layer_i, n_output in enumerate(n_filters[1:]):
        with tf.variable_scope('decoder/{}'.format(layer_i)):
            shape = shapes[layer_i + 1]
            if convolutional:
                h, W = utils.deconv2d(x=current_input1,
                                      n_output_h=shape[1],
                                      n_output_w=shape[2],
                                      n_output_ch=shape[3],
                                      n_input_ch=shapes[layer_i][3],
                                      k_h=filter_sizes[layer_i],
                                      k_w=filter_sizes[layer_i])
            else:
                h, W = utils.linear(x=current_input1, n_output=n_output)
            h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
            if dropout:
                h = tf.nn.dropout(h, keep_prob)
            current_input1 = h

    y = current_input1
    t_flat = utils.flatten(t)
    y_flat = utils.flatten(y)

    # l2 loss
    loss_x = tf.reduce_mean(
        tf.reduce_sum(tf.squared_difference(t_flat, y_flat), 1))
    loss_z = 0

    if variational:
        # Variational lower bound, kl-divergence
        loss_z = tf.reduce_mean(-0.5 * tf.reduce_sum(
            1.0 + 2.0 * z_log_sigma - tf.square(z_mu) -
            tf.exp(2.0 * z_log_sigma), 1))

        # Add l2 loss
        cost_vae = tf.reduce_mean(loss_x + loss_z)
    else:
        # Just optimize l2 loss
        cost_vae = tf.reduce_mean(loss_x)

    # AlexNet for classification based on softmax using TensorFlow slim
    if softmax:
        axis = list(range(len(x.get_shape())))
        mean1, variance1 = tf.nn.moments(t, axis) \
            if (phase_train is True) else tf.nn.moments(x, axis)
        mean2, variance2 = tf.nn.moments(y, axis)
        var_prob = variance2 / variance1

        # Input of the classification network
        current_input2 = utils.corrupt(x)*corrupt_cls + \
            x*(1-corrupt_cls) \
            if (denoising and phase_train is True) else x
        current_input2.set_shape(x.get_shape())
        current_input2 = utils.to_tensor(current_input2) \
            if convolutional else current_input2

        y_concat = tf.concat([current_input2, y], 3)
        with tf.variable_scope('deconv/concat'):
            shape = shapes[layer_i + 1]
            if convolutional:
                # Here the input of the classification network is set to
                # twice the spatial size of the reconstruction network's
                # input: 112->224 for AlexNet, 150->300 for Inception v3/v4.
                y_concat, W = utils.deconv2d(
                    x=y_concat,
                    n_output_h=y_concat.get_shape()[1] * 2,
                    n_output_w=y_concat.get_shape()[1] * 2,
                    n_output_ch=y_concat.get_shape()[3],
                    n_input_ch=y_concat.get_shape()[3],
                    k_h=3,
                    k_w=3)
                Ws.append(W)

        # The following are the optional classification networks
        if classifier == 'squeezenet':
            predictions, net = squeezenet.squeezenet(y_concat, num_classes=13)
        elif classifier == 'zigzagnet':
            predictions, net = squeezenet.zigzagnet(y_concat, num_classes=13)
        elif classifier == 'alexnet_v2':
            predictions, end_points = alexnet.alexnet_v2(y_concat,
                                                         num_classes=13)
        elif classifier == 'inception_v1':
            predictions, end_points = inception.inception_v1(y_concat,
                                                             num_classes=13)
        elif classifier == 'inception_v2':
            predictions, end_points = inception.inception_v2(y_concat,
                                                             num_classes=13)
        elif classifier == 'inception_v3':
            predictions, end_points = inception.inception_v3(y_concat,
                                                             num_classes=13)

        label_onehot = tf.one_hot(label, 13, axis=-1, dtype=tf.int32)
        cost_s = tf.losses.softmax_cross_entropy(label_onehot, predictions)
        cost_s = tf.reduce_mean(cost_s)
        acc = tf.nn.in_top_k(predictions, label, 1)
    else:
        predictions = tf.one_hot(label, 13, 1, 0)
        label_onehot = tf.one_hot(label, 13, 1, 0)
        cost_s = 0
        acc = 0
        # var_prob is only computed in the softmax branch above; define it
        # here as well so the summary and the returned dict stay valid.
        var_prob = tf.constant(0.0)
    # Using Summaries for Tensorboard
    tf.summary.scalar('cost_vae', cost_vae)
    tf.summary.scalar('cost_s', cost_s)
    tf.summary.scalar('loss_x', loss_x)
    tf.summary.scalar('loss_z', loss_z)
    tf.summary.scalar('corrupt_rec', corrupt_rec)
    tf.summary.scalar('corrupt_cls', corrupt_cls)
    tf.summary.scalar('var_prob', var_prob)
    merged = tf.summary.merge_all()

    return {
        'cost_vae': cost_vae,
        'cost_s': cost_s,
        'loss_x': loss_x,
        'loss_z': loss_z,
        'Ws': Ws,
        'x': x,
        't': t,
        'label': label,
        'label_onehot': label_onehot,
        'predictions': predictions,
        'z': z,
        'y': y,
        'acc': acc,
        'keep_prob': keep_prob,
        'corrupt_rec': corrupt_rec,
        'corrupt_cls': corrupt_cls,
        'var_prob': var_prob,
        'train': phase_train,
        'merged': merged
    }
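A hedged usage sketch for the dictionary returned by VAE() above; the zero-filled batch and the Adam step are placeholders rather than part of the original project (which supplies its own utils and batch_norm helpers), but they show how the placeholders documented in the docstring ('x', 't', 'train', 'keep_prob', 'corrupt_rec', 'corrupt_cls') are fed.

import numpy as np

# Hypothetical single training step on the dict returned by VAE();
# real data would replace the zero-filled batch below.
model = VAE(variational=True, dropout=True)
train_step = tf.train.AdamOptimizer(1e-4).minimize(model['cost_vae'])
batch_x = np.zeros((16, 784), dtype=np.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, cost = sess.run([train_step, model['cost_vae']],
                       feed_dict={model['x']: batch_x,
                                  model['t']: batch_x,        # reconstruct the input
                                  model['train']: True,       # phase_train for batch norm
                                  model['keep_prob']: 0.8,    # dropout keep probability
                                  model['corrupt_rec']: 1.0,  # 1.0 = no corruption
                                  model['corrupt_cls']: 1.0})
    print('cost_vae: %f' % cost)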
Example #19
def network_alexnet_v2():
    input_shape = [1, 224, 224, 3]
    input_ = tf.placeholder(dtype=tf.float32, name='input', shape=input_shape)
    net, _end_points = alexnet_v2(input_, num_classes=1000, is_training=False)
    return net
Example #20
    def build_model(self):
        """
        :return:
        """

        """
        Helper Variables
        """
        self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
        self.global_step_inc = self.global_step_tensor.assign(self.global_step_tensor + 1)
        self.global_epoch_tensor = tf.Variable(0, trainable=False, name='global_epoch')
        self.global_epoch_inc = self.global_epoch_tensor.assign(self.global_epoch_tensor + 1)

        """
        Inputs to the network
        """
        print("Input to alexnet")
        with tf.variable_scope('inputs'):
            self.x, self.y = self.data_loader.get_input()
            self.is_training = tf.placeholder(tf.bool, name='Training_flag')
        tf.add_to_collection('inputs', self.x)
        tf.add_to_collection('inputs', self.y)
        tf.add_to_collection('inputs', self.is_training)

        """
        Network Architecture
        """

        print("network arch alexnet")
        with tf.variable_scope('network'):
            self.logits, end_points = alexnet.alexnet_v2(inputs=self.x, num_classes=self.num_classes)
#            self.logits = tf.squeeze(self.logits, axis=[1, 2])

            print("network output alexnet")
            with tf.variable_scope('out'):
                # self.out = tf.squeeze(end_points['predictions'], axis=[1,2])
                self.out = tf.nn.softmax(self.logits, dim=-1)

            tf.add_to_collection('out', self.out)

            print("Logits shape: ", self.logits.shape)
            print("predictions out shape: ", self.out.shape)

            print("network output argmax alexnet")
            with tf.variable_scope('out_argmax'):
                self.out_argmax = tf.argmax(self.logits, axis=-1, output_type=tf.int64, name='out_argmax')
                # self.out_argmax = tf.squeeze(tf.argmax(self.out, 1), axis=[1])

                print("Arg Max Shape: ", self.out_argmax.shape)

        print("loss alexnet")
        with tf.variable_scope('loss-acc'):
            # one_hot_y = tf.one_hot(indices=self.y, depth=self.num_classes)

            self.loss = tf.losses.sparse_softmax_cross_entropy(labels=self.y, logits=self.logits)

            # probabilities = end_points['Predictions']

            # accuracy, accuracy_update = tf.metrics.accuracy(labels = one_hot_y, predictions = self.out_argmax)
            self.acc = tf.reduce_mean(tf.cast(tf.equal(self.y, self.out_argmax), tf.float32))

        with tf.variable_scope('train_step'):
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_step = self.optimizer.minimize(self.loss, global_step=self.global_step_tensor)

        tf.add_to_collection('train', self.train_step)
        tf.add_to_collection('train', self.loss)
        tf.add_to_collection('train', self.acc)
Example #21
def my_model_fn(
    features,  # This is batch_features from input_fn
    labels,  # This is batch_labels from input_fn
    mode):  # An instance of tf.estimator.ModeKeys, see below

    print('HIT! my_model_fn() CALLED!')

    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    y_pred, _ = alexnet.alexnet_v2(features, num_classes=6, is_training=True)
    '''
    py_x = tf.nn.relu(py_x)
    
    weights = {
        'h1': tf.Variable(tf.random_normal([1000, 6]),name='w_pose'),
    }
    biases = {
        'b1': tf.Variable(tf.zeros([6]),name='b_pose'),
    }
    
    y_pred = tf.add(tf.matmul(py_x, weights['h1']), biases['b1'])
    '''

    print_out = tf.add(0.0, labels, name='print_out')

    # 1. Prediction mode
    # Return our prediction
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=y_pred)

    # Evaluation and Training mode

    # Calculate the loss
    loss = tf.reduce_mean(tf.squared_difference(y_pred, labels))

    # 2. Evaluation mode
    # Return our loss (which is used to evaluate our model)
    # Extra TensorBoard metrics (e.g. accuracy) could be reported here
    # through the eval_metric_ops argument of EstimatorSpec;
    # to log values during training, see tf.summary.scalar below
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    # If mode is not PREDICT nor EVAL, then we must be in TRAIN
    assert mode == tf.estimator.ModeKeys.TRAIN, "TRAIN is only ModeKey left"

    # 3. Training mode

    # Our objective (train_op) is to minimize loss with Adam (lr=1e-4)
    # Provide the global step counter (used to count gradient updates)
    optimizer = tf.train.AdamOptimizer(1e-4)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())

    # Log the training loss to TensorBoard
    # Obs: summaries written here only apply during mode == ModeKeys.TRAIN
    # To report values during evaluation, see eval_metric_ops above
    tf.summary.scalar('loss', loss)

    # Return training operations: loss and train_op
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
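A hedged sketch (not from the original code) of plugging my_model_fn into the Estimator API; the constant-tensor input_fn and the model_dir are stand-ins, shaped to match the 224x224 inputs and 6-dimensional targets used above.

def train_input_fn():
    # Dummy (features, labels) batch; a real input_fn would read data here.
    features = tf.zeros([2, 224, 224, 3])
    labels = tf.zeros([2, 6])
    return features, labels

estimator = tf.estimator.Estimator(model_fn=my_model_fn,
                                   model_dir='/tmp/alexnet_pose_model')
estimator.train(input_fn=train_input_fn, steps=10)
print(estimator.evaluate(input_fn=train_input_fn, steps=1))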
Example #22
def run_training():

    with tf.Graph().as_default():
        with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
            #with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            images, labels = inputs(train='train',
                                    batch_size=gd.BATCH_SIZE,
                                    num_epochs=FLAGS.num_epochs)

            images_val, labels_val = inputs(train='val',
                                            batch_size=gd.BATCH_SIZE_VAL,
                                            num_epochs=FLAGS.num_epochs)

            images = tf.reshape(images, [-1, gd.INPUT_SIZE, gd.INPUT_SIZE, 3])
            # print("images:")
            # print(images)

            #logits,description=resnet_v2.resnet_v2_101(images,4,is_training=True)
            logits, description = alexnet.alexnet_v2(
                images, num_classes=gd.NUM_CLASSES, is_training=True)
            print('logits:')
            print(logits)
            print('description:')
            print(description)
            #loss=slim.losses.softmax_cross_entropy(logits, labels)
            tf.get_variable_scope().reuse_variables()

            images_val = tf.reshape(images_val,
                                    [-1, gd.INPUT_SIZE, gd.INPUT_SIZE, 3])
            #loss=slim.losses.softmax_cross_entropy(logits, labels)
            logits_val, _ = alexnet.alexnet_v2(images_val,
                                               num_classes=gd.NUM_CLASSES,
                                               is_training=True)

            cross_entropy = calc_loss(logits, labels)
            tf.summary.scalar('entropy_mean', tf.reduce_mean(cross_entropy))

            print("cross_entropy:")
            print(cross_entropy)
            loss_beta = 0.001

            #variable_list= tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

            weight_variable_list = [
                v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                if v.name.endswith('weights:0')
            ]
            #print("variable_list:")
            #print(variable_list)
            print("weight_list:")
            print(weight_variable_list)

            l2_loss = tf.add_n(
                [tf.nn.l2_loss(v) for v in weight_variable_list]) * loss_beta
            tf.summary.scalar('l2_loss', tf.reduce_mean(l2_loss))
            print("l2_loss:")
            print(l2_loss)

            print("add result:")
            print(tf.add(cross_entropy, l2_loss))

            loss_total = tf.reduce_mean(tf.add(cross_entropy, l2_loss),
                                        name="total_loss")

            tf.summary.scalar('total_loss', loss_total)

            #l2_loss=tf.add_n([ tf.nn.l2_loss(v) for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ]) * loss_beta

            optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)

            global_step = tf.Variable(0, name='global_step', trainable=False)

            train_op = optimizer.minimize(loss_total, global_step=global_step)

            eval_correct = evaluation(logits, labels, 'train')

            eval_correct_eval = evaluation(logits_val, labels_val, 'val')
            # train_op=slim.learning.create_train_op(loss,optimizer)

            # logdir=FLAGS.log_dir

            # slim.learning.train(train_op,logdir,number_of_steps=1000,
            # 	save_summaries_secs=300,save_interval_secs=600)

            summary_op = tf.summary.merge_all()

        init_op = tf.initialize_all_variables()

        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init_op)
            summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
            coord = tf.train.Coordinator()

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                step = 0
                while not coord.should_stop():
                    start_time = time.time()
                    _, loss_value = sess.run([train_op, loss_total])
                    if step % 10 == 0:
                        summary_str = sess.run(summary_op)
                        summary_writer.add_summary(summary_str, step)
                        # print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                        #                                  duration))
                        print('step %d : loss = %.4f' % (step, loss_value))
                        precision_test = do_eval(sess, eval_correct_eval,
                                                 log_name, 'val')
                        logfile = open(log_name, 'a')
                        logfile.write('Step %d: loss = %.4f \n' %
                                      (step, loss_value))
                        logfile.close()

                    if step % 100 == 0 or step == FLAGS.max_steps:
                        logfile = open(log_name, 'a')
                        logfile.write('Train:\n')
                        logfile.close()

                        print('Train:')
                        do_eval(sess, eval_correct, log_name, 'train')

                        logfile = open(log_name, 'a')
                        logfile.write('Val:\n')
                        logfile.close()
                        print('Val:')
                        #precision_test=do_eval(sess,eval_correct_eval,log_name,"val")
                        summary_str = sess.run(summary_op)
                        summary_writer.add_summary(summary_str, step)

#if step%10000 == 0 or step == FLAGS.max_steps:
                    if step % 2000 == 0 and precision_test > 0.98:
                        checkpoint_file = FLAGS.log_dir + '/' + "alexnet_model_" + str(
                            step) + '_' + str(precision_test)
                        saver.save(sess, checkpoint_file)

                    if step % 10000 == 0 or step == FLAGS.max_steps:
                        checkpoint_file = FLAGS.log_dir + '/' + "alexnet_model_" + str(
                            step)

                        saver.save(sess, checkpoint_file)

                    step += 1
            except tf.errors.OutOfRangeError:
                f = open(log_name, 'a')
                f.write('Done training for  epochs,steps.\n')
                f.close()
            finally:
                coord.request_stop()

            coord.join(threads)
Example #23
File: analysis.py  Project: nirvus/test
def run_testing():

    with tf.Graph().as_default():

        with slim.arg_scope(vgg.vgg_arg_scope()):

            images, labels, filenames = inputs(FLAGS.batch_size,
                                               FLAGS.num_epochs)

            images = tf.reshape(images, [-1, gd.INPUT_SIZE, gd.INPUT_SIZE, 3])
            logits, end_points = alexnet.alexnet_v2(images,
                                                    num_classes=gd.NUM_CLASSES,
                                                    is_training=False)

            print(labels)

            print(logits)

            eps = tf.constant(value=1e-10)

            flat_logits = logits + eps

            softmax = tf.nn.softmax(flat_logits)

            probability = tf.reduce_max(softmax, axis=1)
            ll = tf.argmax(logits, axis=1)
            print(ll)
            variables_to_restore = slim.get_variables_to_restore()

            saver = tf.train.Saver(variables_to_restore)

            eval_correct = evaluation(logits, labels)

        config = tf.ConfigProto()

        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            saver.restore(sess, checkpoint_file)

            coord = tf.train.Coordinator()

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            step = 0

            if not os.path.exists(gd.DIR_DESCRIPTION):
                os.makedirs(gd.DIR_DESCRIPTION)

            csvfile = open(
                gd.DIR_DESCRIPTION +
                "/12cls_2017-11-16_alexnet_sensi_color_change_wrongprediction.csv",
                "a")
            writer = csv.writer(csvfile)
            writer.writerow(['labels', 'prediction', 'filename'])

            file_name2 = "/detail_result.csv"
            csvfile2 = open(gd.DIR_DESCRIPTION + file_name2, "wb")
            writer2 = csv.writer(csvfile2)
            writer2.writerow(['labels', 'prediction', 'probability'])

            for step in range(gd.TOTAL):
                #while not coord.should_stop():
                #accuracy=do_eval(sess,eval_correct,log_name)

                labels_out, prediction_out, filename, softmax_out, probability_max = sess.run(
                    [labels, ll, filenames, softmax, probability])
                print("%d : %d ,%d ,max_probability: %f" %
                      (step, labels_out[0], prediction_out[0],
                       probability_max[0]))

                writer2.writerow(
                    [labels_out[0], prediction_out[0], probability_max[0]])
                #print(labels_out[0])

                #print(prediction_out[0])

                count_label[labels_out[0]] += 1

                if labels_out[0] == prediction_out[0]:
                    count_prediction[prediction_out[0]] += 1
                else:
                    writer.writerow(
                        [labels_out[0], prediction_out[0], filename[0]])
                confusion_matrix[labels_out[0]][prediction_out[0]] += 1
                #details_accuray(labels_out,prediction_out,gd.NUM_CLASSES)
            csvfile.close()
        print(count_label)
        print(count_prediction)
        print(confusion_matrix)
        print('\n')
        for i in range(num_of_class):
            print(confusion_matrix[i])
        precision_result = [0 for i in range(num_of_class)]
        recall_result = [0 for i in range(num_of_class)]
        #for i in range(num_of_class):
        #	precision_result[i]=confusion_matrix[i][i]/
        precision_sum = map(sum, zip(*confusion_matrix))

        print("precision_sum:")
        print(precision_sum)
        for i in range(num_of_class):
            precision_result[i] = confusion_matrix[i][i] / precision_sum[i]

        print("average_precision:")
        print(precision_result)

        print("mean_average_precision:")
        print(sum(precision_result) / num_of_class)

        print("recall_sum:")
        recall_sum = map(sum, confusion_matrix)
        print(recall_sum)

        for i in range(num_of_class):
            recall_result[i] = confusion_matrix[i][i] / recall_sum[i]
        print("recall:")
        print(recall_result)

        print("mean_recall:")
        print(sum(recall_result) / num_of_class)

        print("accuracy:%d/%d" % (sum(count_prediction), sum(count_label)))
        #print(sum(count_prediction))
        #print(count_prediction)
        #print(sum(count_label))
        print(sum(count_prediction) / sum(count_label))
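For reference, a compact NumPy sketch (not in the original script) of the per-class precision and recall that the loops above compute from confusion_matrix, where rows index the true labels and columns the predictions.

import numpy as np

def precision_recall(confusion_matrix):
    # confusion_matrix[label][prediction] counts, as filled in run_testing().
    cm = np.asarray(confusion_matrix, dtype=np.float64)
    precision = np.diag(cm) / cm.sum(axis=0)  # per predicted class (column sums)
    recall = np.diag(cm) / cm.sum(axis=1)     # per true class (row sums)
    return precision, recall, precision.mean(), recall.mean()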