Example #1
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], labels of the output classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are assumed to have weight one.
    name: Operation name.

  Returns:
    Prediction and loss tensors.
  """
  with ops.name_scope(name, "softmax_classifier", [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.mul(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
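A minimal usage sketch for the function above, assuming `softmax_classifier` is in scope; the placeholder shapes, `n_features`, `n_classes`, and the zero initializers are illustrative assumptions, not part of the original:

import tensorflow as tf

n_features, n_classes = 10, 3
x = tf.placeholder(tf.float32, [None, n_features])        # features
y = tf.placeholder(tf.float32, [None, n_classes])         # one-hot labels
weights = tf.Variable(tf.zeros([n_features, n_classes]))  # [feature_size, n_classes]
biases = tf.Variable(tf.zeros([n_classes]))

probabilities, loss = softmax_classifier(x, y, weights, biases)

Note that `math_ops.mul` here is the pre-1.0 name of the op; TensorFlow 1.0 renamed it to `math_ops.multiply`, which Example #3 below uses.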
Example #2
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
    """Returns prediction and loss for softmax classifier.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], labels of the output classes.
    weights: Tensor, [batch_size, feature_size], linear transformation
      matrix.
    biases: Tensor, [batch_size], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    Prediction and loss tensors.
  """
    with ops.op_scope([tensor_in, labels], name, "softmax_classifier"):
        logits = nn.xw_plus_b(tensor_in, weights, biases)
        if class_weight is not None:
            logits = math_ops.mul(logits, class_weight)
        return nn.softmax(logits), loss_ops.softmax_cross_entropy(
            logits, labels)
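This variant predates TensorFlow 1.0: `ops.op_scope(values, name, default_name)` was later deprecated in favor of `ops.name_scope(name, default_name, values)`, the form used in Examples #1, #3, and #4. A sketch of the optional `class_weight` path, reusing the placeholders and variables from the sketch under Example #1 (the weight values are illustrative):

# Up-weight class 0; the [n_classes] vector broadcasts across the batch,
# scaling each logit before the softmax and the cross entropy.
class_weight = tf.constant([2.0, 1.0, 1.0])
probabilities, loss = softmax_classifier(
    x, y, weights, biases, class_weight=class_weight)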
Example #3
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
    """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [batch_size, feature_size], linear transformation
      matrix.
    biases: Tensor, [batch_size], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
    with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
        logits = nn.xw_plus_b(tensor_in, weights, biases)
        if class_weight is not None:
            logits = math_ops.multiply(logits, class_weight)
        return nn.softmax(logits), loss_ops.softmax_cross_entropy(
            logits, labels)
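As the docstring says, the first returned tensor holds probabilities, not hard class predictions; a short sketch of deriving predictions and an accuracy metric from it, reusing the names from the sketches above:

probabilities, loss = softmax_classifier(x, y, weights, biases)
predictions = tf.argmax(probabilities, 1)          # [batch_size] class ids
correct = tf.equal(predictions, tf.argmax(y, 1))   # compare against one-hot labels
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))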
Example #4
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are assumed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, "softmax_classifier", [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.mul(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
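Example #4 is the code of Example #1 under the expanded docstring of Example #3. A minimal training sketch built on the returned loss; the optimizer, learning rate, and the `batch_xs`/`batch_ys` feed names are assumptions, not part of the original:

_, loss = softmax_classifier(x, y, weights, biases)
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={x: batch_xs, y: batch_ys})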
Example #5
    def __init__(self):
        x = tf.placeholder(tf.float32, shape=[None, IMG_SIZE * IMG_SIZE * 3])
        y_ = tf.placeholder(tf.float32, shape=[None, 2])

        x_image = tf.reshape(x, [-1, IMG_SIZE, IMG_SIZE, 3])  # 128

        W_conv1 = tf.get_variable("W_conv1", shape=[3, 3, 3, 6], initializer=xavier())
        b_conv1 = tf.get_variable('b_conv1', [1, 1, 1, 6])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)  # 64

        W_conv2 = tf.get_variable("W_conv2", shape=[3, 3, 6, 6], initializer=xavier())
        b_conv2 = tf.get_variable('b_conv2', [1, 1, 1, 6])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)  # 32

        W_conv3 = tf.get_variable("W_conv3", shape=[3, 3, 6, 12], initializer=xavier())
        b_conv3 = tf.get_variable('b_conv3', [1, 1, 1, 12])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = max_pool_2x2(h_conv3)  # 16

        W_conv4 = tf.get_variable("W_conv4", shape=[3, 3, 12, 24], initializer=xavier())
        b_conv4 = tf.get_variable('b_conv4', [1, 1, 1, 24])
        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        h_pool4 = max_pool_2x2(h_conv4)  # 8

        h_pool4_flat = tf.reshape(h_pool4, [-1, 8 * 8 * 24])

        W_fc1 = tf.get_variable("W_fc1", shape=[8 * 8 * 24, 1024], initializer=xavier())
        b_fc1 = tf.get_variable('b_fc1', [1024], initializer=init_ops.zeros_initializer)
        h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)

        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

        W_fcO = tf.get_variable("W_fcO", shape=[1024, 2], initializer=xavier())
        b_fcO = tf.get_variable('b_fcO', [2], initializer=init_ops.zeros_initializer)

        logits = tf.matmul(h_fc1_drop, W_fcO) + b_fcO
        y_conv = tf.nn.softmax(logits)

        cross_entropy = loss_ops.softmax_cross_entropy(logits, y_)

        train_step = tf.train.AdagradOptimizer(0.01).minimize(cross_entropy)

        self.predictions = predictions = tf.argmax(y_conv, 1)
        self.pp = y_conv  # expose the raw softmax probabilities

        correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        self.x = x
        self.y_ = y_
        self.keep_prob = keep_prob
        self.trainStep = train_step
        self.accuracy = accuracy
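Examples #5 through #7 call helpers `conv2d`, `max_pool_2x2`, and `xavier` that are not shown. Plausible minimal definitions, consistent with the shape comments (each pooling step halves the spatial size: 128 -> 64 -> 32 -> 16 -> 8), would be:

import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xavier  # assumed import
from tensorflow.python.ops import init_ops                          # assumed import
loss_ops = tf.contrib.losses                                        # assumed alias

def conv2d(x, W):
    # Stride-1 convolution; 'SAME' padding keeps the spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 halves height and width.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')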
Example #6
    def __init__(self):
        x = tf.placeholder(tf.float32, shape=[None, IMG_SIZE * IMG_SIZE * 3])
        y_ = tf.placeholder(tf.float32, shape=[None, 2])

        x_image = tf.reshape(x, [-1, IMG_SIZE, IMG_SIZE, 3])  # 128

        W_conv1 = tf.get_variable("W_conv1", shape=[3, 3, 3, 6], initializer=xavier())
        b_conv1 = tf.get_variable('b_conv1', [1, 1, 1, 6])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)  # 64

        W_conv2 = tf.get_variable("W_conv2", shape=[3, 3, 6, 6], initializer=xavier())
        b_conv2 = tf.get_variable('b_conv2', [1, 1, 1, 6])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)  # 32

        W_conv3 = tf.get_variable("W_conv3", shape=[3, 3, 6, 12], initializer=xavier())
        b_conv3 = tf.get_variable('b_conv3', [1, 1, 1, 12])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = max_pool_2x2(h_conv3)  # 16

        W_conv4 = tf.get_variable("W_conv4", shape=[3, 3, 12, 24], initializer=xavier())
        b_conv4 = tf.get_variable('b_conv4', [1, 1, 1, 24])
        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        h_pool4 = max_pool_2x2(h_conv4)  # 8

        h_pool4_flat = tf.reshape(h_pool4, [-1, 8 * 8 * 24])

        W_fc1 = tf.get_variable("W_fc1", shape=[8 * 8 * 24, 1024], initializer=xavier())
        b_fc1 = tf.get_variable('b_fc1', [1024], initializer=init_ops.zeros_initializer)
        h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)

        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

        W_fcO = tf.get_variable("W_fcO", shape=[1024, 2], initializer=xavier())
        b_fcO = tf.get_variable('b_fcO', [2], initializer=init_ops.zeros_initializer)

        logits = tf.matmul(h_fc1_drop, W_fcO) + b_fcO
        y_conv = tf.nn.softmax(logits)

        cross_entropy = loss_ops.softmax_cross_entropy(logits, y_)

        train_step = tf.train.AdagradOptimizer(0.01).minimize(cross_entropy)

        self.predictions = predictions = tf.argmax(y_conv, 1)

        correct_prediction = tf.equal(predictions, tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        self.x = x
        self.y_ = y_
        self.keep_prob = keep_prob
        self.trainStep = train_step
        self.accuracy = accuracy
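A hypothetical training loop for the class above; `Model`, `batch_xs`, and `batch_ys` are assumed names, not part of the original:

model = Model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        sess.run(model.trainStep,
                 feed_dict={model.x: batch_xs, model.y_: batch_ys,
                            model.keep_prob: 0.5})   # dropout active in training
    acc = sess.run(model.accuracy,
                   feed_dict={model.x: batch_xs, model.y_: batch_ys,
                              model.keep_prob: 1.0})  # dropout off for evaluation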
Example #7
    def __init__(self):
        self.x = tf.placeholder(tf.float32,
                                shape=[None, IMG_SIZE * IMG_SIZE * 3])
        self.y_ = tf.placeholder(tf.float32, shape=[None, 3])

        x_image = tf.reshape(self.x,
                             [-1, IMG_SIZE, IMG_SIZE, 3])  # 128 * 128 * 3

        W_conv1 = tf.get_variable("W_conv1",
                                  shape=[3, 3, 3, 6],
                                  initializer=xavier())  # define the filter: (height, width, channels, number of filters)
        b_conv1 = tf.get_variable('b_conv1', [1, 1, 1, 6])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)  # 64

        W_conv2 = tf.get_variable("W_conv2",
                                  shape=[3, 3, 6, 6],
                                  initializer=xavier())
        b_conv2 = tf.get_variable('b_conv2', [1, 1, 1, 6])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)  # 32

        W_conv3 = tf.get_variable("W_conv3",
                                  shape=[3, 3, 6, 12],
                                  initializer=xavier())
        b_conv3 = tf.get_variable('b_conv3', [1, 1, 1, 12])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = max_pool_2x2(h_conv3)  # 16

        W_conv4 = tf.get_variable("W_conv4",
                                  shape=[3, 3, 12, 24],
                                  initializer=xavier())
        b_conv4 = tf.get_variable('b_conv4', [1, 1, 1, 24])
        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        h_pool4 = max_pool_2x2(h_conv4)  # 8

        h_pool4_flat = tf.reshape(h_pool4, [-1, 8 * 8 * 24])

        W_fc1 = tf.get_variable("W_fc1",
                                shape=[8 * 8 * 24, 1024],
                                initializer=xavier())
        b_fc1 = tf.get_variable('b_fc1', [1024],
                                initializer=init_ops.zeros_initializer)
        h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)

        self.keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        W_fcO = tf.get_variable("W_fcO", shape=[1024, 3], initializer=xavier())
        b_fcO = tf.get_variable('b_fcO', [3],
                                initializer=init_ops.zeros_initializer)

        logits = tf.matmul(h_fc1_drop, W_fcO) + b_fcO
        y_conv = tf.nn.softmax(logits)

        self.cross_entropy = loss_ops.softmax_cross_entropy(logits, self.y_)

        self.train_step = tf.train.AdagradOptimizer(0.01).minimize(
            self.cross_entropy)

        self.predictions = predictions = tf.argmax(y_conv, 1)

        correct_prediction = tf.equal(predictions, tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # for tensorboard
        tf.summary.scalar('loss', self.cross_entropy)
        tf.summary.scalar('acc', self.accuracy)
        self.summary_op = tf.summary.merge_all()
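A hypothetical sketch of consuming the merged summaries for TensorBoard; the log directory and the `Model` class name are assumptions:

model = Model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./logs', sess.graph)
    summary, _ = sess.run([model.summary_op, model.train_step],
                          feed_dict={model.x: batch_xs, model.y_: batch_ys,
                                     model.keep_prob: 0.5})
    writer.add_summary(summary, global_step=0)  # view with: tensorboard --logdir ./logs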