    def fit(self, X, Y):
        """
        X: of shape [data-size, feature-size]
        Y: of shape [data-size]
        """
        self.feature_size = X.shape[-1]
        # w of shape [feature-size]
        self.w = np.random.rand(self.feature_size)
        # b of shape [1]
        self.b = np.random.rand(1)

        for step in range(self.max_steps):
            # pred of shape [data-size]
            pred = self._predict(X)
            # Per-sample ascent direction for the bias: the negative of the
            # BCE gradient, of shape [data-size]
            gradient_b = Y - pred
            # Per-sample ascent direction for the weights, of shape [data-size, feature-size]
            gradient_w = gradient_b[:, None] * X
            # Average the per-sample gradients across the batch
            gradient_b = gradient_b.mean(axis=0)
            gradient_w = gradient_w.mean(axis=0)
            # Gradient ascent on the log-likelihood (implicit learning rate of 1)
            self.w += gradient_w
            self.b += gradient_b
            loss = binary_cross_entropy(pred, Y)
            if self.verbose:
                print(f"Step {step}, Loss is {loss}...")
Example #2
    def calc_class_loss(self, true_obj, true_class, pred_class):
        """
        calculate loss of class prediction

        inputs:
        true_obj: whether an object is present, from ground truth, in shape of (batch, grid, grid, anchor, 1)
        true_class: one-hot class labels from ground truth, in shape of (batch, grid, grid, anchor, num_classes)
        pred_class: per-class probabilities from model prediction, in shape of (batch, grid, grid, anchor, num_classes)

        outputs:
        class_loss: class loss per image, in shape of (batch,)
        """
        # Yolov1:
        # "Note that the loss function only penalizes classification error
        # if an object is present in that grid cell (hence the conditional
        # class probability discussed earlier)."
        class_loss = binary_cross_entropy(pred_class, true_class)
        class_loss = true_obj * class_loss
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3, 4))
        return class_loss
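The binary_cross_entropy used by these loss functions is not shown on this page. For the masking above to work, it must return per-element losses with no reduction; a plausible sketch (assumed, not the original definition):

import tensorflow as tf

def binary_cross_entropy(pred, target, eps=1e-7):
    # Elementwise BCE with no reduction, so callers can mask before summing
    pred = tf.clip_by_value(pred, eps, 1.0 - eps)
    return -(target * tf.math.log(pred) + (1.0 - target) * tf.math.log(1.0 - pred))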
Example #3
    def calc_obj_loss(self, true_obj, pred_obj, ignore_mask):
        """
        calculate objectness loss: binary cross-entropy over the object and
        no-object terms, with the no-object term scaled by lamda_noobj

        inputs:
        true_obj: objectness from ground truth in shape of (batch, grid, grid, anchor, 1)
        pred_obj: objectness from model prediction in shape of (batch, grid, grid, anchor, 1)
        ignore_mask: mask that zeroes the no-object penalty where a prediction overlaps a ground-truth box, in shape of (batch, grid, grid, anchor, 1)

        outputs:
        obj_loss: objectness loss per image, in shape of (batch,)
        """
        obj_entropy = binary_cross_entropy(pred_obj, true_obj)

        obj_loss = true_obj * obj_entropy
        noobj_loss = (1 - true_obj) * obj_entropy * ignore_mask

        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3, 4))
        noobj_loss = tf.reduce_sum(noobj_loss,
                                   axis=(1, 2, 3, 4)) * self.lamda_noobj

        return obj_loss + noobj_loss
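A quick shape check with dummy tensors (instance name and sizes are hypothetical, assuming eager execution):

# batch=2, grid=13, anchors=3; all cells empty, so only the no-object term fires
true_obj = tf.zeros((2, 13, 13, 3, 1))
pred_obj = tf.fill((2, 13, 13, 3, 1), 0.1)
ignore_mask = tf.ones((2, 13, 13, 3, 1))
loss = yolo_loss.calc_obj_loss(true_obj, pred_obj, ignore_mask)  # shape (2,)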
Example #4

# H (the input activations) and Y (the one-hot labels) are assumed to be
# defined earlier in the original notebook, e.g. as placeholder tensors
# Number of units in each fully connected layer; the last layer outputs 4 class scores
layers = [100, 100, 4]
for layer_i, n_output_i in enumerate(layers):
    H, W = fully_connected(H, n_output=n_output_i, name=layer_i)
    if layer_i == len(layers) - 1:
        H = tf.nn.softmax(H)
    else:
        H = tf.nn.relu(H)

Y_predicted = H

# Cost function
loss = binary_cross_entropy(Y_predicted, Y)
cost = tf.reduce_mean(tf.reduce_sum(loss, 1))

# Measure of accuracy
predicted_y = tf.argmax(Y_predicted, 1)
actual_y = tf.argmax(Y, 1)
correct_prediction = tf.equal(predicted_y, actual_y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Training parameters
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
n_epochs = 200

sess = tf.Session()
sess.run(tf.global_variables_initializer())
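fully_connected comes from earlier in the original notebook and returns both the layer output and its weight matrix. A minimal TF1-style stand-in consistent with that call signature (assumed, not the original definition):

def fully_connected(H, n_output, name):
    # Affine layer: returns the activations and the weight matrix
    with tf.variable_scope(str(name)):
        n_input = H.get_shape().as_list()[1]
        W = tf.get_variable('W', shape=[n_input, n_output],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', shape=[n_output],
                            initializer=tf.zeros_initializer())
        return tf.matmul(H, W) + b, W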
Example #5
import argparse

import numpy as np

# The source module for these helpers is not shown in the original snippet;
# the name "utils" is assumed here
from utils import (
    open_datafile,
    normalize,
    load_model,
    binary_cross_entropy,
    roc,
)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("dataset_test",
                        type=open_datafile,
                        help="dataset to use")
    parser.add_argument("model", help="model to use")
    parser.add_argument("-vi",
                        "--visu",
                        help="Display graphs",
                        action="store_true")

    args = parser.parse_args()
    n = load_model(args.model)
    # test = args.dataset_test.drop(args.dataset_test.columns[0], axis=1)
    test = args.dataset_test[[
        1, 2, 3, 8, 11, 12, 17, 18, 19, 21, 26, 28, 30, 31
    ]]
    test = normalize(test)
    test = np.array(test)
    error, acc = binary_cross_entropy(test, n)
    print(f"Cross Binary Entropy Error = {error:.5f}")
    print(f"Accuracy = {acc:.5f}")
    if args.visu:
        roc(test, n)
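Assuming the script is saved as predict.py (script and file names here are hypothetical), a typical invocation:

python predict.py data_test.csv model.pkl --visu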