Example #1
def loss_car_stop(logits, net_outputs, batch_size=None):
    # net_outputs contains is_stop
    labels = net_outputs[0]  # shape: N * F
    # reshape to 1 dimension
    labels = tf.reshape(labels, [-1])

    prediction = logits[0]  # shape: (N * F) * 2
    # filter out samples that have no ground-truth label
    labels, prediction = util.filter_no_groundtruth_label(labels, prediction)

    labels_shape = tf.shape(labels)
    effective_batch_size = labels_shape[0]
    num_classes = prediction.get_shape()[-1].value

    dense_labels = util.to_one_hot_label(labels, effective_batch_size,
                                         num_classes)

    if FLAGS.class_balance_path != "":
        path = FLAGS.class_balance_path + "_stop.npy"
        empirical_distribution = np.load(path)
        weights = util.loss_weights(empirical_distribution,
                                    FLAGS.class_balance_epsilon)
        print("using weighted training: ", weights)
        mask = tf.gather(weights, labels)
    else:
        mask = 1.0

    # Cross entropy loss for the main softmax prediction.
    slim.losses.softmax_cross_entropy(prediction, dense_labels, weight=mask)
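
The util helpers called above are not shown on this page. A minimal sketch of plausible implementations, assuming loss_weights turns an empirical class distribution into smoothed inverse-frequency weights, filter_no_groundtruth_label drops rows carrying a sentinel label (taken to be -1 here), and to_one_hot_label wraps one-hot encoding; all three behaviors are inferred from the call sites, not from the real util module:

import numpy as np
import tensorflow as tf

def loss_weights(empirical_distribution, epsilon):
    # Assumed behavior: smoothed inverse-frequency class weights, so rare
    # classes contribute more to the loss; epsilon bounds the weights when
    # a class has near-zero empirical probability.
    dist = np.asarray(empirical_distribution, dtype=np.float32)
    w = 1.0 / (dist + epsilon)
    return w / w.sum() * len(w)  # normalize to mean weight 1.0

def filter_no_groundtruth_label(labels, prediction, no_gt=-1):
    # Assumed behavior: drop rows whose label equals the no-ground-truth
    # sentinel (hypothetically -1) from labels and predictions alike.
    keep = tf.not_equal(labels, no_gt)
    return tf.boolean_mask(labels, keep), tf.boolean_mask(prediction, keep)

def to_one_hot_label(labels, effective_batch_size, num_classes):
    # Assumed behavior: dense one-hot targets. effective_batch_size is
    # unused in this sketch, though the real helper may need it (e.g. for
    # an explicit output shape with sparse_to_dense).
    return tf.one_hot(labels, num_classes, dtype=tf.float32)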
Example #2
def loss_car_joint(logits, net_outputs, batch_size=None):
    # net_outputs contains is_stop, turn, locs
    future_labels = net_outputs[2]  # shape: N * F * 2
    # reshape to 2 dimensions
    num_classes = future_labels.get_shape()[-1].value
    future_labels = tf.reshape(future_labels, [-1, num_classes])

    # tf.py_func returns a list of output tensors; unpack the single result
    dense_labels = tf.py_func(course_speed_to_joint_bin, [future_labels],
                              [tf.float32])[0]

    if FLAGS.class_balance_path != "":
        path = FLAGS.class_balance_path + "_joint.npy"
        dist = np.load(path)

        weights = util.loss_weights(dist, FLAGS.class_balance_epsilon)
        print("using weighted training: ", weights)
        # take the label to be the bin with the maximum response
        labels = tf.argmax(dense_labels, dimension=1)
        masks = tf.gather(weights, labels)
    else:
        masks = 1.0

    future_predict = logits[0]
    slim.losses.softmax_cross_entropy(future_predict,
                                      dense_labels,
                                      weight=masks)
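
course_speed_to_joint_bin runs inside tf.py_func, i.e. as plain NumPy on the host. Its source isn't shown here; the sketch below shows the kind of joint binning it appears to perform, with N_BINS standing in for FLAGS.discretize_n_bins and the course/speed value ranges assumed purely for illustration:

import numpy as np

N_BINS = 10                       # stand-in for FLAGS.discretize_n_bins
COURSE_RANGE = (-np.pi, np.pi)    # assumed ranges, illustration only
SPEED_RANGE = (0.0, 30.0)

def course_speed_to_joint_bin(future_labels):
    # Hypothetical sketch: map each (course, speed) row to a one-hot
    # vector over the N_BINS * N_BINS joint grid.
    course, speed = future_labels[:, 0], future_labels[:, 1]
    course_edges = np.linspace(COURSE_RANGE[0], COURSE_RANGE[1], N_BINS + 1)
    speed_edges = np.linspace(SPEED_RANGE[0], SPEED_RANGE[1], N_BINS + 1)
    ci = np.clip(np.digitize(course, course_edges) - 1, 0, N_BINS - 1)
    si = np.clip(np.digitize(speed, speed_edges) - 1, 0, N_BINS - 1)
    joint = ci * N_BINS + si
    out = np.zeros((future_labels.shape[0], N_BINS * N_BINS), dtype=np.float32)
    out[np.arange(future_labels.shape[0]), joint] = 1.0
    return out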
Example #3
def loss_car_discrete(logits, net_outputs, batch_size=None):
    # net_outputs contains is_stop, turn
    dense_labels = net_outputs[1]  # shape: N * F * nclass
    # reshape to 2 dimensions
    num_classes = dense_labels.get_shape()[-1].value
    dense_labels = tf.reshape(dense_labels, [-1, num_classes])

    if FLAGS.class_balance_path != "":
        path = FLAGS.class_balance_path + "_discrete.npy"
        empirical_distribution = np.load(path)
        weights = util.loss_weights(empirical_distribution,
                                    FLAGS.class_balance_epsilon)
        print("using weighted training: ", weights)
        # take the label to be the bin with the maximum response
        labels = tf.argmax(dense_labels, dimension=1)
        mask = tf.gather(weights, labels)
    else:
        mask = 1.0

    # Cross entropy loss for the main softmax prediction.
    slim.losses.softmax_cross_entropy(logits[0], dense_labels, weight=mask)
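
Note that none of these functions return the loss value. slim.losses.softmax_cross_entropy registers the term in slim's losses collection (here with the older weight= keyword, which later slim versions renamed to weights=), and the trainer sums the collection afterwards. A self-contained toy example of that wiring, with made-up inputs in place of the real network:

import tensorflow as tf
slim = tf.contrib.slim

# Toy stand-ins so the wiring runs on its own.
inputs = tf.random_normal([8, 16])
logits = slim.fully_connected(inputs, 6, activation_fn=None)
labels = tf.one_hot(tf.random_uniform([8], 0, 6, dtype=tf.int32), 6)

# Register one cross-entropy term, as the loss_car_* functions above do...
slim.losses.softmax_cross_entropy(logits, labels)
# ...then fold every registered term into a single training objective.
total_loss = slim.losses.get_total_loss(add_regularization_losses=True)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)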
Example #4
def loss_car_loc_xy(logits, net_outputs, batch_size=None):
    # net_outputs contains is_stop, turn, locs
    future_labels = net_outputs[2]  # shape: N * F * 2
    # reshape to 2 dimensions
    num_classes = future_labels.get_shape()[-1].value
    NF = future_labels.get_shape()[0].value * \
         future_labels.get_shape()[1].value
    future_labels = tf.reshape(future_labels, [-1, num_classes])

    dense_course, dense_speed = tf.py_func(call_label_to_dense_smooth,
                                           [future_labels],
                                           [tf.float32, tf.float32])

    if FLAGS.class_balance_path != "":
        path = FLAGS.class_balance_path + "_continuous.npy"
        dists = np.load(path)

        masks = []
        dense_labels = [dense_course, dense_speed]
        for i in range(2):
            weights = util.loss_weights(dists[i], FLAGS.class_balance_epsilon)
            print("using weighted training: ", weights)
            # take the label to be the bin with the maximum response
            labels = tf.argmax(dense_labels[i], dimension=1)
            mask = tf.gather(weights, labels)
            mask.set_shape([NF])
            masks.append(mask)
    else:
        masks = [1.0, 1.0]

    future_predict = logits[0]  # shape: (N*F) * 2Nbins
    n = FLAGS.discretize_n_bins
    slim.losses.softmax_cross_entropy(future_predict[:, 0:n],
                                      dense_course,
                                      scope="cross_entropy_loss/course",
                                      weight=masks[0])
    slim.losses.softmax_cross_entropy(future_predict[:, n:],
                                      dense_speed,
                                      scope="cross_entropy_loss/speed",
                                      weight=masks[1])
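
This loss treats future_predict as two independent heads packed into one tensor: columns 0..n-1 score the course bins and columns n..2n-1 the speed bins, each trained with its own softmax. A matching inference-side decode, with n as a stand-in for FLAGS.discretize_n_bins:

import tensorflow as tf

n = 10  # stand-in for FLAGS.discretize_n_bins

# One vector of 2*n logits per sample: split, softmax each half, and pick
# the most likely course and speed bins independently.
future_predict = tf.placeholder(tf.float32, [None, 2 * n])
course_prob = tf.nn.softmax(future_predict[:, 0:n])
speed_prob = tf.nn.softmax(future_predict[:, n:])
course_bin = tf.argmax(course_prob, axis=1)
speed_bin = tf.argmax(speed_prob, axis=1)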