Example #1
def eval_generator(model, gen_xy, length_xy, clss):

    # Flatten the per-output class lists into a single ordered list.
    classes = reduce(lambda x, y: x + y, clss, [])

    pbar = tqdm(total=length_xy)
    y_true = np.zeros((length_xy, len(classes)))
    y_pred = np.zeros((length_xy, len(classes)))

    row = 0  # running row offset; the enumerate index advances by 1, not by k
    for batch, (X, Ys) in enumerate(gen_xy):
        k = len(X)
        y_hat = model.predict(X)
        for j, y in enumerate(zip(*Ys)):
            assert len(X) == len(y)
            # Column range covered by the j-th output's class block.
            s = sum(len(cls) for cls in clss[:j])
            e = s + len(clss[j])
            y_pred[row:row + k, s:e], y_true[row:row + k, s:e] = y_hat[j], y
        row += k

        if (batch + 1) % 100 == 0:
            loss = np.mean(hinge(y_true, y_pred).eval(session=sess))
            pbar.set_description("Validation Loss:%.5f" % loss)
        pbar.update(k)

    pbar.close()

    y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
    y_true = y_true[~np.all(y_true == 0, axis=1)]

    loss = np.mean(hinge(y_true, y_pred).eval(session=sess))
    y_pred = oneminusone2zeroone(y_pred)
    y_true = oneminusone2zeroone(y_true)
    f_max = F_max(y_pred, y_true, classes, np.arange(0.1, 1, 0.1))

    return y_true, y_pred, loss, f_max
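The helpers oneminusone2zeroone, F_max, and the session sess are defined elsewhere in the original module. A minimal sketch of oneminusone2zeroone, assuming it maps {-1, 1}-encoded labels onto {0, 1}:

import numpy as np

def oneminusone2zeroone(y):
    # Hypothetical reconstruction: shift a {-1, 1} encoding to {0, 1}.
    return (np.asarray(y) + 1.0) / 2.0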
Example #2
def eval_generator(model, gen_xy, length_xy, classes):

    pbar = tqdm(total=length_xy)
    i, m, n = 0, length_xy, len(classes)
    y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
    for batch, (X, Y) in enumerate(gen_xy):
        assert len(X) == len(Y)
        k = len(Y)
        y_hat, y = model.predict(X), Y
        y_pred[i:i + k, :], y_true[i:i + k, :] = y_hat, y
        i += k  # advance the row offset by the batch size, not by one

        if (batch + 1) % 20 == 0:
            loss = np.mean(hinge(y, y_hat).eval(session=sess))
            pbar.set_description("Validation Loss:%.5f" % loss)
        pbar.update(k)

    pbar.close()

    y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
    y_true = y_true[~np.all(y_true == 0, axis=1)]

    loss = np.mean(hinge(y_true, y_pred).eval(session=sess))
    y_pred = oneminusone2zeroone(y_pred)
    y_true = oneminusone2zeroone(y_true)
    f_max = F_max(y_pred, y_true, classes, np.arange(0.1, 1, 0.1))

    return y_true, y_pred, loss, f_max
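F_max is likewise external to these snippets; in the protein-function-prediction literature it usually denotes the maximum F-score over a grid of decision thresholds. A sketch under that assumption (the classes argument is kept only for signature compatibility):

import numpy as np

def F_max(y_pred, y_true, classes, thresholds=np.arange(0.1, 1, 0.1)):
    # Hypothetical sketch: best F1 over candidate thresholds; assumes labels
    # were already mapped to {0, 1} by oneminusone2zeroone.
    best = 0.0
    for t in thresholds:
        y_bin = (y_pred >= t).astype(float)
        tp = (y_bin * y_true).sum()
        precision = tp / max(y_bin.sum(), 1.0)
        recall = tp / max(y_true.sum(), 1.0)
        if precision + recall > 0:
            best = max(best, 2 * precision * recall / (precision + recall))
    return best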
Example #3
def loss(y_true, y_pred):
    # conf and MaxHingeTarget come from the enclosing module
    # (see Example #5, which imports conf from plasma.conf).
    fac = MaxHingeTarget.fac
    # overall_fac = np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
    overall_fac = K.prod(K.cast(K.shape(y_pred)[1:], K.floatx()))
    max_val = K.max(y_pred, axis=-2)  # max over the temporal axis
    max_val1 = K.repeat(max_val, K.shape(y_pred)[-2])
    mask = K.cast(K.equal(max_val1, y_pred), K.floatx())
    # Keep predictions only where they attain the temporal max; elsewhere
    # substitute the target, so those positions add zero hinge loss for
    # +/-1 targets.
    y_pred1 = mask * y_pred + (1 - mask) * y_true
    weight_mask = K.mean(y_true, axis=-1)
    weight_mask = K.cast(K.greater(weight_mask, 0.0), K.floatx())  # positive label
    weight_mask = fac * weight_mask + (1 - weight_mask)
    # return weight_mask * squared_hinge(y_true, y_pred1)
    return conf['model']['loss_scale_factor'] * overall_fac * weight_mask \
        * hinge(y_true, y_pred1)
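The mask built above keeps the hinge penalty only where a prediction attains its temporal maximum; the same selection logic in plain NumPy, with illustrative shapes and values:

import numpy as np

y_true = np.array([[1.], [1.], [1.]])     # (timesteps, 1), all-positive targets
y_pred = np.array([[0.2], [0.9], [0.4]])  # predictions over time

mask = (y_pred == y_pred.max(axis=0)).astype(float)  # 1 only at the temporal max
y_pred1 = mask * y_pred + (1 - mask) * y_true        # elsewhere the hinge term vanishes

# hinge(1, y) = max(1 - y, 0): only the max step (0.9) contributes
print(np.maximum(1 - y_true * y_pred1, 0).mean())    # ~0.0333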
Example #4
def evaluate(model, X, Y, classes):
    # X and Y are dicts keyed by input shape; zip() assumes matching key order.
    i, m, n = 0, sum(len(v) for v in Y.values()), len(classes)
    y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
    for x_shp, y_shp in zip(X.keys(), Y.keys()):
        k = len(Y[y_shp])
        y_hat, y = model.predict(X[x_shp]), Y[y_shp]
        y_pred[i:i + k, :], y_true[i:i + k, :] = y_hat, y
        i += k
    loss = np.mean(hinge(y_true, y_pred).eval(session=sess))
    y_pred = oneminusone2zeroone(y_pred)
    y_true = oneminusone2zeroone(y_true)
    f_max = F_max(y_pred, y_true, classes)

    return y_true, y_pred, loss, f_max
Example #5
def loss(y_true, y_pred):
    from plasma.conf import conf
    fac = MaxHingeTarget.fac
    # overall_fac = np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
    overall_fac = K.prod(K.cast(K.shape(y_pred)[1:], K.floatx()))
    max_val = K.max(y_pred, axis=-2)  # temporal axis!
    max_val1 = K.repeat(max_val, K.shape(y_pred)[-2])
    mask = K.cast(K.equal(max_val1, y_pred), K.floatx())
    y_pred1 = mask * y_pred + (1 - mask) * y_true
    weight_mask = K.mean(y_true, axis=-1)
    weight_mask = K.cast(K.greater(weight_mask, 0.0), K.floatx())  # positive label!
    weight_mask = fac * weight_mask + (1 - weight_mask)
    # return weight_mask * squared_hinge(y_true, y_pred1)
    return conf['model']['loss_scale_factor'] * overall_fac * weight_mask \
        * hinge(y_true, y_pred1)
Example #6
def loss(y_true, y_pred):
    # TODO(KGF): this function is unused and unique to this class
    from plasma.conf import conf
    fac = MaxHingeTarget.fac
    # overall_fac =
    # np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
    overall_fac = K.prod(K.cast(K.shape(y_pred)[1:], K.floatx()))
    max_val = K.max(y_pred, axis=-2)  # temporal axis!
    max_val1 = K.repeat(max_val, K.shape(y_pred)[-2])
    mask = K.cast(K.equal(max_val1, y_pred), K.floatx())
    y_pred1 = mask * y_pred + (1-mask) * y_true
    weight_mask = K.mean(y_true, axis=-1)
    weight_mask = K.cast(K.greater(weight_mask, 0.0),
                         K.floatx())  # positive label!
    weight_mask = fac*weight_mask + (1 - weight_mask)
    # return weight_mask*squared_hinge(y_true, y_pred1)
    return conf['model']['loss_scale_factor'] * \
        overall_fac*weight_mask*hinge(y_true, y_pred1)
Example #7
def evaluate(model, X, Y, classes):

    # X and Y are dicts keyed by input shape; zip() assumes matching key order.
    i, m, n = 0, sum(len(v) for v in Y.values()), len(classes)
    y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
    for x_shp, y_shp in zip(X.keys(), Y.keys()):
        k = len(Y[y_shp])
        y_hat, y = model.predict(X[x_shp]), Y[y_shp]
        y_pred[i:i + k, :], y_true[i:i + k, :] = y_hat, y
        i += k
    loss = np.mean(hinge(y_true, y_pred).eval(session=sess))
    y_pred = oneminusone2zeroone(y_pred)
    y_true = oneminusone2zeroone(y_true)
    f_max = F_max(y_pred, y_true, classes)

    # `epoch` comes from the enclosing scope of the original module; note that
    # %3d pads with spaces (a zero-padded name would use %03d).
    model_path = 'checkpoints/1st-level-cnn-%3d-%.3f-%.2f.hdf5' % (epoch, loss,
                                                                   f_max)
    model.save_weights(model_path)

    return y_true, y_pred, loss, f_max
Example #8
# Assumed imports for this standalone snippet:
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import backend as K
from tensorflow.keras.losses import (sparse_categorical_crossentropy, hinge,
                                     categorical_crossentropy)
from tensorflow.python.ops import array_ops, math_ops

# Sparse Categorical Cross Entropy
t = LabelEncoder()
y_pred = tf.constant([[0.1, 0.1, 0.8], [0.1, 0.4, 0.5], [0.5, 0.3, 0.2],
                      [0.6, 0.3, 0.1]])
y_true = t.fit_transform(['Rain', 'Rain', 'High Chances of Rain', 'No Rain'])
print("transformed label: ", y_true)
y_true = tf.constant(y_true)
loss = sparse_categorical_crossentropy(y_true, y_pred)
loss = K.eval(loss)
print("Value of Sparse Categorical Cross Entropy is: ", loss)
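LabelEncoder orders classes alphabetically (High Chances of Rain=0, No Rain=1, Rain=2), so the transformed labels are [2, 2, 0, 1] and the per-sample losses are -ln(0.8), -ln(0.5), -ln(0.5), -ln(0.3) ≈ [0.223, 0.693, 0.693, 1.204].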

# hinge loss
y_true = tf.constant([[0., 1.], [0., 0.]])
y_pred = tf.constant([[0.7, 0.3], [0.4, 0.6]])
loss = hinge(y_true, y_pred)
a = K.eval(loss)
print("hinge loss: ", a)


# Custom Loss Function: categorical_crossentropy_with_label_smoothing
def categorical_crossentropy_with_label_smoothing(y_true,
                                                  y_pred,
                                                  label_smoothing=0.1):
    num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
    y_true = y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
    return categorical_crossentropy(y_true, y_pred)
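With three classes and label_smoothing=0.1, a one-hot row [1, 0, 0] becomes [1*0.9 + 0.1/3, 0.1/3, 0.1/3] ≈ [0.933, 0.033, 0.033]: the targets remain a valid probability distribution, but no class is ever exactly 0 or 1.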


y_true = tf.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
y_pred = tf.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])
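The snippet ends before the custom loss is actually invoked; completing it in the same pattern as the earlier calls:

loss = categorical_crossentropy_with_label_smoothing(y_true, y_pred)
print("categorical crossentropy with label smoothing: ", K.eval(loss))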