Example no. 1
0
def square_loss(props, lbls, mask=None):
    """
    Compute the square (L2) loss and its gradients.

    Parameters
    ----------
    props: numpy array, forward pass output
    lbls:  numpy array, ground truth labeling
    mask:  optional mask applied to both volumes

    Return
    ------
    err:   cost energy
    grdts: numpy array, gradient volumes
    """
    # Mask both volumes up front (presumably a no-op when mask is None —
    # utils.mask_dict_vol is defined elsewhere; confirm against its source)
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    err = 0
    grdts = dict()

    for key in props:
        residual = props[key] - lbls[key]
        # d/dx (x - y)^2 = 2 * (x - y)
        grdts[key] = 2 * residual
        err += np.sum(np.square(residual))

    return (props, err, grdts)
Example no. 2
0
def get_cls(props, lbls, mask=None):
    """
    Compute the classification error.

    Parameters
    ----------
    props : dict of array, network propagation output volumes.
    lbls  : dict of array, ground truth
    mask  : optional dict of mask arrays applied to both inputs

    Returns
    -------
    c : number of classification errors
    """
    c = 0.0

    # Applying mask if it exists
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    # Threshold both volumes at 0.5 and count voxel-level disagreements.
    # NOTE: .items() replaces the Python-2-only .iteritems(), matching the
    # style used by the other functions in this file.
    for name, prop in props.items():
        lbl = lbls[name]
        c += np.count_nonzero((prop > 0.5) != (lbl > 0.5))

    return c
Example no. 3
0
def square_loss(props, lbls, mask=None):
    """
    Compute the square (L2) loss and its gradients.

    Parameters
    ----------
    props: numpy array, forward pass output
    lbls:  numpy array, ground truth labeling
    mask:  optional mask applied to both volumes

    Return
    ------
    err:   cost energy
    grdts: numpy array, gradient volumes
    """
    grdts = dict()
    err = 0

    # Applying mask if it exists
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    # NOTE: .items() replaces the Python-2-only .iteritems(), matching the
    # style used by the other functions in this file.
    for name, prop in props.items():
        lbl = lbls[name]

        # d/dx (x - y)^2 = 2 * (x - y)
        grdt = prop - lbl
        grdts[name] = grdt * 2

        err += np.sum(np.square(grdt))

    return (props, err, grdts)
Example no. 4
0
def get_cls(props, lbls, mask=None):
    """
    Compute the classification error.

    Parameters
    ----------
    props : dict of array, network propagation output volumes.
    lbls  : dict of array, ground truth
    mask  : optional dict of mask arrays applied to both inputs

    Returns
    -------
    c : number of classification errors
    """
    # (removed unused local `errors` dict that was never read or returned)
    c = 0.0

    # Applying mask if it exists
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    # Threshold both volumes at 0.5 and count voxel-level disagreements
    for name, prop in props.items():
        lbl = lbls[name]
        c += np.count_nonzero((prop > 0.5) != (lbl > 0.5))

    return c
Example no. 5
0
def square_square_loss(props, lbls, mask=None, margin=0.2):
    """
    Square-square loss: a square loss that zeroes out residuals whose
    magnitude is within ``margin`` before computing cost and gradients.
    """
    # Apply the mask to both volumes first (presumably a no-op when
    # mask is None — confirm against utils.mask_dict_vol)
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    error = 0
    gradients = {}

    for key, out in props.items():
        residual = out - lbls[key]
        # Residuals inside the margin contribute neither cost nor gradient
        residual[np.abs(residual) <= margin] = 0
        gradients[key] = 2 * residual
        error += np.sum(np.square(residual))

    return (props, error, gradients)
Example no. 6
0
def square_square_loss(props, lbls, mask=None, margin=0.2):
    """
    Square-square loss: a square loss that zeroes out residuals whose
    magnitude is within ``margin`` before computing cost and gradients.
    """
    gradients = dict()
    error = 0

    # Applying mask if it exists
    props = utils.mask_dict_vol(props, mask)
    lbls = utils.mask_dict_vol(lbls, mask)

    # NOTE: .items() replaces the Python-2-only .iteritems(), matching the
    # style used by the other functions in this file.
    for name, propagation in props.items():
        lbl = lbls[name]

        gradient = propagation - lbl
        # Residuals inside the margin contribute neither cost nor gradient
        gradient[np.abs(gradient) <= margin] = 0
        gradients[name] = gradient * 2

        error += np.sum(np.square(gradient))

    return (props, error, gradients)
Example no. 7
0
def binomial_cross_entropy(props, lbls, mask=None):
    """
    Compute the binomial (binary) cross-entropy cost.

    Parameters
    ----------
    props:  dict of network output arrays
    lbls:   dict of ground truth arrays
    mask:   optional dict of mask arrays

    Return
    ------
    err:    cost energy
    grdts:  dict of gradient volumes
    """
    grdts = dict()
    err = 0

    # Taking a slightly different strategy with masking
    # to improve the numerical stability of the error output
    entropy = dict()

    # Finding gradients.
    # NOTE: .items() replaces the Python-2-only .iteritems(), matching the
    # style used by the other functions in this file.
    for name, prop in props.items():
        lbl = lbls[name]

        grdts[name] = prop - lbl

        # NOTE(review): np.log will emit -inf/nan when prop hits 0 or 1
        # exactly; presumably props is a sigmoid output in (0, 1) — confirm.
        entropy[name] = -lbl*np.log(prop) - (1-lbl)*np.log(1-prop)

    # Applying mask if it exists — masking the gradient/entropy volumes
    # rather than the raw inputs.
    grdts = utils.mask_dict_vol(grdts, mask)
    entropy = utils.mask_dict_vol(entropy, mask)

    for name, vol in entropy.items():
        err += np.sum(vol)

    return (props, err, grdts)
Example no. 8
0
def multinomial_cross_entropy(props, lbls, mask=None):
    """
    Compute the multinomial cross entropy.

    Parameters
    ----------
    props:    list of forward pass output
    lbls:     list of ground truth labeling
    mask:     optional dict of mask arrays

    Return
    ------
    err:    cost energy
    grdts:  list of gradient volumes
    """
    grdts = dict()
    cost = 0

    # Taking a slightly different strategy with masking
    # to improve the numerical stability of the error output
    entropy = dict()

    # NOTE: .items() replaces the Python-2-only .iteritems(), matching the
    # style used by the other functions in this file.
    for name, prop in props.items():
        lbl = lbls[name]

        grdts[name] = prop - lbl

        # NOTE(review): np.log will emit -inf when prop hits 0 exactly;
        # presumably props is a softmax output in (0, 1] — confirm.
        entropy[name] = -lbl * np.log(prop)

    # Applying mask if it exists — masking the gradient/entropy volumes
    # rather than the raw inputs.
    grdts = utils.mask_dict_vol(grdts, mask)
    entropy = utils.mask_dict_vol(entropy, mask)

    for name, vol in entropy.items():
        cost += np.sum(vol)

    return (props, cost, grdts)
Example no. 9
0
def binomial_cross_entropy(props, lbls, mask=None):
    """
    Compute the binomial (binary) cross-entropy cost.

    Parameters
    ----------
    props:  dict of network output arrays
    lbls:   dict of ground truth arrays
    mask:   optional dict of mask arrays

    Return
    ------
    err:    cost energy
    grdts:  dict of gradient volumes
    """
    # Gradients and per-voxel entropy are computed before masking
    # to improve the numerical stability of the error output.
    grdts = {name: prop - lbls[name] for name, prop in props.items()}

    entropy = dict()
    for name, prop in props.items():
        lbl = lbls[name]
        entropy[name] = -lbl * np.log(prop) - (1 - lbl) * np.log(1 - prop)

    # Mask the gradient/entropy volumes rather than the raw inputs
    grdts = utils.mask_dict_vol(grdts, mask)
    entropy = utils.mask_dict_vol(entropy, mask)

    # Total cost is the sum over every entropy volume
    err = 0
    for vol in entropy.values():
        err += np.sum(vol)

    return (props, err, grdts)
Example no. 10
0
def multinomial_cross_entropy(props, lbls, mask=None):
    """
    Compute the multinomial cross entropy.

    Parameters
    ----------
    props:    list of forward pass output
    lbls:     list of ground truth labeling
    mask:     optional dict of mask arrays

    Return
    ------
    err:    cost energy
    grdts:  list of gradient volumes
    """
    # Gradients and per-voxel entropy are computed before masking
    # to improve the numerical stability of the error output.
    grdts = {name: prop - lbls[name] for name, prop in props.items()}
    entropy = {name: -lbls[name] * np.log(prop)
               for name, prop in props.items()}

    # Mask the gradient/entropy volumes rather than the raw inputs
    grdts = utils.mask_dict_vol(grdts, mask)
    entropy = utils.mask_dict_vol(entropy, mask)

    # Total cost is the sum over every entropy volume
    cost = 0
    for vol in entropy.values():
        cost += np.sum(vol)

    return (props, cost, grdts)