Example #1
from mygrad import abs, divide

def soft_sign(x, constant=False):
    """ Returns the soft sign function x / (1 + |x|).

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    constant : bool, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient).

    Returns
    -------
    mygrad.Tensor
        The soft sign function applied to `x` elementwise.

    Examples
    --------
    >>> import mygrad as mg
    >>> from mygrad.nnet.activations import soft_sign
    >>> x = mg.arange(-5, 6)
    >>> x
    Tensor([-5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5])
    >>> y = soft_sign(x); y
    Tensor([-0.83333333, -0.8       , -0.75      , -0.66666667, -0.5       ,
             0.        ,  0.5       ,  0.66666667,  0.75      ,  0.8       ,
             0.83333333])
    """
    return divide(x, 1 + abs(x), constant=constant)
Example #2
from mygrad import abs, mean

def l1_loss(outputs, targets):
    r''' Returns the L¹ loss Σ|xᵢ - yᵢ|, averaged over the number of data points.

    Parameters
    ----------
    outputs : mygrad.Tensor, shape=(N,)
        The predictions for each of the N pieces of data.

    targets : numpy.ndarray, shape=(N,)
        The correct value for each of the N pieces of data.

    Returns
    -------
    mygrad.Tensor, shape=()
        The average L¹ loss.

    Notes
    -----
    The L1 loss is given by

    .. math::
        \frac{1}{N}\sum_{i=1}^{N}|x_i - y_i|

    where :math:`N` is the number of data points and :math:`x_i`, :math:`y_i`
    are the entries of `outputs` and `targets`, respectively.
    '''
    return mean(abs(outputs - targets))
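
# A quick sanity check of l1_loss (a minimal sketch; assumes mygrad and numpy
# are installed):
import numpy as np
import mygrad as mg

outputs = mg.Tensor([1.0, 2.0, 3.0])
targets = np.array([1.5, 2.0, 2.0])
loss = l1_loss(outputs, targets)  # (0.5 + 0.0 + 1.0) / 3 = 0.5
loss.backward()                   # gradient of the mean absolute error w.r.t. `outputs`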
Example #3
# Assumes six trained emotion models (angry_model, fear_model, happy_model,
# sad_model, surprise_model, Neutral_model) are already in scope.
from collections import defaultdict

import face_recognition
import mygrad as mg

def emotion_test(pic_test):
    allModel = [angry_model, fear_model, happy_model, sad_model, surprise_model, Neutral_model]
    allModelName = ['angry', 'fearful', 'happy', 'sad', 'surprised', 'neutral']
    myDic = defaultdict(list)
    try:
        # Compute the 128-d face descriptor once; it is the same for every model.
        encode_test = face_recognition.face_encodings(pic_test)[0].reshape(1, 1, 128)
        for index, model in enumerate(allModel):
            # Score the descriptor with each emotion model.
            myDic[allModelName[index]] = mg.abs(model(encode_test)[0, 0])
    except IndexError:
        print("No face detected in the picture")
    ansTrue = []
    myDic['fearful'] += 0.2  # penalize 'fearful', which is detected too often
    if myDic['angry'] < 1:
        ansTrue += ["angry"]
    if myDic['sad'] < 0.9:
        ansTrue += ["sad"]
        del myDic['happy']  # 'sad' and 'happy' are mutually exclusive
    if myDic['fearful'] < 0.7:
        ansTrue += ["fearful"]
        try:
            del myDic['happy']
        except KeyError:
            print("'happy' was already removed")
    if myDic['surprised'] < 0.5:
        ansTrue += ['surprised']
    # Drop 'neutral' when it wins but only weakly clears its threshold.
    if min(myDic, key=myDic.get) == 'neutral' and myDic['neutral'] > 0.5:
        del myDic['neutral']
    ansTrue += [min(myDic, key=myDic.get)]  # add the lowest-scoring (most probable) emotion
    ansTrue = list(set(ansTrue))  # remove duplicates
    return ' and '.join(ansTrue)

# Use this to take a picture and store it: pic_test = take_picture()
# Then get the emotion as text: emotion_test(pic_test)
Example #4
from collections import defaultdict

import face_recognition
import mygrad as mg

def emotion_test(pic_test):
    '''
    Uses models to detect the emotion of a person.

    Parameters
    ----------
    pic_test
        A picture of a face.

    Returns
    -------
    str
        One or two emotions.
    '''
    allModel = [
        angry_model, fear_model, happy_model, sad_model, surprise_model,
        Neutral_model
    ]
    allModelName = ['angry', 'fearful', 'happy', 'sad', 'surprised', 'neutral']
    model_dic = defaultdict(list)
    # Pass the descriptors of the input image through every emotion model and
    # store each model's score in the dictionary.
    for index, model in enumerate(allModel):
        try:
            # Compute the image's 128-d descriptor using dlib.
            encode_test = face_recognition.face_encodings(pic_test)[0].reshape(1, 1, 128)
            # Use the descriptor to compute the emotional value for this model.
            model_dic[allModelName[index]] = mg.abs(model(encode_test)[0, 0])
        except IndexError:
            print("Did not detect face")
    ansTrue = []
    # Make 'fearful' less common, because it is detected too often.
    model_dic['fearful'] += 0.2
    if model_dic['angry'] < 1:  # anger threshold
        ansTrue += ["angry"]
    # Sadness threshold; if 'sad' is picked, 'happy' cannot be.
    if model_dic['sad'] < 0.9:
        ansTrue += ["sad"]
        del model_dic['happy']
    # Fear threshold; guard against deleting 'happy' twice.
    if model_dic['fearful'] < 0.7:
        ansTrue += ["fearful"]
        try:
            del model_dic['happy']
        except KeyError:
            print("No happy exists")
    if model_dic['surprised'] < 0.5:  # surprise threshold
        ansTrue += ['surprised']
    # If the winning emotion is 'neutral' with a value above the threshold, drop it.
    if min(model_dic, key=model_dic.get) == 'neutral' and model_dic['neutral'] > 0.5:
        del model_dic['neutral']
    ansTrue += [min(model_dic, key=model_dic.get)]  # add the most probable emotion
    ansTrue = list(set(ansTrue))  # delete repeats
    return ' and '.join(ansTrue)  # join the emotions with "and"
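
# Usage sketch (assumes the take_picture helper from Example #3 and the six
# pre-trained emotion models are in scope):
pic_test = take_picture()      # capture an image of a face
print(emotion_test(pic_test))  # e.g. "happy" or "sad and fearful"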
Example #5
def soft_sign(x):
    ''' Returns the soft sign function x/(1 + |x|).

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    Returns
    -------
    mygrad.Tensor
        The soft sign function applied to `x` elementwise.
    '''
    return x / (1 + abs(x))
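
# A minimal usage sketch (assumes mygrad is installed):
import mygrad as mg

x = mg.Tensor([-2.0, 0.0, 2.0])
y = soft_sign(x)  # Tensor([-0.66666667,  0.        ,  0.66666667])
y.backward()      # back-propagates through x / (1 + |x|)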
Example #6
import mygrad as mg

def l1_loss(preds, ans):
    ''' Returns the L1 loss sum(|preds - ans|), averaged over the number of rows (data points). '''
    l_val = mg.sum(mg.abs(preds - ans))
    row, col = preds.shape  # assumes 2-D predictions
    return l_val / row
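
# Usage sketch for this variant (a minimal example; note that it assumes 2-D
# predictions and averages over the number of rows):
import numpy as np
import mygrad as mg

preds = mg.Tensor([[1.0, 2.0], [3.0, 4.0]])
ans = np.array([[1.0, 1.0], [4.0, 4.0]])
print(l1_loss(preds, ans))  # sum of |diffs| = 2.0, over 2 rows -> Tensor(1.0)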
Example #7
        truth = y_train[batch_indices]
        loss = softmax_crossentropy(prediction, truth)

        loss.backward()        # back-propagate the loss through the network
        optimizer.step()       # update the model's weights
        loss.null_gradients()  # clear the gradients for the next batch

        plotter.set_train_batch({"loss": loss.item()}, batch_size=batch_size)
    plotter.set_train_epoch()

diff = 0   # running sum of absolute errors
total = 0  # running sum of the true values

# Tests the model on the training set
for i in range(len(y_train)):
    old = x_train[i]
    # reshape to (78, 1, 50): sequence-first (T, N, C) layout for the RNN
    w = np.ascontiguousarray(np.swapaxes(np.array(old).reshape(1, 78, 50), 0, 1))
    pred = rnn(w)
    true = y_train[i]
    diff += mg.abs(pred - true)  # accumulate the absolute error
    total += true                # accumulate the true values

# Inspect a single training example
i = 1
old = x_train[i]
w = np.ascontiguousarray(np.swapaxes(np.array(old).reshape(1, 78, 50), 0, 1))
pred = rnn(w)
true = y_train[i]
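
# A small illustrative addition (assumes, as the accumulation loop above does,
# that the RNN returns a scalar-shaped prediction):
print("prediction:", pred.item(), "truth:", true)
print("mean absolute error over the training set:", (diff / len(y_train)).item())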