def testImage(image, faceCoordinates, emotion_classifier):
    """Classifies the emotions in the input image according to the face coordinates
  detected by a face detection engine.

  First calls preprocess and then uses the given emotion_classifier to detect
  emotions in the processed image.

  """
    testImg = preprocess(image, faceCoordinates, return_vector=True)

    # IMPORTANT: scale the image for it to be testable
    test = common.scale(testImg.reshape(1, len(testImg)))
    probs, emotion = emotion_classifier.classify(test)

    # classify returns a vector, as it is made to classify multiple test instances
    # at the same time.
    # We check if emotion is iterable before getting the first element, in case
    # someone uses an api in which a vector is not returned.
    if hasattr(emotion, '__iter__'):
        emotion = emotion[0]

    print "probs"
    print probs
    print "label"
    print emotion

    return emotion
def testImage(image, faceCoordinates, emotion_classifier):
  """Classifies the emotions in the input image according to the face coordinates
  detected by a face detection engine.

  First calls preprocess and then uses the given emotion_classifier to detect
  emotions in the processed image.

  """
  testImg = preprocess(image, faceCoordinates, return_vector=True)

  # IMPORTANT: scale the image for it to be testable
  test = common.scale(testImg.reshape(1, len(testImg)))
  probs, emotion = emotion_classifier.classify(test)

  # classify returns a vector, as it is made to classify multiple test instances
  # at the same time.
  # We check if emotion is iterable before getting the first element, in case
  # someone uses an api in which a vector is not returned.
  if hasattr(emotion, '__iter__'):
    emotion = emotion[0]

  print "probs"
  print probs
  print "label"
  print emotion

  return emotion
# Example #3
# 0
def trainNetWithAllData():
    unsupervisedData, data, labels = createTrainingSet()

    print "data.shape"
    print data.shape
    print "labels.shape"
    print labels.shape

    data = common.scale(data)
    unsupervisedData = None

    activationFunction = activationfunctions.Rectified()
    rbmActivationFunctionVisible = activationfunctions.Identity()
    rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

    unsupervisedLearningRate = 0.0001
    supervisedLearningRate = 0.001
    momentumMax = 0.99

    print "This is input data shape", data.shape
    print labels.shape

    net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
                 binary=False,
                 activationFunction=activationFunction,
                 rbmActivationFunctionVisible=rbmActivationFunctionVisible,
                 rbmActivationFunctionHidden=rbmActivationFunctionHidden,
                 unsupervisedLearningRate=unsupervisedLearningRate,
                 supervisedLearningRate=supervisedLearningRate,
                 momentumMax=momentumMax,
                 nesterovMomentum=True,
                 rbmNesterovMomentum=True,
                 rmsprop=True,
                 miniBatchSize=20,
                 hiddenDropout=0.5,
                 visibleDropout=0.8,
                 momentumFactorForLearningRateRBM=False,
                 firstRBMheuristic=False,
                 rbmVisibleDropout=1.0,
                 rbmHiddenDropout=1.0,
                 preTrainEpochs=10,
                 sparsityConstraintRbm=False,
                 sparsityRegularizationRbm=0.001,
                 sparsityTragetRbm=0.01)

    net.train(data,
              labels,
              maxEpochs=200,
              validation=False,
              unsupervisedData=unsupervisedData)

    with open(args.net_file, "wb") as f:
        pickle.dump(net, f)
    return net
# Example #4
# 0
def trainNetWithAllData():
    unsupervisedData, data, labels = createTrainingSet()

    print "data.shape"
    print data.shape
    print "labels.shape"
    print labels.shape

    data = common.scale(data)
    unsupervisedData = None

    activationFunction = activationfunctions.Rectified()
    rbmActivationFunctionVisible = activationfunctions.Identity()
    rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

    unsupervisedLearningRate = 0.0001
    supervisedLearningRate = 0.001
    momentumMax = 0.99

    # net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
    #            binary=False,
    #            activationFunction=activationFunction,
    #            rbmActivationFunctionVisible=rbmActivationFunctionVisible,
    #            rbmActivationFunctionHidden=rbmActivationFunctionHidden,
    #            unsupervisedLearningRate=unsupervisedLearningRate,
    #            supervisedLearningRate=supervisedLearningRate,
    #            momentumMax=momentumMax,
    #            nesterovMomentum=True,
    #            rbmNesterovMomentum=True,
    #            rmsprop=True,
    #            miniBatchSize=20,
    #            hiddenDropout=0.5,
    #            visibleDropout=0.8,
    #            momentumFactorForLearningRateRBM=False,
    #            firstRBMheuristic=False,
    #            rbmVisibleDropout=1.0,
    #            rbmHiddenDropout=1.0,
    #            preTrainEpochs=10,
    #            sparsityConstraintRbm=False,
    #            sparsityRegularizationRbm=0.001,
    #            sparsityTragetRbm=0.01)
    #
    # net.train(data, labels, maxEpochs=200,
    #           validation=False,
    #           unsupervisedData=unsupervisedData)

    net = cnn.CNN(width=30, height=40, classes=len(args.emotions))

    net.train(data, labels)

    with open(args.net_file, "wb") as f:
        pickle.dump(net, f)
    return net
def trainNetWithAllData():
  unsupervisedData, data, labels = createTrainingSet()

  print "data.shape"
  print data.shape
  print "labels.shape"
  print labels.shape

  data = common.scale(data)
  unsupervisedData = None

  activationFunction = activationfunctions.Rectified()
  rbmActivationFunctionVisible = activationfunctions.Identity()
  rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

  unsupervisedLearningRate = 0.0001
  supervisedLearningRate = 0.001
  momentumMax = 0.99

  net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
             binary=False,
             activationFunction=activationFunction,
             rbmActivationFunctionVisible=rbmActivationFunctionVisible,
             rbmActivationFunctionHidden=rbmActivationFunctionHidden,
             unsupervisedLearningRate=unsupervisedLearningRate,
             supervisedLearningRate=supervisedLearningRate,
             momentumMax=momentumMax,
             nesterovMomentum=True,
             rbmNesterovMomentum=True,
             rmsprop=True,
             miniBatchSize=20,
             hiddenDropout=0.5,
             visibleDropout=0.8,
             momentumFactorForLearningRateRBM=False,
             firstRBMheuristic=False,
             rbmVisibleDropout=1.0,
             rbmHiddenDropout=1.0,
             preTrainEpochs=10,
             sparsityConstraintRbm=False,
             sparsityRegularizationRbm=0.001,
             sparsityTragetRbm=0.01)

  net.train(data, labels, maxEpochs=200,
            validation=False,
            unsupervisedData=unsupervisedData)

  with open(args.net_file, "wb") as f:
    pickle.dump(net, f)
  return net
def getHyperParamsAndBestNet():
  unsupervisedData, data, labels = createTrainingSet()

  print np.unique(np.argmax(labels, axis=1))

  print "data.shape"
  print data.shape
  print "labels.shape"
  print labels.shape

  print data
  data = common.scale(data)
  unsupervisedData = None

  activationFunction = activationfunctions.Rectified()
  rbmActivationFunctionVisible = activationfunctions.Identity()
  rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

  tried_params = []
  percentages = []
  best_index = 0
  index = 0
  best_correct = 0

  # Random data for training and testing
  kf = cross_validation.KFold(n=len(data), n_folds=10)
  for train, test in kf:
    unsupervisedLearningRate = random.uniform(0.0001, 0.2)
    supervisedLearningRate = random.uniform(0.0001, 0.2)
    momentumMax = random.uniform(0.7, 1)

    tried_params += [{'unsupervisedLearningRate': unsupervisedLearningRate,
                      'supervisedLearningRate': supervisedLearningRate,
                      'momentumMax': momentumMax}]

    trainData = data[train]
    trainLabels = labels[train]

    net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
               binary=False,
               activationFunction=activationFunction,
               rbmActivationFunctionVisible=rbmActivationFunctionVisible,
               rbmActivationFunctionHidden=rbmActivationFunctionHidden,
               unsupervisedLearningRate=unsupervisedLearningRate,
               supervisedLearningRate=supervisedLearningRate,
               momentumMax=momentumMax,
               nesterovMomentum=True,
               rbmNesterovMomentum=True,
               rmsprop=True,
               miniBatchSize=20,
               hiddenDropout=0.5,
               visibleDropout=0.8,
               momentumFactorForLearningRateRBM=False,
               firstRBMheuristic=False,
               rbmVisibleDropout=1.0,
               rbmHiddenDropout=1.0,
               preTrainEpochs=10,
               sparsityConstraintRbm=False,
               sparsityRegularizationRbm=0.001,
               sparsityTragetRbm=0.01)

    net.train(trainData, trainLabels, maxEpochs=200,
              validation=False,
              unsupervisedData=unsupervisedData)

    probs, predicted = net.classify(data[test])

    actualLabels = labels[test]
    correct = 0

    for i in xrange(len(test)):
      actual = actualLabels[i]
      print probs[i]
      if predicted[i] == np.argmax(actual):
        correct += 1

    percentage_correct = correct * 1.0 / len(test)
    print "percentage correct"
    print percentage_correct

    if percentage_correct > best_correct:
      best_index = index
      best_correct = percentage_correct
      with open(args.net_file, "wb") as f:
        pickle.dump(net, f)

    percentages += [percentage_correct]
    index += 1

  print 'best params'
  print tried_params[best_index]
  print 'precision'
  print best_correct
def trainAndTestNet():
  unsupervisedData, data, labels = createTrainingSet()

  print np.unique(np.argmax(labels, axis=1))

  print "data.shape"
  print data.shape
  print "labels.shape"
  print labels.shape

  # Random data for training and testing
  kf = cross_validation.KFold(n=len(data), k=5)
  for train, test in kf:
    break

  print data
  data = common.scale(data)
  unsupervisedData = None

  activationFunction = activationfunctions.Rectified()
  rbmActivationFunctionVisible = activationfunctions.Identity()
  rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

  unsupervisedLearningRate = 0.0001
  supervisedLearningRate = 0.001
  momentumMax = 0.99

  trainData = data[train]
  trainLabels = labels[train]

  net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
             binary=False,
             activationFunction=activationFunction,
             rbmActivationFunctionVisible=rbmActivationFunctionVisible,
             rbmActivationFunctionHidden=rbmActivationFunctionHidden,
             unsupervisedLearningRate=unsupervisedLearningRate,
             supervisedLearningRate=supervisedLearningRate,
             momentumMax=momentumMax,
             nesterovMomentum=True,
             rbmNesterovMomentum=True,
             rmsprop=True,
             miniBatchSize=20,
             hiddenDropout=0.5,
             visibleDropout=0.8,
             momentumFactorForLearningRateRBM=False,
             firstRBMheuristic=False,
             rbmVisibleDropout=1.0,
             rbmHiddenDropout=1.0,
             preTrainEpochs=10,
             sparsityConstraintRbm=False,
             sparsityRegularizationRbm=0.001,
             sparsityTragetRbm=0.01)

  net.train(trainData, trainLabels, maxEpochs=200,
            validation=False,
            unsupervisedData=unsupervisedData)

  probs, predicted = net.classify(data[test])

  actualLabels = labels[test]
  correct = 0
  errorCases = []

  for i in xrange(len(test)):
    actual = actualLabels[i]
    print probs[i]
    if predicted[i] == np.argmax(actual):
      correct += 1
    else:
      errorCases.append(i)

  print "correct"
  print correct

  print "percentage correct"
  print correct  * 1.0 / len(test)

  confMatrix = confusion_matrix(np.argmax(actualLabels, axis=1), predicted)
  print "confusion matrix"
  print confMatrix

  with open(args.net_file, "wb") as f:
    pickle.dump(net, f)
  return net
# Example #8
# 0
def getHyperParamsAndBestNet():
    unsupervisedData, data, labels = createTrainingSet()

    print np.unique(np.argmax(labels, axis=1))

    print "data.shape"
    print data.shape
    print "labels.shape"
    print labels.shape

    print data
    data = common.scale(data)
    unsupervisedData = None

    activationFunction = activationfunctions.Rectified()
    rbmActivationFunctionVisible = activationfunctions.Identity()
    rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

    tried_params = []
    percentages = []
    best_index = 0
    index = 0
    best_correct = 0

    # Random data for training and testing
    kf = cross_validation.KFold(n=len(data), n_folds=10)
    for train, test in kf:
        unsupervisedLearningRate = random.uniform(0.0001, 0.2)
        supervisedLearningRate = random.uniform(0.0001, 0.2)
        momentumMax = random.uniform(0.7, 1)

        tried_params += [{
            'unsupervisedLearningRate': unsupervisedLearningRate,
            'supervisedLearningRate': supervisedLearningRate,
            'momentumMax': momentumMax
        }]

        trainData = data[train]
        trainLabels = labels[train]

        # net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
        #            binary=False,
        #            activationFunction=activationFunction,
        #            rbmActivationFunctionVisible=rbmActivationFunctionVisible,
        #            rbmActivationFunctionHidden=rbmActivationFunctionHidden,
        #            unsupervisedLearningRate=unsupervisedLearningRate,
        #            supervisedLearningRate=supervisedLearningRate,
        #            momentumMax=momentumMax,
        #            nesterovMomentum=True,
        #            rbmNesterovMomentum=True,
        #            rmsprop=True,
        #            miniBatchSize=20,
        #            hiddenDropout=0.5,
        #            visibleDropout=0.8,
        #            momentumFactorForLearningRateRBM=False,
        #            firstRBMheuristic=False,
        #            rbmVisibleDropout=1.0,
        #            rbmHiddenDropout=1.0,
        #            preTrainEpochs=10,
        #            sparsityConstraintRbm=False,
        #            sparsityRegularizationRbm=0.001,
        #            sparsityTragetRbm=0.01)
        #
        # net.train(trainData, trainLabels, maxEpochs=200,
        #           validation=False,
        #           unsupervisedData=unsupervisedData)
        #
        # probs, predicted = net.classify(data[test])

        net = cnn.CNN(30, 40, len(args.emotions))

        net.train(trainData, trainLabels)

        probs, predicted = net.classify(data[test])

        actualLabels = labels[test]
        correct = 0

        for i in xrange(len(test)):
            actual = actualLabels[i]
            print probs[i]
            if predicted[i] == np.argmax(actual):
                correct += 1

        percentage_correct = correct * 1.0 / len(test)
        print "percentage correct"
        print percentage_correct

        if percentage_correct > best_correct:
            best_index = index
            best_correct = percentage_correct
            with open(args.net_file, "wb") as f:
                pickle.dump(net, f)

        percentages += [percentage_correct]
        index += 1

    print 'best params'
    print tried_params[best_index]
    print 'precision'
    print best_correct
# Example #9
# 0
def trainAndTestNet():
    unsupervisedData, data, labels = createTrainingSet()

    print np.unique(np.argmax(labels, axis=1))

    print "data.shape"
    print data.shape
    print "labels.shape"
    print labels.shape

    # Random data for training and testing
    kf = cross_validation.KFold(n=len(data), k=5)
    for train, test in kf:
        break

    print data
    data = common.scale(data)
    unsupervisedData = None

    activationFunction = activationfunctions.Rectified()
    rbmActivationFunctionVisible = activationfunctions.Identity()
    rbmActivationFunctionHidden = activationfunctions.RectifiedNoisy()

    unsupervisedLearningRate = 0.0001
    supervisedLearningRate = 0.001
    momentumMax = 0.99

    trainData = data[train]
    trainLabels = labels[train]

    # net = db.DBN(4, [1200, 1500, 1000, len(args.emotions)],
    #            binary=False,
    #            activationFunction=activationFunction,
    #            rbmActivationFunctionVisible=rbmActivationFunctionVisible,
    #            rbmActivationFunctionHidden=rbmActivationFunctionHidden,
    #            unsupervisedLearningRate=unsupervisedLearningRate,
    #            supervisedLearningRate=supervisedLearningRate,
    #            momentumMax=momentumMax,
    #            nesterovMomentum=True,
    #            rbmNesterovMomentum=True,
    #            rmsprop=True,
    #            miniBatchSize=20,
    #            hiddenDropout=0.5,
    #            visibleDropout=0.8,
    #            momentumFactorForLearningRateRBM=False,
    #            firstRBMheuristic=False,
    #            rbmVisibleDropout=1.0,
    #            rbmHiddenDropout=1.0,
    #            preTrainEpochs=10,
    #            sparsityConstraintRbm=False,
    #            sparsityRegularizationRbm=0.001,
    #            sparsityTragetRbm=0.01)
    #
    # net.train(trainData, trainLabels, maxEpochs=200,
    #           validation=False,
    #           unsupervisedData=unsupervisedData)
    #
    # probs, predicted = net.classify(data[test])

    net = cnn.CNN(30, 40, len(args.emotions))

    net.train(trainData, trainLabels)

    probs, predicted = net.classify(data[test])

    actualLabels = labels[test]
    correct = 0
    errorCases = []

    for i in xrange(len(test)):
        actual = actualLabels[i]
        print probs[i]
        if predicted[i] == np.argmax(actual):
            correct += 1
        else:
            errorCases.append(i)

    print "correct"
    print correct

    print "percentage correct"
    print correct * 1.0 / len(test)

    confMatrix = confusion_matrix(np.argmax(actualLabels, axis=1), predicted)
    print "confusion matrix"
    print confMatrix

    with open(args.net_file, "wb") as f:
        pickle.dump(net, f)
    return net