Example no. 1
    def __init__(self, classifier="sf_model"):
        self.classifier = classifier
        self.model_a = None
        self.model_b = None

        info("Initializing classifier with " + self.classifier)
        if self.classifier == "single_model":
            self.model_a = mobilenet.generate_mobilenet(input_shape, 7)
            self.model_a.load_weights(
                'Modules/Emotion/models/mobilenet-monster-noweight.h5')

        if self.classifier == "ff_model":
            self.model_a = mobilenet.generate_mobilenet(input_shape, 4)
            self.model_a.load_weights(
                'Modules/Emotion/models/mobilenet-monster-first.h5')
            self.model_b = mobilenet.generate_mobilenet(input_shape, 4)
            self.model_b.load_weights(
                'Modules/Emotion/models/mobilenet-monster-second.h5')

        if self.classifier == "sf_model":
            self.model_a = mobilenet.generate_mobilenet(input_shape, 7)
            self.model_a.load_weights(
                'Modules/Emotion/models/mobilenet-monster-noweight.h5')
            self.model_b = mobilenet.generate_mobilenet(input_shape, 4)
            self.model_b.load_weights(
                'Modules/Emotion/models/mobilenet-monster-second.h5')

        info("Weights loaded. Model initialized!")
Example no. 2
    def inference(self, image):
        info("Making inference on image...")
        if self.classifier == "single_model":
            result = self.model_a.predict(np.expand_dims(image, axis=0),
                                          batch_size=1,
                                          verbose=0)
            return labels_dict[np.argmax(result[0])]
        elif self.classifier == "ff_model":
            result = self.model_a.predict(np.expand_dims(image, axis=0),
                                          batch_size=1,
                                          verbose=0)
            pred = np.argmax(result[0])
            if pred == 1:
                # the second-stage model refines this prediction;
                # its four classes map onto labels 3-6
                r = self.model_b.predict(np.expand_dims(image, axis=0),
                                         batch_size=1,
                                         verbose=0)
                pred = np.argmax(r[0]) + 3
            elif pred != 0:
                pred -= 1
            return labels_dict[pred]
        elif self.classifier == "sf_model":
            result = self.model_a.predict(np.expand_dims(image, axis=0),
                                          batch_size=1,
                                          verbose=0)
            pred = np.argmax(result[0])
            if pred in [3, 4, 5, 6]:
                # re-classify ambiguous classes with the second-stage model
                r = self.model_b.predict(np.expand_dims(image, axis=0),
                                         batch_size=1,
                                         verbose=0)
                pred = np.argmax(r[0]) + 3
            return labels_dict[pred]
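
A minimal usage sketch for the two methods above, assuming `input_shape` and `labels_dict` are defined at module scope (as the code implies) and that `face` is a grayscale crop like the one produced by the detector in Example no. 11:

    # Hypothetical usage of the Classifier; names follow the snippets above.
    clf = Classifier("sf_model")
    face3 = np.stack([face, face, face], axis=2)            # grayscale -> 3 channels
    face3 = cv2.resize(face3, input_shape[:2]) * 1. / 255   # match input size, scale to [0, 1]
    print(clf.inference(face3))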
Example no. 3
    def save(self):
        core.info("Writing activity attributes")

        with open(os.path.join(self.path, 'activity.data'), 'wb') as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)

        core.info("Write successfull")
Example no. 4
def load_Activity(name):
    core.info("Loading activity attributes")

    with open(name, 'rb') as f:
        activity = pickle.load(f)

    core.info("Load successful")
    return activity
Example no. 5
def run(behaviorName):

    taskId = -1

    if vars.naoConeted:
        vars.posture.goToPosture("Crouch", 1.0)
        taskId = vars.behavior.post.runBehavior(behaviorName)
        vars.posture.goToPosture("Crouch", 1.0)
    else:
        vars.info("NAO not connected. Can't run motions")

    return taskId
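
A hedged usage sketch, assuming the NAOqi proxies in `vars` are connected and the behavior is installed on the robot (the behavior path below is illustrative):

    # runBehavior is posted (asynchronous), so the returned task id can be
    # waited on via the generic ALProxy wait(id, timeout) call.
    task = run("animations/Stand/Gestures/Hey_1")
    if task != -1:
        vars.behavior.wait(task, 0)  # block until the behavior finishes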
Example no. 6
def main():

    key = ""
    fa = 0

    w = Weights(0.5, 0.2, 0.3)
    #w = Weights(0.3, 0.1, 0.2 )

    op = OperationalParameters(max_deviation=5.0,
                               max_emotion_count=3,
                               min_number_word=1,
                               max_time2ans=10,
                               min_suc_rate=1)

    while (key != "e"):

        #print "test", normalize(1.75,5)

        rv = core.ReadValues(deviations=random.randint(0, 5),
                             emotionCount=random.randint(0, 3),
                             numberWord=random.randint(0, 1),
                             time2ans=random.randint(0, 10),
                             sucRate=random.randint(0, 1))

        #print "on main", op.max_deviation
        #print "weights", w.alpha
        #print "rv.deviations", rv.deviations

        adpt = AdaptiveSystem(robot=1, path=2, op=op, w=w, rv=rv)

        # feed the previous value back in so fadp accumulates over iterations
        fc = adpt.adp_function(fa)
        core.info("Fadp: " + str(fc))

        act = adpt.activation_function(fc)
        core.info("Activation: " + str(act))

        key = raw_input("Key: ")

        fa = fc
Example no. 7
def create_Activity(act, vs=None):
    '''Create a new activity and set up all of its directories.'''

    if os.path.exists(act.path):
        core.war("Activity path already exists")

    else:
        core.info("Starting new activity folder in " + act.path)
        os.makedirs(act.path)
        os.makedirs(os.path.join(act.path, "Vision"))
        os.makedirs(os.path.join(act.path, "Dialog"))
        os.makedirs(os.path.join(act.path, "Users"))
        os.makedirs(os.path.join(act.path, "Logs"))

    core.info("Writing activity attributes")

    if act.vision and vs is not None:
        core.info("Starting Vision components for activity: " + act.name)
        act.classes = vs.collect_database(act, camId=1)
        act.ncl = len(act.classes)
        #act.save()
        dp = data_process.Data_process(act.path)
        #dp.buildTrainValidationData()
        #dp.data_aug()
        #dp.generate_model()
        dp.save_best()
        dp.print_classes()

    else:
        core.war("Activity <<" + act.name + ">> has no Vision system required")

    with open(os.path.join(act.path, 'activity.data'), 'wb') as f:
        pickle.dump(act, f, pickle.HIGHEST_PROTOCOL)

    core.info("Writining successfull")
Example no. 8
    def adp_function(self, fadp_previous_value=0):

        # calculating the alpha term
        alpha = normalize(self.rv.deviations, self.op.max_deviation)
        core.info("Alpha: " + str(alpha))

        # calculating the beta term
        beta = (normalize(self.rv.emotionCount, self.op.max_emotion_count) +
                normalize(self.rv.numberWord, self.op.min_number_word)) / 2
        core.info("Beta: " + str(beta))

        # calculating the gamma term (the identifier is spelled "gama" in the code)
        gama = (normalize(self.rv.time2ans, self.op.max_time2ans) +
                normalize(self.rv.sucRate, self.op.min_suc_rate)) / 2
        core.info("Gamma: " + str(gama))

        fadp = self.w.alpha * alpha + self.w.beta * beta + self.w.gama * gama
        core.info("fadp = w.alpha*alpha + w.beta*beta + w.gama*gama")
        core.info(
            str(fadp) + " = " + str(self.w.alpha) + "*" + str(alpha) + " + " +
            str(self.w.beta) + "*" + str(beta) + " + " + str(self.w.gama) +
            "*" + str(gama))

        # fadp(t) = fadp(t-1) + fadp(t)
        fadp = fadp + fadp_previous_value
        core.info("Final: " + str(fadp))

        return fadp
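
`normalize` is referenced throughout but not shown. A minimal sketch consistent with how it is called above (a reading scaled against its expected maximum and clipped to [0, 1]) — an assumption, not the original implementation — plus a worked instance of the weighted sum:

    def normalize(value, max_value):
        # Hypothetical helper: scale a reading by its maximum, clip to [0, 1].
        if max_value == 0:
            return 0.0
        return min(float(value) / max_value, 1.0)

    # With the weights from Example no. 6, w = (0.5, 0.2, 0.3), and
    # alpha = 0.6, beta = 0.5, gama = 0.4:
    # fadp = 0.5*0.6 + 0.2*0.5 + 0.3*0.4 = 0.30 + 0.10 + 0.12 = 0.52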
Example no. 9
    def increase_database(self,
                          act,
                          path_name="./Activities",
                          max_imgs=300,
                          camId=0):

        subId = self.subscribe(camId)

        result = self.robot.camera.getImageRemote(subId)
        #create image
        width = result[0]
        height = result[1]
        image = np.zeros((height, width, 3), np.uint8)

        act_vision_path = os.path.join(path_name, act.name, "Vision")
        coll_path = os.path.join(act_vision_path, 'collected')

        total_files = len(os.listdir(coll_path))

        imgs_per_class = total_files / len(act.classes)

        print "imgs per class : ", imgs_per_class

        for current_class in range(0, act.ncl):

            #diag.say("siga o programa no terminal")

            #cv2.destroyAllWindows()

            core.info("\n\nOperating in class: >> " +
                      act.classes[current_class] + " <<")

            core.info("Digite ESC para inicializar")

            key = 0
            while key != core.ESC:

                # get image
                result = self.robot.camera.getImageRemote(subId)

                if result is None:
                    print 'cannot capture.'
                elif result[6] is None:
                    print 'no image data string.'
                else:

                    # translate value to mat
                    values = map(ord, list(result[6]))
                    i = 0
                    for y in range(0, height):
                        for x in range(0, width):
                            image.itemset((y, x, 0), values[i + 0])
                            image.itemset((y, x, 1), values[i + 1])
                            image.itemset((y, x, 2), values[i + 2])
                            i += 3

                    # show image
                    cv2.imshow("Capturing", image)
                    key = cv2.waitKey(1)

                    if key != -1:
                        print key

                    if key == core.ENTER:
                        self.unsub(subId)
                        exit()

            core.info("Starting capture")

            #cv2.waitKey(1)
            #cv2.destroyAllWindows()
            #cv2.waitKey(1)

            counter = imgs_per_class + 1
            while counter < max_imgs + imgs_per_class:

                # get image
                result = self.robot.camera.getImageRemote(subId)

                if result is None:
                    print 'cannot capture.'
                elif result[6] is None:
                    print 'no image data string.'
                else:

                    # translate value to mat
                    values = map(ord, list(result[6]))
                    i = 0
                    for y in range(0, height):
                        for x in range(0, width):
                            image.itemset((y, x, 0), values[i + 0])
                            image.itemset((y, x, 1), values[i + 1])
                            image.itemset((y, x, 2), values[i + 2])
                            i += 3

                    # show image
                    cv2.imshow("Capturing", image)
                    key = cv2.waitKey(1)

                cv2.putText(image, 'Image number ' + str(counter),
                            bottomLeftCornerOfText, font, fontScale, fontColor,
                            lineType)

                counter += 1
                #im=vs.see()
                cv2.imshow("Capturing", image)

                if cv2.waitKey(1) == core.ESC:
                    break

                name = os.path.join(
                    act_vision_path, 'collected',
                    str(current_class)) + "_" + str(
                        counter) + ".jpeg"  # + str(time.ctime()) + ".jpg"
                cv2.imwrite(name, image)

                core.info("Image saved." + name)
                sys.stdout.write("\033[F")  # Cursor up one line
                #core.info("Next	!")

        core.info("Captura concluida com sucesso!")

        self.unsub(subId)

        return True
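
The per-pixel `itemset` loops above are very slow in Python; here is a sketch of an equivalent one-shot conversion with NumPy, assuming `result[6]` holds the raw interleaved BGR bytes that `getImageRemote` returns:

    def frame_from_result(result):
        # Hypothetical helper: build an OpenCV-compatible frame directly
        # from a NAOqi getImageRemote() result.
        width, height, data = result[0], result[1], result[6]
        frame = np.frombuffer(data, dtype=np.uint8).reshape(height, width, 3)
        return frame.copy()  # copy so drawing calls (putText, etc.) can write to it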
Example no. 10
    def collect_database(self,
                         activity,
                         path_name="./Activities",
                         max_imgs=300,
                         camId=0):

        act_path = os.path.join(path_name, activity.name, "Vision")
        coll_path = os.path.join(act_path, 'collected')

        # ---- Verify the collected path
        if os.path.exists(coll_path):
            choice = raw_input(
                "Collected database already exists. Do you want to update it? (y/n):"
            )

            if choice == "y":
                self.increase_database(activity, path_name, max_imgs, camId)
                return True
            else:
                exit()

        else:
            print "No database found. Starting a new one in", act_path
            os.makedirs(coll_path)

        classes = []

        subId = self.subscribe(camId)

        result = self.robot.camera.getImageRemote(subId)
        #create image
        width = result[0]
        height = result[1]
        image = np.zeros((height, width, 3), np.uint8)

        for sh in range(0, 100):

            #diag.say("siga o programa no terminal")

            #cv2.destroyAllWindows()
            key = 0
            cl = raw_input(
                "Type the name of the new class, or 'end' to finish: ")

            if cl == "end":
                break

            else:

                classes.append(cl)
                #diag.say( "Capturando imagens da classe: " + cl + ". Digite ESC para começar ")

                core.info("Digite ESC para inicializar")

                key = 0
                while key != core.ESC:

                    # get image
                    result = self.robot.camera.getImageRemote(subId)

                    if result is None:
                        print 'cannot capture.'
                    elif result[6] is None:
                        print 'no image data string.'
                    else:

                        # translate value to mat
                        values = map(ord, list(result[6]))
                        i = 0
                        for y in range(0, height):
                            for x in range(0, width):
                                image.itemset((y, x, 0), values[i + 0])
                                image.itemset((y, x, 1), values[i + 1])
                                image.itemset((y, x, 2), values[i + 2])
                                i += 3

                        # show image
                        cv2.imshow("Capturing", image)
                        key = cv2.waitKey(1)
                        print key

                core.info("inicializando Captura")

                #cv2.waitKey(1)
                #cv2.destroyAllWindows()
                #cv2.waitKey(1)

                counter = 0
                while counter < max_imgs:

                    # get image
                    result = self.robot.camera.getImageRemote(subId)

                    if result is None:
                        print 'cannot capture.'
                    elif result[6] is None:
                        print 'no image data string.'
                    else:

                        # translate value to mat
                        values = map(ord, list(result[6]))
                        i = 0
                        for y in range(0, height):
                            for x in range(0, width):
                                image.itemset((y, x, 0), values[i + 0])
                                image.itemset((y, x, 1), values[i + 1])
                                image.itemset((y, x, 2), values[i + 2])
                                i += 3

                        # show image
                        cv2.imshow("Capturing", image)
                        key = cv2.waitKey(1)

                    cv2.putText(image, 'Image number ' + str(counter),
                                bottomLeftCornerOfText, font, fontScale,
                                fontColor, lineType)

                    counter += 1
                    #im=vs.see()
                    cv2.imshow("Capturing", image)

                    if cv2.waitKey(1) == core.ESC:
                        break

                    name = os.path.join(
                        act_path, 'collected', str(sh)) + "_" + str(
                            counter) + ".jpeg"  # + str(time.ctime()) + ".jpg"
                    cv2.imwrite(name, image)

                    core.info("Image saved." + name)
                #print "Lista de classes",  classes

        #core.shapes=classes

        #print core.shapes

        with open(os.path.join(path_name, activity.name, 'file_classes.csv'),
                  'wb') as csvfile:
            spamwriter = csv.writer(csvfile,
                                    delimiter=' ',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            spamwriter.writerow(classes)

        core.info("Captura concluida com sucesso!")

        self.unsub(subId)

        return classes
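
A small reading counterpart for the CSV written above (a hypothetical helper, assuming the same delimiter and quote character as the writer):

    def load_classes(path_name, activity_name):
        # Read the class list back with the writer's csv settings.
        with open(os.path.join(path_name, activity_name,
                               'file_classes.csv'), 'rb') as f:
            reader = csv.reader(f, delimiter=' ', quotechar='|')
            return next(reader)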
Example no. 11
    def _start_classification(self, camId, minNeighbors=5):
        # "import" global variables
        global run_state, camera

        classifier = emotion.Classifier()

        # start some variables
        # number of deviations, total time spent in disattention
        n_deviations = time_disattention = 0
        # static measuring time, dynamic measuring time, time on attention,
        # timestamp of the last emotion classification
        static_time = dynamic_time = time_attention = time_emotion = time.time()

        face_cascade = cv2.CascadeClassifier(
            'Modules/haarcascade_frontalface_alt.xml')

        arq = open(
            'AttentionLogs/{:6.0f}all_statistics.dat'.format(time.time()), 'w')

        info("All set. Obtaining images!")
        c = open('emotion_imgs/classifications.txt', 'a+')
        face = None

        while self.run_state != 'stop':

            while self.run_state == 'running':

                image = self.takePicture()

                # convert image to grayscale
                image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

                # detect faces using Haar Cascade, last parameter: min_neighbors
                faces = face_cascade.detectMultiScale(image_gray, 1.3,
                                                      minNeighbors)

                # if no faces are detected, updates deviation time
                if len(faces) == 0:
                    dynamic_time = time.time()

                # else, shows on screen the detected face, store the face on
                # a variable to be emotion-classified, and counts deviation time
                else:
                    # runs through all faces found (expected only one, but runs on a loop just to be sure)
                    for (x, y, w, h) in faces:
                        # draws rectangle around the face
                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)

                        # store the detected face with a few extra pixels,
                        # because the cascade classifier crops a little too
                        # tightly; clamp at 0 so the slice cannot wrap around
                        face = image_gray[max(y - 10, 0):y + h + 10,
                                          max(x - 10, 0):x + w + 10]

                        # if a time difference of 0.3 seconds is met, classify the emotion on a face
                        time_diff = dynamic_time - time_emotion
                        try:
                            if (time_diff >= 0.3 and face is not None):
                                info("Face detected. Classifying emotion.")
                                # reshape image to meet the input dimensions
                                face_to_classify = np.stack([face, face, face],
                                                            axis=2)
                                face_to_classify = cv2.resize(
                                    face_to_classify,
                                    input_shape[:2],
                                    interpolation=cv2.INTER_AREA) * 1. / 255
                                # get inference from classifier
                                classified_emotion = classifier.inference(
                                    face_to_classify)
                                # writes emotion on the image, to be shown on screen
                                cv2.putText(image, classified_emotion, (0, 30),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (255, 0, 0), 2, cv2.LINE_AA)
                                # store image on a folder, for future analysis
                                cv2.imwrite(
                                    "emotion_imgs/{}.png".format(dynamic_time),
                                    face)
                                c.write("{} {}\n".format(
                                    dynamic_time, classified_emotion))
                                # reset the emotion timer
                                time_emotion = time.time()
                                info("Emotion classified: {}".format(
                                    classified_emotion))
                                emotions[classified_emotion] += 1
                        except Exception as e:
                            print(e)
                    # if the time difference meets a threshold, count it as a deviation
                    diff = dynamic_time - static_time
                    if diff > 0.7:
                        # increase the number of deviations detected
                        n_deviations += 1
                        # stores the time of this deviation
                        arq.write("Tempo do desvio: {:.2f}\n".format(diff))
                        deviation_times.append(diff)
                        # increase total disattention time
                        time_disattention += diff
                        info("Deviation detected")
                    static_time = dynamic_time = time.time()

                # show image on screen
                # cv2.imshow('img',image)

                # check if running will continue
                if cv2.waitKey(1) and self.run_state == 'stop':
                    info("Stop detected. Breaking execution!")
                    break

        c.close()

        # clear opencv windows
        cv2.destroyAllWindows()
        # release camera

        # calculate total attention time
        time_attention = time.time() - time_attention - time_disattention
        # write info about the whole session
        totals = "Total number of deviations:{}. Total time on disattention: {:.2f}. Time on attention: {:.2f}\n".format(
            n_deviations, time_disattention, time_attention)
        arq.write(totals)
        arq.close()
        info("Verbose deviation file written.")

        if n_deviations >= 2:
            attention = False

        # write on raw data file
        arq_ret = open('statistics.dat', 'w')
        data = "{}\n{:.2f}\n{:.2f}\n".format(n_deviations, time_disattention,
                                             time_attention)
        arq_ret.write(data)
        arq_ret.close()
        self.run_state = 'running'  # reset the state for a future session
        info("Raw data deviation file written")