Example #1
 def __init__(self, folderpath):
     """Constructor."""
     self.__trainingpath__ = None
     self.__evaluatepath__ = None
     self.__expressions__ = []
     self.__trainingset__ = {}
     self.__evaluateset__ = {}
     subfolders = os.listdir(folderpath)
     # Check for trailing '/'
     if folderpath[-1:] != '/':
         folderpath += '/'
     self.__trainingpath__ = folderpath
     # If we got the training & evaluate subfolders
     if "training" in subfolders:
         self.__trainingpath__ = folderpath+"training/"
         if "evaluate" in subfolders:
             self.__evaluatepath__ = folderpath+"evaluate/"
     # get the classes
     exps = os.listdir(self.__trainingpath__)
     for e in exps:
         exp = Exp.from_str(e)
         if exp is None:
             print("Unknown expression: " + e)
         else:
             self.__expressions__.append(exp)
     # Init the sets
     for e in self.__expressions__:
         self.__trainingset__[e] = []
         self.__evaluateset__[e] = []
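The constructor above expects a dataset root holding one subfolder per expression class, optionally split into training/ and evaluate/ subtrees. A minimal usage sketch, assuming a hypothetical DatasetLoader class that owns this constructor and a folder laid out as in the comments:

# Assumed layout (not part of the original listing):
#   /data/expressions/training/<expression name>/...
#   /data/expressions/evaluate/<expression name>/...
# "DatasetLoader" is a placeholder name for the class this method belongs to.
loader = DatasetLoader("/data/expressions")   # trailing '/' optional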
Example #2
 def load_videos(self):
     """Load pictures from the tree structure."""
     exps = os.listdir(self.__trainingpath__)
     for e in exps:
         exp = Exp.from_str(e)
         if exp is None:
             print("Unknown expression: " + e)
         else:
             path = self.__trainingpath__ + e + '/'
             for f in glob.glob(os.path.join(path, "*.avi")):
                 pics = []
                 cap = cv2.VideoCapture(f, cv2.CAP_FFMPEG)
                 while cap.isOpened():
                     ret, frame = cap.read()
                     if ret:
                         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                         pics.append(frame)
                     else:
                         cap.release()
                 self.__trainingset__[exp].append(pics)
     if self.__evaluatepath__ is not None:
         exps = os.listdir(self.__evaluatepath__)
         for e in exps:
             exp = Exp.from_str(e)
             if exp is None:
                 print("Unknown expression: " + e)
             else:
                 path = self.__evaluatepath__ + e + '/'
                 for f in glob.glob(os.path.join(path, "*.avi")):
                     pics = []
                     cap = cv2.VideoCapture(f, cv2.CAP_FFMPEG)
                     while cap.isOpened():
                         ret, frame = cap.read()
                         if ret:
                             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                             pics.append(frame)
                         else:
                             cap.release()
                     # Evaluation videos go into the evaluate set.
                     self.__evaluateset__[exp].append(pics)
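The read loop in load_videos follows the usual OpenCV pattern: keep reading while the capture is open, convert each frame to grayscale, and release the handle once read() stops returning frames. A self-contained sketch of that pattern, with read_gray_frames as a hypothetical helper name:

import cv2

def read_gray_frames(video_path):
    """Return every frame of a video as a list of grayscale images."""
    frames = []
    cap = cv2.VideoCapture(video_path, cv2.CAP_FFMPEG)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # No more frames (or a read error): close the capture and stop.
            cap.release()
            break
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    return frames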
Example #3
 def load_pictures(self):
     """Load pictures from the tree structure."""
     exps = os.listdir(self.__trainingpath__)
     for i, e in enumerate(exps):
         print(e)
         if Exp.from_str(e) is None:
             print("Unknown expression: "+e)
         else:
             exp = Exp.from_str(e)
             path = self.__trainingpath__+e+'/'
             source_map = open(path + "map.csv")
             line = source_map.readline()    # First line, headers, ignore.
             line = source_map.readline()
             while line != '':
                 line = line.split(',')
                 pic = cv2.imread(line[1].replace('\n', ''))
                 pic = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
                 self.__trainingset__[exp].append((pic, line[0]))
                 line = source_map.readline()
             source_map.close()
     if self.__evaluatepath__ is not None:
         exps = os.listdir(self.__evaluatepath__)
         for i, e in enumerate(exps):
             if Exp.from_str(e) is None:
                 print("Unknown expression: "+e)
             else:
                 exp = Exp.from_str(e)
                 path = self.__evaluatepath__+e+'/'
                 source_map = open(path + "map.csv")
                 line = source_map.readline()    # First line, headers, ignore.
                 line = source_map.readline()
                 while line != '':
                     line = line.split(',')
                     pic = cv2.imread(line[1].replace('\n', ''))
                     pic = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
                     self.__evaluateset__[exp].append((pic, line[0]))
                     line = source_map.readline()
                 source_map.close()
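load_pictures assumes each class folder contains a map.csv whose first column holds the label and whose second column holds the image path, with a header row to skip. A minimal sketch of the same parsing written with the csv module, under that assumption:

import csv
import cv2

def read_mapped_pictures(map_path):
    """Yield (grayscale image, label) pairs listed in a map.csv file."""
    with open(map_path, newline='') as source_map:
        reader = csv.reader(source_map)
        next(reader, None)                # skip the header row
        for row in reader:
            pic = cv2.imread(row[1].strip())
            yield cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY), row[0]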
Example #4
File: io.py Project: Yepikae/fee
def get_dataset_from_multiple_json(folder_path):
    datas = FlandmarksDataStorage()
    for f in glob.glob(os.path.join(folder_path, "*.json")):
        # Open the data file
        with open(f, 'r') as json_file:
            src = json.load(json_file)["datas"]
        for src_elem in src:
            data = FlandmarksData(src_elem["file"],
                                  Exp.from_str(src_elem["oclass"]))
            # Save the landmark points of each of the n frames
            for value in src_elem["values"]:
                if value is not None:
                    fl = FLandmarks()
                    fl.set_points(value["points"])
                    data.add_flandmarks(fl)
                else:
                    data.add_flandmarks(None)
            datas.add_element(data)
    return datas
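get_dataset_from_multiple_json expects every *.json file to expose a top-level "datas" list whose entries carry a "file" name, an "oclass" expression label, and a per-frame "values" list in which missing detections are null. A hedged usage sketch; the folder path is illustrative and the JSON excerpt only mirrors the keys read above:

# Shape the loop above assumes for each JSON file (illustrative values):
# {"datas": [{"file": "subject01.avi",
#             "oclass": "happiness",
#             "values": [{"points": [[12, 34], [56, 78]]}, null]}]}
dataset = get_dataset_from_multiple_json("landmarks/json/")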
Example #5
model = load_model(model_path)
labels = [
    Exp.ANGER, Exp.DISGUST, Exp.FEAR, Exp.HAPPINESS, Exp.SADNESS, Exp.SURPRISE,
    Exp.NEUTRAL
]

# Write the header of the output csv file.
output_file = open(output_path, 'w')
output_file.write('filepath,' + ','.join([l.to_str() for l in labels]) + '\n')

# For each extracted landmarks csv file, we do the prediction.
for f in open_landmarks_files(source_path):
    for filepath, exp, datas in group_line_by_file(f):
        exp = Exp.from_str(exp[0])
        # Retrieve the faces frame by frame
        cap = cv2.VideoCapture(filepath, cv2.CAP_FFMPEG)
        frameid = 0
        frames = []
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                filepath, frame_id, exp, bounds, points = datas[frameid]
                # The dlib algorithm might not have succeeded in finding a
                # face. In that case the bounding box is None, so we only
                # extract the face from frames that actually have a
                # bounding box.
                if bounds is not None:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    frame = extract_face_frame(frame, bounds, (64, 64), 15)
line = cnn_file.readline()
while line != '':
    line = line.split(',')
    file = line[0]
    file = file.split('/')[-1]
    values = [float(v) * 100 for v in line[1:]]
    values = ["%.2f" % round(v, 2) for v in values]
    file_values_map[file] = [values]
    line = cnn_file.readline()

# Parse the reviewers' labels
line = reviewers_file.readline()
reviewers_labels = line.split(',')[24:30]
reviewers_labels = [s.replace('(%)', '') for s in reviewers_labels]
reviewers_labels = [Exp.from_str(s) for s in reviewers_labels]
# We now add the NEUTRAL label to replace the "Undecided" label, for
# consistency with the CNN labels.
reviewers_labels.append(Exp.NEUTRAL)

line = reviewers_file.readline()
while line != '':
    line = line.split(',')
    file = line[0]
    values = [float(v) for v in line[24:31]]
    values = ["%.2f" % round(v, 2) for v in values]
    if file in file_values_map:
        file_values_map[file].append(values)
    line = reviewers_file.readline()

output_file.write('file')
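The listing stops right after the first header cell is written. A sketch, not the project's code, of one way the remaining rows could be emitted from file_values_map as built above (CNN percentages first, reviewer percentages second):

# Sketch only: assumes each entry holds [cnn_values, reviewer_values].
for filename, rows in file_values_map.items():
    if len(rows) == 2:
        cnn_values, reviewer_values = rows
        output_file.write(filename + ',' + ','.join(cnn_values) + ',' +
                          ','.join(reviewer_values) + '\n')
output_file.close()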