def affwild2(pathbase, typeExp, outputfolder):
    """Materialize train/val/test folders for AffWild2.

    Reads annotation files from ``<pathbase>/annotations/<set>/<split>`` and the
    aligned face crops from ``<pathbase>/cropped_aligned``, then copies each
    video's annotation txt plus all of its jpg frames into one of three output
    splits. Roughly 10% of the Train_Set videos are diverted (coin flip per
    video, until the quota is used up) into ``val``; the remaining Train_Set
    videos go to ``train`` and the Validation_Set videos go to ``test``.

    :param pathbase: root of the AffWild2 download.
    :param typeExp: experiment kind — one of 'EXP', 'VA', 'AU'.
    :param outputfolder: destination root; deleted and recreated if it exists.
    """
    pathFilesForTraining = os.path.join(pathbase, 'cropped_aligned')
    # Start from a clean output tree.
    if os.path.exists(outputfolder):
        shutil.rmtree(outputfolder)
    os.makedirs(os.path.join(outputfolder, 'train'))
    os.makedirs(os.path.join(outputfolder, 'val'))
    os.makedirs(os.path.join(outputfolder, 'test'))
    setsFile = ['Train_Set', 'Validation_Set']
    typesFolder = {'EXP': 'EXPR_Set', 'VA': 'VA_Set', 'AU': 'AU_Set'}
    for s in setsFile:
        fullPathFiles = os.path.join(pathbase, 'annotations', typesFolder[typeExp], s)
        filesToSeparate = getFilesInPath(fullPathFiles)
        # Quota of Train_Set videos to divert into the validation split.
        sizeVal = 0
        if 'Train' in s:
            sizeVal = math.floor(len(filesToSeparate) * 0.1)
        for f in filesToSeparate:
            folderName = f.split(os.path.sep)[-1][:-4]  # annotation name sans extension
            print("Copying files from %s to dataset" % (folderName))
            filesFromVideo = getFilesInPath(
                os.path.join(pathFilesForTraining, folderName))
            if sizeVal > 0 and random.randint(0, 1):
                _copyVideoToSplit(f, filesFromVideo,
                                  os.path.join(outputfolder, 'val'), folderName)
                sizeVal -= 1
            elif 'Train' in s:
                _copyVideoToSplit(f, filesFromVideo,
                                  os.path.join(outputfolder, 'train'), folderName)
            else:
                # Official Validation_Set videos become the held-out test split.
                _copyVideoToSplit(f, filesFromVideo,
                                  os.path.join(outputfolder, 'test'), folderName)


def _copyVideoToSplit(annotationFile, videoFrames, destFolder, folderName):
    """Copy one video's annotation txt and all of its jpg frames into destFolder.

    Frames are renamed ``<folderName>_<originalName>`` so frames from different
    videos cannot collide inside the flat split folder.
    """
    shutil.copy(annotationFile, os.path.join(destFolder, folderName + '.txt'))
    for fImage in videoFrames:
        if 'jpg' not in fImage:  # skip non-image artifacts in the crop folder
            continue
        fileJpgName = fImage.split(os.path.sep)[-1]
        shutil.copy(fImage,
                    os.path.join(destFolder, folderName + '_' + fileJpgName))
def __init__(self, idl_path, blocksize, transform=None):
    """Test-phase dataset of fixed-size frame blocks.

    Groups the files under ``idl_path`` by their name prefix (everything before
    the last ``_`` component — presumably one group per video; confirm against
    the file naming used upstream), sorts each group numerically by the digits
    embedded in the file name, and stores every sliding window of
    ``blocksize`` consecutive frames in ``self.filesPath``.

    :param idl_path: directory containing the frame files.
    :param blocksize: number of consecutive frames per sample.
    :param transform: optional per-sample transform.
    """
    self.phase = 'test'
    self.blocksize = blocksize
    self.filesPath = []
    self.transform = transform
    self.idl_path = idl_path
    raw_data_loaded = getFilesInPath(self.idl_path)
    blockFiles = {}
    for f in raw_data_loaded:
        fileName = '_'.join(f.split(os.path.sep)[-1].split('_')[:-1])
        if fileName not in blockFiles:
            blockFiles[fileName] = []
        blockFiles[fileName].append(f)
    for k in blockFiles:
        # Numeric sort: strip every non-digit and compare the remaining number.
        blockFiles[k].sort(key=lambda f: int(re.sub(r'\D', '', f)))
        if len(blockFiles[k]) < blocksize:
            continue  # group too short to yield even one block
        # BUG FIX: the window count was hard-coded as "len(...) - 5" instead of
        # using the configured block size, silently dropping windows whenever
        # blocksize != 5. Use self.blocksize, matching the sibling CASME
        # loaders, which makes the old "break" guard unreachable.
        for nBl in range(len(blockFiles[k]) - self.blocksize):
            self.filesPath.append(blockFiles[k][nBl:nBl + self.blocksize])
def __init__(self, casmepath, phase, blocksize, transform):
    """CASME dataset of fixed-size frame blocks with per-block labels.

    For each class directory under ``<casmepath>/<phase>`` the frame files are
    grouped by name prefix (everything before the last ``_`` component), sorted
    numerically, and every sliding window of ``blocksize`` consecutive frames
    is appended to ``self.filesPath`` with the matching class index from
    ``labelName`` appended to ``self.label``.

    :param casmepath: dataset root.
    :param phase: split subdirectory (e.g. 'train'/'test').
    :param blocksize: number of consecutive frames per sample.
    :param transform: per-sample transform.
    """
    self.blocksize = blocksize
    self.transform = transform
    self.label = []
    self.filesPath = []
    raw_data_loaded = getDirectoriesInPath(os.path.join(casmepath, phase))
    labelName = ['appex', 'neutral', 'offset', 'onset']
    for r in raw_data_loaded:
        files = getFilesInPath(os.path.join(casmepath, phase, r))
        blockFiles = {}
        for f in files:
            fileName = '_'.join(f.split(os.path.sep)[-1].split('_')[:-1])
            if fileName not in blockFiles:
                blockFiles[fileName] = []
            blockFiles[fileName].append(f)
        for k in blockFiles:
            # Numeric sort on the digits embedded in the file name.
            # FIX: raw string for the regex — '\D' is an invalid escape
            # sequence in a plain string on modern Python.
            blockFiles[k].sort(key=lambda f: int(re.sub(r'\D', '', f)))
            # range() already bounds nBl so every window fits; the original
            # in-loop "break" guard was unreachable and has been removed.
            for nBl in range(len(blockFiles[k]) - self.blocksize):
                blockData = blockFiles[k][nBl:nBl + self.blocksize]
                self.filesPath.append(blockData)
                self.label.append(labelName.index(r))
def getCASME2BlockData(casmepath, phase, blocksize):
    """Return (labels, filesPath) sliding-window blocks for CASME2.

    Functional twin of the CASME block Dataset: for each class directory under
    ``<casmepath>/<phase>``, frame files are grouped by name prefix, sorted
    numerically, and every window of ``blocksize`` consecutive frames becomes
    one sample. Labels are the index of the class directory within the
    directory listing itself (order given by getDirectoriesInPath).

    :param casmepath: dataset root.
    :param phase: split subdirectory.
    :param blocksize: frames per block.
    :return: tuple ``(labels, filesPath)`` of parallel lists.
    """
    labels = []
    filesPath = []
    raw_data_loaded = getDirectoriesInPath(os.path.join(casmepath, phase))
    # NOTE: unlike the Dataset variant, labels index into the discovered
    # directory list, not a fixed name list.
    labelName = raw_data_loaded
    for r in raw_data_loaded:
        files = getFilesInPath(os.path.join(casmepath, phase, r))
        blockFiles = {}
        for f in files:
            fileName = '_'.join(f.split(os.path.sep)[-1].split('_')[:-1])
            if fileName not in blockFiles:
                blockFiles[fileName] = []
            blockFiles[fileName].append(f)
        for k in blockFiles:
            # FIX: raw string for the regex — '\D' in a plain string is an
            # invalid escape sequence on modern Python.
            blockFiles[k].sort(key=lambda f: int(re.sub(r'\D', '', f)))
            # range() already bounds nBl; the old unreachable "break" guard
            # was removed.
            for nBl in range(len(blockFiles[k]) - blocksize):
                blockData = blockFiles[k][nBl:nBl + blocksize]
                filesPath.append(blockData)
                labels.append(labelName.index(r))
    return labels, filesPath
def __init__(self, affData, phase, transform=None):
    """Per-frame AffData dataset keyed by roi/frame file naming.

    Walks the annotation files under ``<affData>/<phase>``; for each one,
    derives the video directory name and the roi index (1 when 'right' appears
    in the file name, else 0), loads its label sequence via ``loadLabels``, and
    registers every frame whose ``roi_<roi>_frame_<n>.jpg`` crop exists on
    disk, together with its matching keypoints txt and label value.
    """
    self.transform = transform
    self.label = []
    self.filesPath = []
    self.keypointsPath = []
    annotationFiles = getFilesInPath(os.path.join(affData, phase))
    for annFile in annotationFiles:
        baseName = annFile.split(os.path.sep)[-1]
        roiIdx = int('right' in baseName)
        videoDir = baseName.split('.')[0]
        if '_' in videoDir:
            videoDir = videoDir.split('_')[0]
        # Skip annotations whose crop directory is missing.
        if not os.path.exists(os.path.join(affData, phase, videoDir)):
            continue
        labelSequence = self.loadLabels(annFile)
        jpgTemplate = os.path.join(affData, phase, videoDir, 'roi_%d_frame_%d.jpg')
        txtTemplate = os.path.join(affData, phase, videoDir, 'roi_%d_frame_%d.txt')
        for frameIdx, frameLabel in enumerate(labelSequence):
            framePath = jpgTemplate % (roiIdx, frameIdx)
            if not os.path.exists(framePath):
                continue
            self.filesPath.append(framePath)
            self.keypointsPath.append(txtTemplate % (roiIdx, frameIdx))
            self.label.append(frameLabel)
def openLandmarks(pathInicial, dir, videoName):
    """Load every per-frame landmark file for one video.

    Looks under ``<pathInicial>/landmarks/<dir>/<video name sans extension>``
    and returns a dict mapping the integer frame index (taken from each file
    name minus its 4-char extension) to the parsed content from ``readFile``.
    """
    landmarkFolder = os.path.join(pathInicial, 'landmarks', dir, videoName[:-4])
    frames = {}
    for entry in getFilesInPath(landmarkFolder):
        frameIndex = int(entry.split(os.path.sep)[-1][:-4])
        frames[frameIndex] = readFile(entry)
    return frames
def main():
    """Extract per-frame face crops from AffWild1 videos.

    For each video under ``<pathBase>/videos/<dir>``, reads the pre-computed
    landmark files (openLandmarks), opens the video with OpenCV, and for every
    frame that has landmarks crops the bounding box and writes it as
    ``<whereTo>/<dir>/<video>/<frame>.jpg`` (skipping frames already on disk).
    """
    parser = argparse.ArgumentParser(description='Organize AffWild1')
    parser.add_argument('--pathBase', help='Path for faces', required=True)
    parser.add_argument('--whereTo', help='Where to save', required=True)
    parser.add_argument('--deleteOld', help='Remove old dataset?', default=1, type=int)
    args = parser.parse_args()
    if args.deleteOld:
        if os.path.exists(args.whereTo):
            shutil.rmtree(args.whereTo)
        os.makedirs(args.whereTo)
    dirsFaces = getDirectoriesInPath(os.path.join(args.pathBase, 'videos'))
    for d in dirsFaces:
        print("Extracting from %s" % (d))
        if not os.path.exists(os.path.join(args.whereTo, d)):
            os.makedirs(os.path.join(args.whereTo, d))
        filesFace = getFilesInPath(os.path.join(args.pathBase, 'videos', d))
        for f in filesFace:
            fileName = f.split(os.path.sep)[-1]
            print("Doing video %s" % (fileName))
            outDir = os.path.join(args.whereTo, d, fileName[:-4])
            if not os.path.exists(outDir):
                os.makedirs(outDir)
            faceAnn = openLandmarks(args.pathBase, d, fileName)
            vcap = cv2.VideoCapture(f)
            frame_number = 0
            sucs = True
            try:
                while sucs:
                    sucs, imgv = vcap.read()
                    if sucs and frame_number in faceAnn:
                        framePath = os.path.join(outDir, str(frame_number) + '.jpg')
                        if not os.path.exists(framePath):
                            x1, y1, x2, y2 = getBoundingbox(faceAnn[frame_number])
                            fImage = imgv[y1:y2, x1:x2]
                            if len(fImage) > 0:
                                # FIX: was a bare "except:", which also
                                # swallows KeyboardInterrupt/SystemExit.
                                try:
                                    cv2.imwrite(framePath, fImage)
                                except Exception:
                                    print('Error in save frame %d' % (frame_number))
                    frame_number += 1
            finally:
                # FIX: the capture handle was never released.
                vcap.release()
def __init__(self, casmepath, phase, transform=None):
    """Per-frame CASME dataset.

    Enumerates every file inside each class directory found under
    ``<casmepath>/<phase>`` and labels it with that directory's index in the
    fixed class list ['appex', 'neutral', 'offset', 'onset'].
    """
    self.transform = transform
    self.label = []
    self.filesPath = []
    classNames = ['appex', 'neutral', 'offset', 'onset']
    for classDir in getDirectoriesInPath(os.path.join(casmepath, phase)):
        for framePath in getFilesInPath(os.path.join(casmepath, phase, classDir)):
            self.filesPath.append(framePath)
            self.label.append(classNames.index(classDir))
def __init__(self, casmepath, phase, transform=None):
    """Per-frame CASME dataset restricted to png images.

    Iterates the fixed class list ['appex', 'neutral', 'offset', 'onset'],
    listing only png image files inside each ``<casmepath>/<phase>/<class>``
    directory, and labels each file with its class index.
    """
    self.transform = transform
    self.label = []
    self.filesPath = []
    labelName = ['appex', 'neutral', 'offset', 'onset']
    for r in labelName:
        # BUG FIX: imageExtesions=('png') passed the plain string 'png'
        # (parentheses alone do not make a tuple); pass a real 1-tuple so the
        # extension filter receives a collection as presumably intended.
        files = getFilesInPath(os.path.join(casmepath, phase, r),
                               onlyFiles=True,
                               imagesOnly=True,
                               imageExtesions=('png',))
        for f in files:
            self.filesPath.append(f)
            self.label.append(labelName.index(r))
def main(): fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False) #if os.path.exists('formated_aff'): # shutil.rmtree('formated_aff') face_cascade = cv2.CascadeClassifier( 'cascadeFolder/haarcascade_frontalface_default.xml') #os.makedirs('formated_aff') #os.makedirs(os.path.join('formated_aff', 'Train_Set')) #os.makedirs(os.path.join('formated_aff', 'Validation_Set')) folders = getDirectoriesInPath("aff_dataset") for f in folders: videos = getFilesInPath(os.path.join('aff_dataset', f)) for v in videos: preProcessWithThread(v, f, fa, face_cascade) '''
def __init__(self, affData, phase, transform=None):
    """Per-frame AffData dataset driven by the image files themselves.

    Scans ``<affData>/<phase>``: every non-txt file is one sample. The
    companion annotation txt shares the file-name prefix (everything before
    the last ``_`` component), and the frame's label is the entry of that
    annotation's label sequence at the index encoded in the file name's last
    ``_`` component.
    """
    self.transform = transform
    self.label = []
    self.filesPath = []
    self.keypointsPath = []
    for entry in getFilesInPath(os.path.join(affData, phase)):
        baseName = entry.split(os.path.sep)[-1]
        # Annotation txt files are not samples themselves.
        if baseName[-3:] == 'txt':
            continue
        annName = '_'.join(baseName.split('_')[:-1]) + '.txt'
        frameIndex = int(baseName.split('_')[-1].split('.')[0])
        annPath = os.path.join(affData, phase, annName)
        labelSequence = self.loadLabels(annPath)
        self.filesPath.append(entry)
        self.keypointsPath.append(annPath)
        self.label.append(labelSequence[frameIndex])
def __init__(self, idl_path, transform=None):
    """Test-phase dataset that simply enumerates every file under idl_path."""
    self.phase = 'test'
    self.transform = transform
    self.idl_path = idl_path
    self.file_paths = getFilesInPath(self.idl_path)