def main():
    # Select the GPU when available, otherwise fall back to the CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Load the best-loss checkpoint of the temporal ResNet-50 model.
    wtgs = torch.load('temporal_resnet_50_best_loss.pth.tar',
                      map_location=device)
    rnet = TimeSeriesLearning(4, 15)
    rnet = rnet.to(device)
    valT = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    valDataset = torch.utils.data.DataLoader(IngDiscLearnDataSetBlock(
        'frames_face', transform=valT, blocksize=5),
                                             batch_size=20,
                                             shuffle=False,
                                             num_workers=4)
    rnet.load_state_dict(wtgs['state_dict'])
    # Class names follow the directory layout of the formatted CASME2 training set.
    classes = getDirectoriesInPath(os.path.join('CASME2_formated', 'train'))
    createFoldersForEmotions(classes)
    blockNumber = 0
    with torch.no_grad():
        rnet.eval()
        for batch_i, (imgs, targets) in enumerate(valDataset):
            # Run each block of frames through the network on the selected device.
            outputs = rnet(imgs.to(device))
            _, predicts = torch.max(outputs, 1)
            predictsCPU = predicts.cpu()
            for idx, t in enumerate(targets):
                # Copy every frame of the block into the folder of its predicted emotion.
                for ft in t:
                    fileName = ft.split(os.path.sep)[-1]
                    shutil.copyfile(
                        ft,
                        os.path.join(
                            'expressionsEmotions',
                            classes[predictsCPU[idx].item()],
                            'block_' + str(blockNumber) + '_' + fileName))
                blockNumber += 1
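
# Hedged sketch (helper not shown in this listing): createFoldersForEmotions is
# assumed to create one output folder per class under 'expressionsEmotions',
# matching the destination paths used in the copy loop above. The name and the
# exist_ok behaviour below are assumptions, not the original implementation.
def createFoldersForEmotionsSketch(classes, outputDir='expressionsEmotions'):
    for c in classes:
        os.makedirs(os.path.join(outputDir, c), exist_ok=True)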
    def __init__(self, casmepath, phase, blocksize, transform):
        # Build sliding-window blocks of consecutive frames for every CASME2 clip.
        self.blocksize = blocksize
        self.transform = transform
        self.label = []
        self.filesPath = []
        raw_data_loaded = getDirectoriesInPath(os.path.join(casmepath, phase))
        # Phase folders of the formatted CASME2 data; the list index is the class label.
        labelName = ['appex', 'neutral', 'offset', 'onset']
        for r in raw_data_loaded:
            files = getFilesInPath(os.path.join(casmepath, phase, r))
            blockFiles = {}
            for f in files:
                # Group frames by clip: drop the trailing frame index from the file name.
                fileName = '_'.join(f.split(os.path.sep)[-1].split('_')[:-1])
                if fileName not in blockFiles:
                    blockFiles[fileName] = []

                blockFiles[fileName].append(f)

            #qntdeBlock = int(sum([len(blockFiles[k]) for k in blockFiles]) / len(blockFiles.keys()) / self.blocksize)
            for k in blockFiles:
                # Sort frames numerically by the digits embedded in their file names.
                blockFiles[k].sort(key=lambda f: int(re.sub(r'\D', '', f)))
                # Slide a window of `blocksize` consecutive frames over the clip.
                for nBl in range(len(blockFiles[k]) - self.blocksize):
                    blockData = blockFiles[k][nBl:nBl + self.blocksize]
                    self.filesPath.append(blockData)
                    self.label.append(labelName.index(r))
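
    # Hedged sketch (not in the original source): a Dataset built this way also needs
    # __len__ and __getitem__. A minimal version could load every frame of a block
    # with PIL, apply the transform, and stack the frames along a new time axis.
    # It assumes `from PIL import Image` and `torch` are imported; the exact tensor
    # layout expected by the temporal model is an assumption.
    def __len__(self):
        return len(self.filesPath)

    def __getitem__(self, idx):
        frames = []
        for framePath in self.filesPath[idx]:
            img = Image.open(framePath).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            frames.append(img)
        # Resulting shape: (blocksize, C, H, W).
        return torch.stack(frames), self.label[idx]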
def getCASME2BlockData(casmepath, phase, blocksize):
    # Same sliding-window block construction as the dataset above, returned as plain lists.
    labels = []
    filesPath = []
    raw_data_loaded = getDirectoriesInPath(os.path.join(casmepath, phase))
    # Here the class labels simply follow the order of the phase directories.
    labelName = raw_data_loaded
    for r in raw_data_loaded:
        files = getFilesInPath(os.path.join(casmepath, phase, r))
        blockFiles = {}
        for f in files:
            fileName = '_'.join(f.split(os.path.sep)[-1].split('_')[:-1])
            if fileName not in blockFiles:
                blockFiles[fileName] = []

            blockFiles[fileName].append(f)

        # qntdeBlock = int(sum([len(blockFiles[k]) for k in blockFiles]) / len(blockFiles.keys()) / self.blocksize)
        for k in blockFiles:
            blockFiles[k].sort(key=lambda f: int(re.sub(r'\D', '', f)))
            for nBl in range(len(blockFiles[k]) - blocksize):
                blockData = blockFiles[k][nBl:nBl + blocksize]
                filesPath.append(blockData)
                labels.append(labelName.index(r))

    return labels, filesPath
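
# Hedged usage sketch (not in the original source): build 5-frame blocks from the
# formatted CASME2 training split, mirroring the paths and block size used above,
# and report how many blocks each class received. The helper name is hypothetical.
def _demoCASME2Blocks():
    from collections import Counter
    labels, filesPath = getCASME2BlockData('CASME2_formated', 'train', 5)
    print('%d blocks of 5 frames' % len(filesPath))
    for classIdx, count in sorted(Counter(labels).items()):
        print('  class %d: %d blocks' % (classIdx, count))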
def main():

    parser = argparse.ArgumentParser(description='Organize AffWild1')
    parser.add_argument('--pathBase', help='Path for faces', required=True)
    parser.add_argument('--whereTo', help='Where to save', required=True)
    parser.add_argument('--deleteOld',
                        help='Remove old dataset?',
                        default=1,
                        type=int)
    args = parser.parse_args()

    if args.deleteOld:
        if os.path.exists(args.whereTo):
            shutil.rmtree(args.whereTo)

        os.makedirs(args.whereTo)

    dirsFaces = getDirectoriesInPath(os.path.join(args.pathBase, 'videos'))
    for d in dirsFaces:
        print("Extracting from %s" % (d))
        if not os.path.exists(os.path.join(args.whereTo, d)):
            os.makedirs(os.path.join(args.whereTo, d))

        filesFace = getFilesInPath(os.path.join(args.pathBase, 'videos', d))
        for f in filesFace:
            fileName = f.split(os.path.sep)[-1]
            print("Doing video %s" % (fileName))

            if not os.path.exists(os.path.join(args.whereTo, d,
                                               fileName[:-4])):
                os.makedirs(os.path.join(args.whereTo, d, fileName[:-4]))

            faceAnn = openLandmarks(args.pathBase, d, fileName)
            vcap = cv2.VideoCapture(f)

            frame_number = 0
            sucs = True
            while sucs:
                sucs, imgv = vcap.read()
                if sucs and frame_number in faceAnn:
                    if not os.path.exists(
                            os.path.join(args.whereTo, d, fileName[:-4],
                                         str(frame_number) + '.jpg')):
                        x1, y1, x2, y2 = getBoundingbox(faceAnn[frame_number])
                        fImage = imgv[y1:y2, x1:x2]
                        # Skip degenerate crops (empty in either dimension).
                        if fImage.size > 0:
                            try:
                                cv2.imwrite(
                                    os.path.join(args.whereTo, d,
                                                 fileName[:-4],
                                                 str(frame_number) + '.jpg'),
                                    fImage)
                            except Exception:
                                print('Error saving frame %d' %
                                      (frame_number))
                frame_number += 1
            vcap.release()
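
# Hedged sketch (helper not shown in this listing): getBoundingbox is assumed to
# turn the landmark points of one frame into an integer crop rectangle. A minimal
# version, assuming the annotation is an iterable of (x, y) pairs, could look like
# the hypothetical helper below; the margin parameter is an assumption.
def getBoundingboxSketch(points, margin=0):
    xs = [int(p[0]) for p in points]
    ys = [int(p[1]) for p in points]
    # Clamp to non-negative coordinates so the numpy crop above stays valid.
    x1 = max(min(xs) - margin, 0)
    y1 = max(min(ys) - margin, 0)
    x2 = max(xs) + margin
    y2 = max(ys) + margin
    return x1, y1, x2, y2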
    def __init__(self, casmepath, phase, transform=None):
        # Frame-level CASME2 dataset: one sample per image file, labelled by its phase folder.
        self.transform = transform
        self.label = []
        self.filesPath = []
        raw_data_loaded = getDirectoriesInPath(os.path.join(casmepath, phase))
        labelName = ['appex', 'neutral', 'offset', 'onset']
        for r in raw_data_loaded:
            files = getFilesInPath(os.path.join(casmepath, phase, r))
            for f in files:
                self.filesPath.append(f)
                self.label.append(labelName.index(r))
def main():
    # 2D facial landmark detector from the face_alignment package.
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                      flip_input=False)
    #if os.path.exists('formated_aff'):
    #    shutil.rmtree('formated_aff')

    # OpenCV Haar cascade face detector.
    face_cascade = cv2.CascadeClassifier(
        'cascadeFolder/haarcascade_frontalface_default.xml')
    #os.makedirs('formated_aff')
    #os.makedirs(os.path.join('formated_aff', 'Train_Set'))
    #os.makedirs(os.path.join('formated_aff', 'Validation_Set'))
    folders = getDirectoriesInPath("aff_dataset")
    for f in folders:
        videos = getFilesInPath(os.path.join('aff_dataset', f))
        for v in videos:
            preProcessWithThread(v, f, fa, face_cascade)
            '''