Example #1
def main(args):
    net = setup(args)
    model_path = os.path.join(args.save_dir, args.ckpt_name)
    if os.path.isfile(model_path):
        #print("=> loading checkpoint '{}'".format(model_path))
        checkpoint = torch.load(model_path)
        start_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['net'])
        #print("=> loaded checkpoint '{}' (epoch {})"
        #   .format(model_path, start_epoch))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(model_path))

    f_path = args.input
    #print('Testing: ' + f_path)
    suffix = f_path.split('.')[-1]
    if suffix.lower() in ['jpg', 'png', 'jpeg', 'bmp', 'tif', 'nef', 'raf']:
        im = cv2.imread(f_path)
        if im is None:
            prob = -1
        else:
            prob, face_info = im_test(net, im, args)
        print(prob)

    elif suffix.lower() in ['mp4', 'avi', 'mov']:
        # Parse video
        imgs, frame_num, fps, width, height = pv.parse_vid(f_path)
        probs = []
        for fid, im in enumerate(imgs):
            #print('Frame: ' + str(fid))
            prob, face_info = im_test(net, im, args)
            probs.append(prob)
            print(prob)
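
This and the later examples all unpack the same five values from pv.parse_vid, which is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it simply decodes every frame with OpenCV and returns the frames plus basic metadata; the project's real implementation may differ.

import cv2


def parse_vid(video_path):
    """Hypothetical stand-in for pv.parse_vid.

    Returns the five values the examples unpack:
    imgs, frame_num, fps, width, height.
    """
    cap = cv2.VideoCapture(video_path)
    frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    imgs = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        imgs.append(frame)
    cap.release()
    return imgs, frame_num, fps, width, height
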
Example #2
    def __init__(
        self,
        input_vid_path,
        output_height=300,
    ):
        # Input video
        self.input_vid_path = input_vid_path
        # parse video
        print('Parsing video {}'.format(str(self.input_vid_path)))
        self.imgs, self.frame_num, self.fps, self.img_w, self.img_h = pv.parse_vid(
            str(self.input_vid_path))
        if len(self.imgs) != self.frame_num:
            warnings.warn(
                'Reported frame count does not match the number of decoded frames; '
                'using the decoded frame count instead.'
            )
            self.frame_num = len(self.imgs)
        print('Building eye blinking solution...')

        self._set_up_dlib()

        self.output_height = output_height
        factor = float(self.output_height) / self.img_h

        # Resize imgs for final video generation
        # Resize self.imgs according to self.output_height
        self.aligned_imgs = []
        self.left_eyes = []
        self.right_eyes = []

        self.resized_imgs = []
        print('Aligning faces...')
        for i, im in enumerate(tqdm(self.imgs)):
            # im is BGR (OpenCV); convert to RGB before running alignment
            face_cache = lib.align(im[:, :, (2, 1, 0)],
                                   self.front_face_detector,
                                   self.lmark_predictor)
            if len(face_cache) == 0:
                self.left_eyes.append(None)
                self.right_eyes.append(None)
                continue

            if len(face_cache) > 1:
                raise ValueError(
                    '{} faces found in the image; only one face per image is supported.'
                    .format(len(face_cache)))

            aligned_img, aligned_shapes_cur = lib.get_aligned_face_and_landmarks(
                im, face_cache)
            # crop eyes
            leye, reye = lib.crop_eye(aligned_img[0], aligned_shapes_cur[0])
            self.left_eyes.append(leye)
            self.right_eyes.append(reye)
            im_resized = cv2.resize(im, None, None, fx=factor, fy=factor)
            self.resized_imgs.append(im_resized)

        # For visualize
        self.plot_vis_list = []
        self.total_eye1_prob = []
        self.total_eye2_prob = []
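
The constructor keeps self.left_eyes and self.right_eyes aligned with the input frames and stores None wherever no face was found. A hypothetical usage sketch follows; the class name EyeBlinkDetector and the input path are made up, and only the attributes shown above are assumed.

# Hypothetical usage of the constructor above
detector = EyeBlinkDetector('input.mp4', output_height=300)
for fid, (leye, reye) in enumerate(zip(detector.left_eyes,
                                       detector.right_eyes)):
    if leye is None:
        # No face was detected in this frame
        continue
    print('frame {}: left eye {}, right eye {}'.format(
        fid, leye.shape, reye.shape))
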
Example #3
def run(input_dir, output_path):
    logging.basicConfig(filename='run.log',
                        filemode='w',
                        format='[%(asctime)s - %(levelname)s] %(message)s',
                        level=logging.INFO)

    f_list = os.listdir(input_dir)
    prob_list = []
    for f_name in f_list:
        # Parse video
        if f_name.split('.')[-1] == 'txt':  # output file with probabilities
            continue
        f_path = os.path.join(input_dir, f_name)
        print('Testing: ' + f_path)
        logging.info('Testing: ' + f_path)
        suffix = f_path.split('.')[-1]
        if suffix.lower() in [
                'jpg', 'png', 'jpeg', 'bmp', 'tif', 'nef', 'raf'
        ]:
            im = cv2.imread(f_path)
            if im is None:
                prob = -1
            else:
                prob = im_test(im)

        elif suffix.lower() in ['mp4', 'avi', 'mov']:
            # Parse video
            imgs, frame_num, fps, width, height = pv.parse_vid(f_path)
            print("Parsing video...")
            print("num of frames:", frame_num, "fps:", fps)
            probs = []
            for fid, im in enumerate(imgs):
                logging.info('Frame ' + str(fid))
                prob = im_test(im)
                if prob == -1:
                    continue
                probs.append(prob)

            # Remove opted-out frames and average the top third of frame scores
            if not probs:
                prob = -1
            else:
                prob = np.mean(
                    sorted(probs, reverse=True)[:int(frame_num / 3)])
        else:
            # Skip files with unsupported extensions
            continue

        logging.info('Prob = ' + str(prob))
        prob_list.append(prob)
        print('Prob: ' + str(prob))

        output_file = open(output_path, "a")
        output = 'Input video: ' + str(input_dir) + '       ' + str(
            f_name) + '     Fake prob: ' + str(prob) + '\n'
        output_file.write(output)
        output_file.close()

    sess.close()
    return prob_list
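
The per-video score is the mean of the highest-scoring third of frames, presumably so that a partially manipulated video still receives a high fake probability. A standalone illustration of that aggregation with made-up per-frame scores:

import numpy as np

frame_probs = [0.1, 0.2, 0.9, 0.85, 0.3, 0.95]  # made-up per-frame fake probabilities
frame_num = len(frame_probs)

# Mean of the top third of frame scores, as in run() above
video_prob = np.mean(sorted(frame_probs, reverse=True)[:int(frame_num / 3)])
print(video_prob)  # (0.95 + 0.9) / 2 = 0.925
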
Example #4
File: demo.py  Project: tanujay/cos429
def run(input_dir):
    logging.basicConfig(filename='run.log',
                        filemode='w',
                        format='[%(asctime)s - %(levelname)s] %(message)s',
                        level=logging.INFO)

    f_list = os.listdir(input_dir)
    prob_list = []
    n = 0
    correct = 0
    for f_name in f_list:
        # Parse video
        f_path = os.path.join(input_dir, f_name)
        print('Testing: ' + f_path)
        logging.info('Testing: ' + f_path)
        suffix = f_path.split('.')[-1]
        if suffix.lower() in [
                'jpg', 'png', 'jpeg', 'bmp', 'tif', 'nef', 'raf'
        ]:
            im = cv2.imread(f_path)
            if im is None:
                prob = -1
            else:
                prob = im_test(im)

        elif suffix.lower() in ['mp4', 'avi', 'mov']:
            # Parse video
            imgs, frame_num, fps, width, height = pv.parse_vid(f_path)
            probs = []
            for fid, im in enumerate(imgs):
                logging.info('Frame ' + str(fid))
                prob = im_test(im)
                if prob == -1:
                    continue
                probs.append(prob)

            # Remove opted-out frames and average the top third of frame scores
            if not probs:
                prob = -1
            else:
                prob = np.mean(
                    sorted(probs, reverse=True)[:int(frame_num / 3)])
        else:
            # Skip files with unsupported extensions
            continue

        # Added: count files scored below the 0.5 threshold (predicted real)
        if 0 <= prob < 0.5:
            correct += 1

        logging.info('Prob = ' + str(prob))
        prob_list.append(prob)
        print('No.' + str(n) + ' Prob: ' + str(prob) + "\nCorrect till now: " +
              str(correct))
        n += 1

    sess.close()
    return prob_list
Example #5
def run(input_dir):
    logging.basicConfig(filename='run.log',
                        filemode='w',
                        format='[%(asctime)s - %(levelname)s] %(message)s',
                        level=logging.INFO)

    sheetname = str(input_dir).replace('/', '-')  #
    print(sheetname)
    sheet = wb.add_sheet(sheetname)
    sheet.write(0, 0, 'FILE NAME')
    sheet.write(0, 1, 'LABEL')
    sheet.write(0, 2, 'FAKE PROBABILITY')
    row = 1  #initialize row count

    f_list = os.listdir(input_dir)
    prob_list = []
    for f_name in f_list:
        sheet.write(row, 0, str(f_name))  #
        # Parse video
        f_path = os.path.join(input_dir, f_name)
        #f_path = '/home/sampreetha/Projects/Deepfakes/Github References/CVPRW2019_Face_Artifacts/demo/'+f_name
        print(f_path)
        print('Testing: ' + f_path)
        logging.info('Testing: ' + f_path)
        suffix = f_path.split('.')[-1]
        if suffix.lower() in [
                'jpg', 'png', 'jpeg', 'bmp', 'tif', 'nef', 'raf'
        ]:
            im = cv2.imread(f_path)
            if im is None:
                prob = -1
            else:
                prob = im_test(im)

        elif suffix.lower() in ['mp4', 'avi', 'mov']:
            # Parse video
            imgs, frame_num, fps, width, height = pv.parse_vid(f_path)
            probs = []
            for fid, im in enumerate(imgs):
                logging.info('Frame ' + str(fid))
                prob = im_test(im)
                if prob == -1:
                    continue
                probs.append(prob)

            # Remove opted-out frames and average the top third of frame scores
            if not probs:
                prob = -1
            else:
                prob = np.mean(
                    sorted(probs, reverse=True)[:int(frame_num / 3)])

        logging.info('Prob = ' + str(prob))
        prob_list.append(prob)
        print('Prob: ' + str(prob))
        sheet.write(row, 2, float(prob))
        # prob == -1 (file could not be scored) falls into the REAL bucket as well
        if prob < 0.5:
            sheet.write(row, 1, 'REAL')
        else:
            sheet.write(row, 1, 'FAKE')
        row += 1
    wb.save('Fake_Probability_Artefacts.xls')  #
    sess.close()
    return f_list, prob_list
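
The xlwt-based variant writes into a global workbook wb and saves it after the loop. A minimal sketch of the module-level setup it appears to assume:

import xlwt

# Assumed module-level workbook used by the run() variant above;
# wb.save('Fake_Probability_Artefacts.xls') then writes all sheets to disk.
wb = xlwt.Workbook()
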
Example #6
async def detector_inference(model_name,
                             video_path,
                             model_path,
                             output_path,
                             threshold,
                             cam,
                             cam_model,
                             predit_video,
                             cam_video,
                             start_frame=0,
                             end_frame=None,
                             cuda=False):
    logger.info('Starting: {}'.format(video_path))

    # cam_model = 'GradCAMpp'
    # logger.info('set cam model')
    video_fileid = video_path.split('/')[-1].split('.')[0]
    if predit_video:
        video_fn = predit_video
    else:
        video_fn = f'{output_path}{video_fileid}_{model_name}.mp4'
        logger.info(f'video_fn:{video_fn}')
    if cam_video:
        video_fn_cam = cam_video
    else:
        video_fn_cam = f'{output_path}{video_fileid}_{model_name}_{cam_model}.mp4'
        logger.info(f'video_fn_cam:{video_fn_cam}')

    os.makedirs(output_path, exist_ok=True)

    input_size = 224
    class_idx = 0

    if model_name == 'SPPNet':
        net = load_network_sppnet(model_path, cuda)
    elif model_name == 'XceptionNet':
        net = load_network_xception(model_path, cuda)
    elif model_name == 'EfficientnetB7':
        net = load_network_efficientnet(model_path, cuda)
    else:
        raise ValueError('Unsupported model: {}'.format(model_name))

    # mp4 file path
    imgs, num_frames, fps, width, height = pv.parse_vid(video_path)
    probs = []
    frame = 0
    logger.info(
        f'num_frames:{num_frames}, fps:{fps}, width:{width}, height:{height}')

    # reader = cv2.VideoCapture(video_path)
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    # cv2.VideoWriter expects the frame size as (width, height)
    writer = cv2.VideoWriter(video_fn, fourcc, fps, (width, height))
    writer_cam = cv2.VideoWriter(video_fn_cam, fourcc, fps, (width, height))

    # Frame numbers and length of output video
    frame_num = 0
    assert start_frame < num_frames - 1
    end_frame = end_frame if end_frame else num_frames

    try:
        sample_list = sample_frames(start_frame, end_frame, 30)
        pbar = tqdm(total=end_frame - start_frame)
        # The detector only runs on sampled frames; its most recent result is
        # reused for the frames in between, so initialise it before the loop.
        prob, face_info, cam_im = None, None, None
        for fid, im in enumerate(imgs):
            pbar.update(1)

            if fid in sample_list:
                prob, face_info, cam_im = face_mtcnn(fid, model_name, net, im,
                                                     input_size, cuda)
            if face_info is None:
                # Nothing detected yet; write the raw frame unchanged
                bnd_im = im
            else:
                bnd_im = draw_face_score(model_name, im, face_info, prob,
                                         threshold)
            writer.write(bnd_im)
            if cam_im is not None:
                writer_cam.write(cam_im)

    except Exception as e:
        logger.error(f'generate image:{e}')
    pbar.close()
    if writer is not None:
        writer.release()
        logger.info(f'Finished! Output saved under {output_path}{video_fn}')
    else:
        logger.info('Input video file was empty')
    if writer_cam is not None:
        writer_cam.release()
        logger.info(
            f'Finished! Grad-cam Output saved under {output_path}{video_fn_cam}'
        )
    else:
        logger.info('Input video file was empty')
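
detector_inference only runs the detector on the frame indices returned by sample_frames and reuses the latest result on the frames in between. That helper is not shown; a plausible sketch, assuming its third argument is a sampling stride in frames (the real helper may pick frames differently):

def sample_frames(start_frame, end_frame, step):
    """Hypothetical helper: indices of every `step`-th frame in the range."""
    return set(range(start_frame, end_frame, step))

With a typical 30 fps video this would score roughly one frame per second, while the frames in between simply redraw the most recent bounding box and score.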