def test_initial_and_final_frame(news_video, ice_video):
    # For both sample clips, the first and last frames must differ visually.
    for video in (news_video, ice_video):
        with closing(VideoSequence(video)) as seq:
            first, last = seq[0], seq[-1]
        assert_images_not_equal(first, last)
def __xtest_iteration(news_video, ice_video):
    # Disabled test (leading __x): iterating a sequence should yield
    # exactly len(seq) frames for both sample clips.
    for video in (news_video, ice_video):
        with closing(VideoSequence(video)) as seq:
            frame_count = sum(1 for _ in seq)
            assert frame_count == len(seq)
示例#3
0
def generate_faces(video_path):
    """Extract face crops from a video as input data for emotion prediction.

    Args:
        video_path: path of the video file to process.

    Returns:
        Tuple ``(times, faces, n_frames)`` where ``times`` holds the sampled
        frame indices, ``faces`` the cropped grayscale face images, and
        ``n_frames`` the total number of frames in the video.  On any
        failure the best-effort result ``([], [], 0)`` is returned.
    """
    try:
        with closing(VideoSequence(video_path)) as frames:
            times = []
            faces = []
            # Calculate the largest face bounding box over a sample of
            # frames so every crop uses one common region.
            box = get_max_box(frames=frames, nb_frames=10)

            # Sample every NB_STEP_FRAME-th frame.
            for i in np.arange(0, len(frames), step=NB_STEP_FRAME):
                gray = cv2.cvtColor(np.array(frames[i]), cv2.COLOR_RGB2GRAY)
                cropped_face = crop_faces(gray, box=box)
                times.append(i)
                faces.append(cropped_face)
            return times, faces, len(frames)
    # Was a bare `except:` -- that also swallowed SystemExit and
    # KeyboardInterrupt.  Keep the deliberate best-effort fallback,
    # but only for real errors.
    except Exception:
        print('Error')
        return [], [], 0
def test_first_few_frames_differ(news_video):
    # Each of the first five frames must be non-blank and have a mean
    # brightness different from the previous frame's.
    with closing(VideoSequence(news_video)) as seq:
        previous_mean = 0.0
        for frame_idx in range(5):
            print("Frame", frame_idx)
            current_mean = ImageStat.Stat(seq[frame_idx]).mean[0]
            assert current_mean != previous_mean
            assert current_mean > 0
            previous_mean = current_mean
    def start_tracking(self,
                       video_path,
                       detector_min_conf=0.3,
                       tracker_min_conf=20,
                       aligners=None):
        """Locate one face per frame, alternating detection and tracking.

        Runs the face detector until a face is found, then hands off to the
        correlation tracker; falls back to detection whenever the tracker's
        confidence drops below ``tracker_min_conf``.

        Args:
            video_path: path of the video to process.
            detector_min_conf: minimum detector confidence to start tracking
                (only used when detections expose a ``confidence`` attribute).
            tracker_min_conf: minimum tracker confidence to keep tracking.
            aligners: optional iterable of face aligners; each gets called
                with the frame image path, the face rectangle and an output
                file name for every frame where a face was found.

        Returns:
            Dict mapping frame index -> dlib rectangle of the face found in
            that frame (frames with no face are omitted).
        """
        if aligners is None:
            # Avoid the mutable-default-argument pitfall of the original
            # `aligners=[]`; behaviour for callers is unchanged.
            aligners = []
        frames_faces_dic = {}
        with closing(VideoSequence(video_path)) as frames:
            is_tracking = False

            for idx, frame in enumerate(frames):
                face_rect = None
                # Round-trip the PIL frame through a temp file because the
                # detector and aligners consume an image file / array.
                tmp_frame_path = "temp.png"
                frame.save(tmp_frame_path)
                frame_px = io.imread(tmp_frame_path)

                if is_tracking:
                    confidence = self.object_tracker.update(frame_px)
                    if confidence > tracker_min_conf:
                        d = self.object_tracker.get_position()
                        face_rect = dlib.rectangle(int(d.left()), int(d.top()),
                                                   int(d.right()),
                                                   int(d.bottom()))
                    else:
                        # Lost the face -- fall back to detection below.
                        is_tracking = False

                if not is_tracking:
                    dets = self.detector(frame_px)
                    # A face was detected -> (re)start tracking.
                    if len(dets) > 0:
                        if hasattr(dets[0], 'confidence'):
                            # Detector exposes a score: gate on it.
                            if dets[0].confidence > detector_min_conf:
                                is_tracking = True
                                face_rect = dets[0].rect
                                self.object_tracker.start_track(
                                    frame_px, face_rect)
                        else:
                            # Plain detector returns bare rectangles.
                            is_tracking = True
                            face_rect = dets[0]
                            self.object_tracker.start_track(frame_px, face_rect)

                if face_rect is not None:
                    frames_faces_dic[idx] = face_rect
                    for aligner in aligners:
                        file_name = str(idx).zfill(5) + ".jpg"
                        aligner.align_face(tmp_frame_path, face_rect, file_name)
        return frames_faces_dic
示例#6
0
 | |  | | |__) | |  | | |__ | |   | |  | | |  | | |__  | |__) |
 | |  | |  _  /| |  | |  __|| |   | |  | | |  | |  __| |  _  / 
 | |__| | | \ \| |__| | |___| |___| |__| | |__| | |____| | \ \ 
  \___\_\_|  \_\_____/|______\_____\____/|_____/|______|_|  \_\
                                                               
                                                                      
        by David Schep ;)
        https://github.com/DavidEncrypted

""")

print "Decoding Started!\n"

dataset = set()

with closing(VideoSequence("hak5qr.mp4")) as frames:
    print "Decoding: ", len(frames), " frames\n\n"
    curframe = 0
    bar = progressbar.ProgressBar(maxval=len(frames),
                                  widgets=[
                                      progressbar.Bar('=', '[', ']'), ' ',
                                      progressbar.Percentage()
                                  ])
    bar.start()
    while curframe < len(frames):
        frame = frames[curframe]
        frame.save("curframe.jpg")

        try:
            result = subprocess.check_output(["zbarimg", "curframe.jpg"],
                                             stderr=subprocess.STDOUT)
def test_invalid_file():
    # Opening a file that is not a video must raise IOError.
    pytest.raises(IOError, VideoSequence, __file__)
def test_no_such_file():
    # A path that does not exist must raise IOError.
    pytest.raises(IOError, VideoSequence, "does-not-exist")
示例#9
0
).download(folderNumber)

# Pick the most recently created file in the download folder -- presumably
# the video that was just downloaded (TODO confirm against the caller).
list_of_files = glob.glob(
    folderNumber + '/*')  # * means all if need specific format then *.csv
latest_file = max(list_of_files, key=os.path.getctime)

# Output directories: full frames and cropped face images.
folderName = folderNumber + "/origImages"
faceFolderName = folderNumber + "/faceImages"

if not os.path.exists(folderName):
    os.makedirs(folderName)

if not os.path.exists(faceFolderName):
    os.makedirs(faceFolderName)

with closing(VideoSequence(latest_file)) as frames:
    for idx, frame in enumerate(frames[:]):
        filename = folderName + "/" + "frame{:04d}.jpg".format(idx)
        frame.save(filename)

        # Load the jpg file into a numpy array
        image = face_recognition.load_image_file(filename)

        # Find all the faces in the image using the default HOG-based model.
        # This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
        # See also: find_faces_in_picture_cnn.py
        face_locations = face_recognition.face_locations(image)

        if (len(face_locations) == 1):
            top, right, bottom, left = face_locations[0]
            faceFilename = faceFolderName + "/" + "frame{:04d}.jpg".format(idx)
示例#10
0
from contextlib import closing
from videosequence import VideoSequence
import pdb

# Dump every 60th frame, starting at frame 100, of got.mp4 into ./frames/.
with closing(VideoSequence("got.mp4")) as frames:
    for idx, frame in enumerate(frames[100:]):
        if idx % 60 != 0:
            continue
        frame.save("./frames/frame{:05d}.jpg".format(idx))
示例#11
0
import csv
import glob
import os
import sys
from contextlib import closing

from videosequence import VideoSequence

# This file is for splitting video files.

# Get the command line args: the first is the mp4 path, the second is the
# "andy or not" label.  Fall back to interactive prompts when absent.
# (The original wrapped `sys.argv` access in try/except OSError -- reading
# sys.argv cannot raise OSError, so that guard was dead code.)
commandLineArgs = sys.argv

mp4File = str(commandLineArgs[1]) if len(commandLineArgs) > 1 else input("Enter mp4 file: ")
andyornot = int(commandLineArgs[2]) if len(commandLineArgs) > 2 else int(input("Enter if they're andy or not: "))

# Split frames: save every frame after the first into images/.
with closing(VideoSequence(mp4File)) as frames:
    for idx, frame in enumerate(frames[1:]):
        frame.save("images/frame{:}.jpg".format(idx))

# Save one label row per extracted jpg.  newline='' is required for the
# csv module to control line endings itself.
with open('labels.csv', 'w', newline='') as myfile:
    wrtr = csv.writer(myfile, delimiter=',', quotechar='"')
    for jpgfile in glob.iglob(os.path.join('images/', "*.jpg")):
        # writerow() expects a sequence of fields; the original passed the
        # bare int, which raises `_csv.Error: iterable expected` at runtime.
        wrtr.writerow([andyornot])
示例#12
0
def videotoimage(folder, video, id, vvid):
    # Convert an uploaded video into numbered training/test images, record
    # each image in the DB, regenerate the XML/CSV label files and kick off
    # TF-record generation.  Python 2 code (print statements).
    # NOTE(review): relies on module-level `cursor`, `insert`, `update`,
    # `image_to_xml_*`, `xml_to_csv`, `configfile_2_pbtxt` defined elsewhere.
    # WARNING(review): SQL built by string interpolation -- injection risk
    # if `id` comes from untrusted input; prefer a parameterised query.
    cursor.execute(
        "SELECT count(id) from video_collection where instruction_id=%s" %
        (id))
    c = cursor.fetchall()
    # c becomes the 1-based sequence number of this video for the instruction.
    c = c[0][0] + 1

    # b = video file name without its extension.
    b = video.split(".")[0]
    # print b
    b = str(b)
    print video
    print c
    f = str(folder)
    with closing(VideoSequence("static/img/upload/video/" + video)) as frames:
        for idx, frame in enumerate(frames[0:]):
            # Output path prefixes: all frames, first 90 -> train, first
            # 10 -> test (the test set is a subset of the train set).
            s = "static/img/upload/img/" + str(id) + "_" + str(c)
            vid = str(folder)
            s1 = "static/img/upload/video/training/" + vid + "/images/train/" + str(
                id) + "_" + str(c)
            s2 = "static/img/upload/video/training/" + vid + "/images/test/" + str(
                id) + "_" + str(c)
            frame.save(s + "_{:04d}.jpg".format(idx + 1))
            if (idx + 1) <= 90:
                frame.save(s1 + "_{:04d}.jpg".format(idx + 1))
            if (idx + 1) <= 10:
                frame.save(s2 + "_{:04d}.jpg".format(idx + 1))
            x = str(id) + "_" + str(c) + "_{:04d}.jpg".format(idx + 1)
            label = str(id) + "_" + str(c) + "_{:04d}".format(idx + 1)
            order = idx + 1
            insert.training_images(id, vvid, x, label, c, "Finished")
        # NOTE(review): `order` is unbound here if the video had no frames
        # (the loop body never ran) -- would raise NameError.
        update.video_collection(c, order, video)
        vv = str(id) + "_step_" + str(c)
        image_to_xml_test(vv, str(f))
        image_to_xml_train(vv, str(f))
        # bb = trailing "_"-separated token of the base file name.
        bb = b.split("_")[::-1][0]
        if int(bb) != 1:

            # NOTE(review): dead branch -- the outer condition requires
            # int(bb) != 1 while this inner one requires int(bb) == 1, so
            # this body can never execute.  Also `dir` here is the Python
            # builtin (likely a leftover loop variable name); the formatted
            # paths would be garbage if this ever ran.  Verify intent.
            if f in os.listdir('static/img/upload/video/training') and int(
                    bb) == 1:
                image_path = os.path.join(
                    os.getcwd(),
                    'static/img/upload/video/training/{}/images/{}'.format(
                        f, dir))
                xml_df = xml_to_csv(image_path)
                f1 = open(
                    'static/img/upload/video/training/{}/data/{}_labels.csv'.
                    format(f, dir), 'a+')
                xml_df.to_csv(f1, index=None, header=False)
        else:

            # Regenerate test/train label CSVs from the per-image XML files.
            # (`dir` shadows the builtin -- works, but rename would be safer.)
            for dir in ['test', 'train']:
                image_path = os.path.join(
                    os.getcwd(),
                    'static/img/upload/video/training/{}/images/{}'.format(
                        f, dir))
                xml_df = xml_to_csv(image_path)
                xml_df.to_csv(
                    'static/img/upload/video/training/{}/data/{}_labels.csv'.
                    format(f, dir),
                    index=None)

        # Shell out to build the TF-records for both splits.
        os.system(
            "python generate_tfrecord.py --csv_input=static/img/upload/video/training/{}/data/test_labels.csv  --output_path=static/img/upload/video/training/{}/data/test.record --file_name={}"
            .format(f, f, b))
        os.system(
            "python generate_tfrecord.py --csv_input=static/img/upload/video/training/{}/data/train_labels.csv  --output_path=static/img/upload/video/training/{}/data/train.record --file_name={}"
            .format(f, f, b))
        configfile_2_pbtxt(f, b, vv)
        return "success"
def test_slice_ice(ice_video):
    # Frames obtained by individual indexing and by slicing must match.
    with closing(VideoSequence(ice_video)) as seq:
        indexed = [seq[i] for i in range(5, 10)]
        sliced = seq[5:10]
        for via_index, via_slice in zip(indexed, sliced):
            assert_images_equal(via_index, via_slice)
def test_size(news_video):
    # The news clip is CIF resolution (352x288).
    with closing(VideoSequence(news_video)) as seq:
        assert (seq.width, seq.height) == (352, 288)
def test_duration(news_video, ice_video):
    # Known frame counts for the two sample clips.
    for video, expected_frames in ((news_video, 288), (ice_video, 468)):
        with closing(VideoSequence(video)) as seq:
            assert len(seq) == expected_frames