def take_attendence(date, filename, course):
	threshold = 0.6
	conn = sqlite3.connect('student.db')
	print('opened successfully')
	json_file = "Data/dictionary_reverse.json"
	json_rev_file = "Data/dictionary.json"
	with open (json_file) as data_file:
		dictionary = json.load(data_file)

	with open (json_rev_file) as data_file:
		dictionary_rev = json.load(data_file)

	table_name = "attendence"+course
	cursor = conn.execute("select id,name from %s"%(table_name))
	ids = []
	names = {}
	for row in cursor:
		ids.append(row[0])
		names[row[0]] = row[1]

	video_capture = cv2.VideoCapture(filename)

	found_ids = []
	count = -1
	model_name = 'models/trained_model.pkl'
	clf = joblib.load(model_name) 
	while video_capture.isOpened():
		ret, frame = video_capture.read()
		if frame is None:
			break
		print(frame.shape)
		count = count + 1
		resolution = (768, 1368)
		frame = cv2.resize(frame, resolution)
		frame_other = frame.copy()
		if count%30==0:
			print(count)
			rgb_small_frame = frame[:, :, ::-1]
			face_locations = find_faces.face_locations(rgb_small_frame)
			face_recognized = 0


			#New code from here
			font = cv2.FONT_HERSHEY_DUPLEX
			for top, right, bottom, left in face_locations:
				cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
			string = "Found "+str(len(face_locations))+" Faces in frame number "+str(count)
			cv2.putText(frame, string,(0,700), font, 1.0, (0, 0, 255), 1)
			face_names = []
			cv2.imshow('Video', frame)
			#new code ends here


			face_encodings = find_faces.face_encodings(rgb_small_frame, known_face_locations = face_locations)
			if len(face_encodings)>=1:
				result_all = clf.predict_proba(face_encodings)
				for result in result_all:
					result = list(result)
					index = result.index(max(result))
					if str(index) in dictionary_rev:
						id_no = dictionary_rev[str(index)]
						if id_no in ids:
							print(id_no)
							print(max(result))
							if max(result)>=threshold:
								face_names.append(names[id_no])
								if id_no not in found_ids:
									found_ids.append(id_no)
									face_recognized = face_recognized + 1
							else:
								face_names.append("Unidentified")



			#new code starts from here
			cv2.waitKey(5000)
			cv2.destroyAllWindows()
			for (top, right, bottom, left), name in zip(face_locations, face_names):
				cv2.rectangle(frame_other, (left, top), (right, bottom), (0, 0, 255), 2)
				cv2.rectangle(frame_other, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
				cv2.putText(frame_other, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
			string2 = "Identified "+str(face_recognized)+" Faces out of  "+str(len(face_locations))+" in Frame Number "+str(count)
			cv2.putText(frame_other, string2,(0,250), font, 1.0, (0, 255, 255), 1)
			cv2.imshow('Video', frame_other)
			cv2.waitKey(5000)
			cv2.destroyAllWindows()


			# new code ends here

	date = "\""+date+"\""
	print(found_ids)
	conn.execute("alter table %s\
		add %s INT default 0" %(table_name, date))

	conn.commit()

	for x in found_ids:
		conn.execute("update %s set total_attendence = total_attendence+1 where id=%d"%(table_name,int(x)))
		conn.execute("update %s set %s = 1 where id=%d"%(table_name,date,int(x)))

	conn.commit()

	df = pd.read_sql_query("select * from %s"%(table_name),conn)
	df1 = df.pop('total_attendence')
	df['total_attendence'] = df1
	head = df.columns

	new_head = []
	for x in head:
		new_head.append(convert_date(x))

	df.columns = new_head
	
	csv_file = "CSV/"+table_name+".csv"
	df.to_csv(csv_file, index=False)

	json_file = "Data/"+table_name+".json"
	with open (json_file) as data_file:
		data = json.load(data_file)
	cursor = conn.execute("select id, total_attendence from %s"%(table_name))
	dictionary = {}
	for row in cursor:
		dictionary[row[0]]=row[1]
	for (i,x) in enumerate(data['students']):
		if x['id'] in dictionary:
			data['students'][i]['attendence']=dictionary[x['id']]
			del(dictionary[x['id']])
	for x in dictionary:
		temp={}
		temp['id']=x
		temp['attendence']=dictionary[x]
		data['students'].append(temp)

	with open(json_file, 'w') as outfile:
		json.dump(data, outfile)

	conn.close()
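
# Hedged sketch (not part of the original project): one way a classifier such as
# models/trained_model.pkl could be produced. It assumes training images named
# "<label>_<anything>.jpg", where <label> is the integer class index stored in
# Data/dictionary.json, and that find_faces above is the face_recognition package.
import glob
import os

import face_recognition
import joblib
from sklearn.neighbors import KNeighborsClassifier


def train_classifier(image_dir="TrainingImages", model_out="models/trained_model.pkl"):
	encodings, labels = [], []
	for path in glob.glob(os.path.join(image_dir, "*.jpg")):
		label = int(os.path.basename(path).split("_")[0])
		image = face_recognition.load_image_file(path)
		faces = face_recognition.face_encodings(image)
		if faces:
			encodings.append(faces[0])  # one face per training photo
			labels.append(label)
	clf = KNeighborsClassifier(n_neighbors=3)  # supports predict_proba, as used above
	clf.fit(encodings, labels)
	joblib.dump(clf, model_out)
	return clf
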
Example #2
## SOURCE: https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_webcam_faster.py
## virtual env: faceRecVideo

import face_recognition
import cv2


# initialize image capture from the webcam
video_capture = cv2.VideoCapture(0)

# load the test images
anderson_image = face_recognition.load_image_file("/home/anderson/Imagens/photo_0.jpg")
morpheus_image = face_recognition.load_image_file("/home/anderson/Imagens/morpheus.jpg")

# compute the face encodings
anderson_face_encoding = face_recognition.face_encodings(anderson_image)[0]
morpheus_face_encoding = face_recognition.face_encodings(morpheus_image)[0]

# arrays of the encodings and their corresponding names
known_face_encodings = [
    anderson_face_encoding,
    morpheus_face_encoding
]
known_face_names = [
    "Mr. Anderson",
    "Morpheus"
]

# auxiliary variables
face_locations = []
face_encodings = []
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()
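
# Hedged sketch of the rest of this buffer-dropping reader (the fragment above only
# shows __init__): a daemon thread keeps only the most recent frame in the queue, so
# read() never returns stale frames. The class name here is illustrative.
import queue
import threading

import cv2


class BufferlessVideoCapture:
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard the previous, unprocessed frame
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        return self.q.get()
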
c = conn.cursor()

def assure_path_exists(path):
	dir = os.path.dirname(path)
	if not os.path.exists(dir):
		os.makedirs(dir)


face_id = str(val1)
name = str(val2)

c.execute("INSERT INTO students(UID,student_name,attendance) VALUES(?,?,?)",(face_id,name,'Absent'))
conn.commit()

#start capturing Video
vid_cam = cv2.VideoCapture(0)

#detect object in video stream using Haarcascade Frontal Face
face_detector = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')

#initializing count variable to count scanned images
count = 0
assure_path_exists("dataset/")

#start scanning loop
while(True):
	#capture video frame
	_,image_frame = vid_cam.read()
	cv2.imshow('frame',image_frame)

	#convert frame to grayscale
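	# Hedged sketch of how this capture loop is usually completed (not the original
	# author's code): grayscale conversion, Haar face detection, saving the face
	# crops into dataset/, and a stop condition. Uses face_detector, face_id and
	# count defined above.
	gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)
	faces = face_detector.detectMultiScale(gray, 1.3, 5)
	for (x, y, w, h) in faces:
		count += 1
		cv2.rectangle(image_frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
		cv2.imwrite("dataset/User." + face_id + "." + str(count) + ".jpg", gray[y:y + h, x:x + w])
	# stop on ESC or after 100 samples
	if cv2.waitKey(100) & 0xFF == 27 or count >= 100:
		break

vid_cam.release()
cv2.destroyAllWindows()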
# split a video into individual images
import numpy as np
import cv2
file = r'E:\testimage\body\Scenery.flv'
cap = cv2.VideoCapture(file)
is_opened = cap.isOpened()  # check whether the video opened successfully
print(is_opened)
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(fps,width,height)
i = 0
while is_opened:
    i = i + 1
    (flag, frame) = cap.read()  # read one frame; flag: whether the read succeeded, frame: the image data
    if not flag:
        break
    frameInfo = frame.shape
    height2 = frameInfo[0]
    width2 = frameInfo[1]
    matScale = np.float32([[0.5, 0, 0], [0, 0.5, 0]])
    dst = cv2.warpAffine(frame, matScale, (int(width2 / 2), int(height2 / 2)))
    dst2 = dst[0:144, 0:176]
    fileName = 'image' + str(i) + '.jpg'
    print(fileName)
    cv2.imwrite(fileName, dst2, [cv2.IMWRITE_JPEG_QUALITY, 100])
cap.release()
print('end!')
Example #6
def benchmark(
    model_path,
    video_path,
    tf_config=None,
    resize=None,
    pixels=None,
    cropping=None,
    dynamic=(False, 0.5, 10),
    n_frames=1000,
    print_rate=False,
    display=False,
    pcutoff=0.0,
    display_radius=3,
    cmap="bmy",
    save_poses=False,
    save_video=False,
    output=None,
) -> typing.Tuple[np.ndarray, tuple, bool, dict]:
    """ Analyze DeepLabCut-live exported model on a video:
    Calculate inference time,
    display keypoints, or
    get poses/create a labeled video

    Parameters
    ----------
    model_path : str
        path to exported DeepLabCut model
    video_path : str
        path to video file
    tf_config : :class:`tensorflow.ConfigProto`
        tensorflow session configuration
    resize : int, optional
        resize factor. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
    pixels : int, optional
        downsize image to this number of pixels, maintaining aspect ratio. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
    cropping : list of int
        cropping parameters in pixel number: [x1, x2, y1, y2]
    dynamic : tuple of (state, detection threshold, margin)
        If state is True, dynamic cropping is performed: once an object is detected (i.e. any body part exceeds the detection threshold),
        the object boundaries are computed from the smallest/largest x and y positions of all body parts. This window is expanded by the
        margin, and from then on only the posture within the crop is analyzed (until the object is lost, i.e. all body parts fall below
        the detection threshold). The current position is used to update the crop window for the next frame (this is why the margin is
        important and should be set large enough given the movement of the animal).
    n_frames : int, optional
        number of frames to run inference on, by default 1000
    print_rate : bool, optional
        flag to print inference rate frame by frame, by default False
    display : bool, optional
        flag to display keypoints on images. Useful for checking the accuracy of exported models.
    pcutoff : float, optional
        likelihood threshold to display keypoints
    display_radius : int, optional
        size (radius in pixels) of keypoint to display
    cmap : str, optional
        a string indicating the :package:`colorcet` colormap, `options here <https://colorcet.holoviz.org/>`, by default "bmy"
    save_poses : bool, optional
        flag to save poses to an hdf5 file. If True, operates similar to :function:`DeepLabCut.benchmark_videos`, by default False
    save_video : bool, optional
        flag to save a labeled video. If True, operates similar to :function:`DeepLabCut.create_labeled_video`, by default False
    output : str, optional
        path to directory to save pose and/or video file. If not specified, will use the directory of video_path, by default None

    Returns
    -------
    :class:`numpy.ndarray`
        vector of inference times
    tuple
        (image width, image height)
    bool
        tensorflow inference flag
    dict
        metadata for video

    Example
    -------
    Return a vector of inference times for 10000 frames:
    dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000)

    Return a vector of inference times, resizing images to half the width and height for inference
    dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000, resize=0.5)

    Display keypoints to check the accuracy of an exported model
    dlclive.benchmark('/my/exported/model', 'my_video.avi', display=True)

    Analyze a video (save poses to hdf5) and create a labeled video, similar to :function:`DeepLabCut.benchmark_videos` and :function:`create_labeled_video`
    dlclive.benchmark('/my/exported/model', 'my_video.avi', save_poses=True, save_video=True)
    """

    ### load video

    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    n_frames = (n_frames if (n_frames > 0) and
                (n_frames < cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1) else
                (cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1))
    n_frames = int(n_frames)
    im_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
               cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    ### get resize factor

    if pixels is not None:
        resize = np.sqrt(pixels / (im_size[0] * im_size[1]))
    if resize is not None:
        im_size = (int(im_size[0] * resize), int(im_size[1] * resize))

    ### create video writer

    if save_video:
        colors = None
        out_dir = (output if output is not None else os.path.dirname(
            os.path.realpath(video_path)))
        out_vid_base = os.path.basename(video_path)
        out_vid_file = os.path.normpath(
            f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_LABELED.avi"
        )
        fourcc = cv2.VideoWriter_fourcc(*"DIVX")
        fps = cap.get(cv2.CAP_PROP_FPS)
        vwriter = cv2.VideoWriter(out_vid_file, fourcc, fps, im_size)

    ### check for pandas installation if using save_poses flag

    if save_poses:
        try:
            import pandas as pd

            use_pandas = True
        except ImportError:
            use_pandas = False
            warnings.warn(
                "Could not find installation of pandas; saving poses as a numpy array with the dimensions (n_frames, n_keypoints, [x, y, likelihood])."
            )

    ### initialize DLCLive and perform inference

    inf_times = np.zeros(n_frames)
    poses = []

    live = DLCLive(
        model_path,
        tf_config=tf_config,
        resize=resize,
        cropping=cropping,
        dynamic=dynamic,
        display=display,
        pcutoff=pcutoff,
        display_radius=display_radius,
        display_cmap=cmap,
    )

    poses.append(live.init_inference(frame))
    TFGPUinference = True if len(live.outputs) == 1 else False

    iterator = range(n_frames) if (print_rate) or (display) else tqdm(
        range(n_frames))
    for i in iterator:

        ret, frame = cap.read()

        if not ret:
            warnings.warn(
                "Did not complete {:d} frames. There probably were not enough frames in the video {}."
                .format(n_frames, video_path))
            break

        start_pose = time.time()
        poses.append(live.get_pose(frame))
        inf_times[i] = time.time() - start_pose

        if save_video:

            if colors is None:
                all_colors = getattr(cc, cmap)
                colors = [
                    ImageColor.getcolor(c, "RGB")[::-1] for c in
                    all_colors[::int(len(all_colors) / poses[-1].shape[0])]
                ]

            this_pose = poses[-1]
            for j in range(this_pose.shape[0]):
                if this_pose[j, 2] > pcutoff:
                    x = int(this_pose[j, 0])
                    y = int(this_pose[j, 1])
                    frame = cv2.circle(frame, (x, y),
                                       display_radius,
                                       colors[j],
                                       thickness=-1)

            if resize is not None:
                frame = cv2.resize(frame, im_size)
            vwriter.write(frame)

        if print_rate:
            print("pose rate = {:d}".format(int(1 / inf_times[i])))

    if print_rate:
        print("mean pose rate = {:d}".format(int(np.mean(1 / inf_times))))

    ### gather video and test parameterization

    # dont want to fail here so gracefully failing on exception --
    # eg. some packages of cv2 don't have CAP_PROP_CODEC_PIXEL_FORMAT
    try:
        fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC))
    except:
        fourcc = ""

    try:
        fps = round(cap.get(cv2.CAP_PROP_FPS))
    except:
        fps = None

    try:
        pix_fmt = decode_fourcc(cap.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
    except:
        pix_fmt = ""

    try:
        frame_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    except:
        frame_count = None

    try:
        orig_im_size = (
            round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
    except:
        orig_im_size = None

    meta = {
        "video_path": video_path,
        "video_codec": fourcc,
        "video_pixel_format": pix_fmt,
        "video_fps": fps,
        "video_total_frames": frame_count,
        "original_frame_size": orig_im_size,
        "dlclive_params": live.parameterization,
    }

    ### close video and tensorflow session

    cap.release()
    live.close()

    if save_video:
        vwriter.release()

    if save_poses:

        cfg_path = os.path.normpath(f"{model_path}/pose_cfg.yaml")
        ruamel_file = ruamel.yaml.YAML()
        dlc_cfg = ruamel_file.load(open(cfg_path, "r"))
        bodyparts = dlc_cfg["all_joints_names"]
        poses = np.array(poses)

        if use_pandas:

            poses = poses.reshape(
                (poses.shape[0], poses.shape[1] * poses.shape[2]))
            pdindex = pd.MultiIndex.from_product(
                [bodyparts, ["x", "y", "likelihood"]],
                names=["bodyparts", "coords"])
            pose_df = pd.DataFrame(poses, columns=pdindex)

            out_dir = (output if output is not None else os.path.dirname(
                os.path.realpath(video_path)))
            out_vid_base = os.path.basename(video_path)
            out_dlc_file = os.path.normpath(
                f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.h5"
            )
            pose_df.to_hdf(out_dlc_file, key="df_with_missing", mode="w")

        else:

            out_dir = (output if output is not None else os.path.dirname(
                os.path.realpath(video_path)))
            out_vid_base = os.path.basename(video_path)
            out_dlc_file = os.path.normpath(
                f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.npy"
            )
            np.save(out_dlc_file, poses)

    return inf_times, im_size, TFGPUinference, meta
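
# Hedged usage sketch (model and video paths are placeholders): run the benchmark
# above with dynamic cropping enabled -- state True, detection threshold 0.5,
# margin of 10 pixels -- and save the resulting poses next to the video.
if __name__ == "__main__":
    inf_times, im_size, tf_gpu, meta = benchmark(
        "/my/exported/model",
        "my_video.avi",
        dynamic=(True, 0.5, 10),
        save_poses=True,
    )
    print("mean inference time: {:.4f} s".format(inf_times.mean()))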
Example #7
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from tkinter import *
import cv2 as cv
from PIL import Image, ImageTk, ImageGrab,ImageEnhance
import threading
from urllib import request
import ssl
from bs4 import BeautifulSoup
import tkinter.font as tkFont
from tkinter.filedialog import askdirectory
from tkinter import simpledialog
import time
# camera setup
# 0 is the camera index; with only one camera it defaults to 0
capture = cv.VideoCapture(0)

def closecamera():
    capture.release()
def button1():
    global headimage  # head-shot photo
    global halfimage  # half-length photo
    name, idnum = getnameandID()
    if idnum=="IDCardNo'].value='":
        idnum = simpledialog.askstring('Enter ID number manually', 'Could not read it, please enter it', initialvalue="Please enter the ID card number")
    ref, frame = capture.read()
    cvimage = cv.cvtColor(frame, cv.COLOR_BGR2RGBA)
    face_cascade = cv.CascadeClassifier('./config.xml')
    faces = face_cascade.detectMultiScale(cvimage, 1.15, 3)  # face detection
    try:
        x=faces[0][0]
import cv2

vidcap = cv2.VideoCapture('BadApple.mp4')
success,image = vidcap.read()
count = 0

AsciVid = []

while success:
    image = cv2.resize(image,(150,50))
    chars = []
    for i in image:
        for j in i:
            if(sum(j)<191):
                chars.append(' ')
            elif (sum(j)<382):
                chars.append('.')
            elif (sum(j)<573):
                chars.append('*')            
            else:
                chars.append('@')
        chars.append('\n')

    AsciVid.append(''.join(chars))
    f = open("./Frames/Frame%d.txt"%(count), "w")
    f.write(''.join(chars))
    f.close()
    success,image = vidcap.read()
    count += 1
    print("converting..........")
Example #9
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

        self._curveFilter = filters.BGRPortraCurveFilter()
Example #10
    def _extract(self):
        ntrials = len(self.bpod_trials)

        cam_times = []
        n_frames = 0
        n_out_of_sync = 0
        for ind in np.arange(ntrials):
            # get upgoing and downgoing fronts
            pin = np.array(self.bpod_trials[ind]['behavior_data']
                           ['Events timestamps'].get('Port1In'))
            pout = np.array(self.bpod_trials[ind]['behavior_data']
                            ['Events timestamps'].get('Port1Out'))
            # some trials at startup may not have the camera working, discard
            if np.all(pin) is None:
                continue
            # if the trial starts in the middle of a square, discard the first downgoing front
            if pout[0] < pin[0]:
                pout = pout[1:]
            # same if the last sample is during an upgoing front, always
            # put size as it happens last
            pin = pin[:pout.size]
            frate = np.median(np.diff(pin))
            if ind > 0:
                """
                assert that the pulses have the same length and that we don't miss frames during
                the trial, the refresh rate of bpod is 100us
                """
                test1 = np.all(np.abs(1 - (pin - pout) / np.median(pin - pout)) < 0.1)
                test2 = np.all(np.abs(np.diff(pin) - frate) <= 0.00011)
                if not all([test1, test2]):
                    n_out_of_sync += 1
            # grow a list of cam times for each trial
            cam_times.append(pin)
            n_frames += pin.size

        if n_out_of_sync > 0:
            _logger.warning(f"{n_out_of_sync} trials with bpod camera frame times not within"
                            f" 10% of the expected sampling rate")

        t_first_frame = np.array([c[0] for c in cam_times])
        t_last_frame = np.array([c[-1] for c in cam_times])
        frate = 1 / np.nanmedian(np.array([np.median(np.diff(c)) for c in cam_times]))
        intertrial_duration = t_first_frame[1:] - t_last_frame[:-1]
        intertrial_missed_frames = np.int32(np.round(intertrial_duration * frate)) - 1

        # initialize the full times array
        frame_times = np.zeros(n_frames + int(np.sum(intertrial_missed_frames)))
        ii = 0
        for trial, cam_time in enumerate(cam_times):
            if cam_time is not None:
                # populate first the recovered times within the trials
                frame_times[ii: ii + cam_time.size] = cam_time
                ii += cam_time.size
            if trial == (len(cam_times) - 1):
                break
            # then extrapolate in-between
            nmiss = intertrial_missed_frames[trial]
            frame_times[ii: ii + nmiss] = (cam_time[-1] + intertrial_duration[trial] /
                                           (nmiss + 1) * (np.arange(nmiss) + 1))
            ii += nmiss
        # import matplotlib.pyplot as plt
        # plt.plot(np.diff(frame_times))
        """
        if we find a video file, get the number of frames and extrapolate the times
         using the median frame rate as the video stops after the bpod
        """
        video_file = list(self.session_path.joinpath(
            'raw_video_data').glob('_iblrig_leftCamera*.mp4'))
        if video_file:
            cap = cv2.VideoCapture(str(video_file[0]))
            nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            if nframes > len(frame_times):
                to_app = (np.arange(int(nframes - frame_times.size),) + 1
                          ) / frate + frame_times[-1]
                frame_times = np.r_[frame_times, to_app]
        assert(np.all(np.diff(frame_times) > 0))  # negative diffs implies a big problem
        return frame_times
def main():
    num_frames = 80
    vgg_model = '/DB/rhome/yhu/tensorflow/video_to_sequence/VGG_ILSVRC_19_layers.caffemodel'
    vgg_deploy = '/DB/rhome/yhu/tensorflow/video_to_sequence/VGG_ILSVRC_19_layers_deploy.prototxt'
    video_path = '/DATA2/data/yhu/YouTubeClips'
    video_save_path = '/DATA2/data/yhu/attn_YouTubeFeats'
    attn_video_path = '/DATA2/data/yhu/attn_video_YouTubeFeats'
    #video_path = '/DATA2/data/yhu/hollywood/videoclips'
    #video_save_path = '/DATA2/data/yhu/hollywood/videoFeats'
    videos = os.listdir(video_path)
    video_generated = os.listdir(video_save_path)
    videos = [x for x in videos if x.endswith('avi')]

    cnn = CNN(model=vgg_model, deploy=vgg_deploy, width=224, height=224)
    #cnn = CNN(width=224, height=224)

    for video in videos:
        print(video)
        #if video == "yREFkmrrYiw_51_57.avi":
            #if video+'.npy' in video_generated:
            #    pass
            #else:
        if os.path.exists( os.path.join(video_save_path, video+'.npy') ):
            print "Already processed ... "
            continue

        video_fullpath = os.path.join(video_path, video)
        try:
            cap  = cv2.VideoCapture( video_fullpath )
            print "video_fullpath: ",video_fullpath
        except:
            pass

        frame_count = 0
        frame_list = []

        while True:
            ret, frame = cap.read()
            #print ret,frame
            if ret is False:
                break

            frame_list.append(frame)
            frame_count += 1

        count=0
        lack = 80 - frame_count
        #frame_list = np.array(frame_list)
        print "frame_count: ", frame_count

        if frame_count > 80:
            frame_list = np.array(frame_list)
            frame_indices = np.linspace(0, frame_count, num=num_frames, endpoint=False).astype(int)
            frame_list = frame_list[frame_indices]
        elif frame_count <80:
            frame = frame_list[-1]
            while count<lack:
                frame_list.append(frame)
                count += 1
                #print count
            frame_list = np.array(frame_list)
        else:
            frame_list = np.array(frame_list)

        print(frame_list.shape)

        #ipdb.set_trace()
        
        cropped_frame_list = np.array([preprocess_frame(x) for x in frame_list])
        all_attn_frames = cnn.get_attention(cropped_frame_list)
        #cropped_frame_list = cnn.get_attention(cropped_frame_list)

        attns = all_attn_frames.max(axis=1)
        for x in range(0,all_attn_frames.shape[0]):
            attns[x] = (attns[x] - attns[x].min()) / attns[x].max()
        for x in range(0,cropped_frame_list.shape[-1]):
            cropped_frame_list[:,:,:,x] = np.multiply(cropped_frame_list[:,:,:,x], attns)

        #ipdb.set_trace()
        #attn_save_path = os.path.join(attn_video_path, video + '.npy')
        #np.save(attn_save_path, cropped_frame_list)
        #np.save(attn_save_path, attns)
        
        feats = cnn.get_features(cropped_frame_list)
        #print feats

        save_full_path = os.path.join(video_save_path, video + '.npy')
        np.save(save_full_path, feats)
Example #12
# if a video path was not supplied, grab the reference
# to the Raspberry Pi camera
if not args.get("video", False):
	# initialize the camera and grab a reference to the raw camera capture
	camera = PiCamera()
	camera.resolution = (frameWidth, frameHeight)
	camera.framerate = 5
	rawCapture = PiRGBArray(camera, size=(frameWidth, frameHeight))
	# allow the camera to warmup
	time.sleep(0.1)


# otherwise, load the video
else:
	camera = cv2.VideoCapture(args["video"])


## Function that removes all unwanted colors defined in the boundaries list from image
def removeUnwantedColor(originalImage,updateImage):
	"""Returns the original image without the unwanted colors"""
	
	# Define list of colors boundaries	
	boundaries = [	# Define all unwanted colors here
			([0,60,190],[190,190,255]),			# Red
			([170,150,0],[255,255,190]), 		# Green
			# ([85,170,210],[225,235,235]), 	# Yellow
			
			# Color White needs to be defined here
			([245,245,245],[255,255,255])]	# White
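	# Hedged sketch of how this function's masking step is typically finished
	# (not the original body): for every (lower, upper) BGR pair, build a mask
	# with cv2.inRange and blank the matching pixels in updateImage. Assumes
	# numpy is imported as np.
	for (lower, upper) in boundaries:
		lower = np.array(lower, dtype="uint8")
		upper = np.array(upper, dtype="uint8")
		mask = cv2.inRange(originalImage, lower, upper)
		updateImage[mask > 0] = 0  # remove the unwanted color
	return updateImage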
Example #13
orig_mask = imgMustache[:,:,3]
 
# Create the inverted mask for the filter
orig_mask_inv = cv2.bitwise_not(orig_mask)
 
# Convert filter image to BGR
# and save the original image size (used later when re-sizing the image)
imgMustache = imgMustache[:,:,0:3]
origMustacheHeight, origMustacheWidth = imgMustache.shape[:2]
 
#-----------------------------------------------------------------------------
#       Main program loop
#-----------------------------------------------------------------------------
 
# collect video input from first webcam on system
video_capture = cv2.VideoCapture('/home/pi/find_our_faces.mp4')
iter = 0
 
while True:
    iter = iter + 1
    # Capture video feed
    #video_capture = cv2.VideoCapture(video_capture) 
    ret, frame = video_capture.read()
    # print imgMustache.shape
    # Create greyscale image from the video feed
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
    # Detect faces in input video stream
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
Example #14
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

# Configs
attri = ['emotion', 'age']
# KEY = '06bfe4c9841a4acfb7926f707c18bc91'
# ENDPOINT = 'https://centralus.api.cognitive.microsoft.com'
KEY = '19844e61112344d597448b416a259cc4'
ENDPOINT = 'https://centralus.api.cognitive.microsoft.com/'

face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
webcam = cv2.VideoCapture(1)
time.sleep(1)
PERSON_GROUP_ID = 'myteamsfaces'
TARGET_PERSON_GROUP_ID = str(uuid.uuid4())
# Debug helpers


def getRectangle(faceDictionary):
    rect = faceDictionary.face_rectangle
    left = rect.left
    top = rect.top
    right = left + rect.width
    bottom = top + rect.height
    return ((left, top), (right, bottom))
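
# Hedged usage sketch (not from the original file): grab one webcam frame, send it
# to the Face API with detect_with_stream, and draw the returned rectangles with
# PIL. Assumes `import cv2` appeared earlier in the full script.
ret, frame = webcam.read()
if ret:
    ok, jpg = cv2.imencode('.jpg', frame)
    detected_faces = face_client.face.detect_with_stream(
        BytesIO(jpg.tobytes()), return_face_attributes=attri)
    img = Image.open(BytesIO(jpg.tobytes()))
    draw = ImageDraw.Draw(img)
    for face in detected_faces:
        draw.rectangle(getRectangle(face), outline='red')
        print(face.face_attributes.age, face.face_attributes.emotion)
    img.show()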

def get_video_metadata(raw_video):

	# load COCO labels
	labels = open(os.path.dirname(os.path.realpath(__file__)) + '/yolov3-coco/coco-labels', 'r').read().strip().split('\n')

	# load DNN and layers using config and weights
	net = cv2.dnn.readNetFromDarknet(os.path.dirname(os.path.realpath(__file__)) + '/yolov3-coco/yolov3.cfg', os.path.dirname(os.path.realpath(__file__)) + '/yolov3-coco/yolov3.weights')

	layers = net.getLayerNames()
	layers = [layers[uol[0] - 1] for uol in net.getUnconnectedOutLayers()]

	data = {'process': {}, 'metadata': {}, 'timestamps': [], 'objects': {}}

	try:
		begin = time.time()

		# try to read video file
		capture = cv2.VideoCapture(raw_video.file.path)
	
		# get simple video metadata
		frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
		fps = int(capture.get(cv2.CAP_PROP_FPS))
		duration = frames / fps

		data['metadata']['frames'] = frames
		data['metadata']['fps'] = fps
		data['metadata']['duration'] = duration

		height = None
		width = None

		timestamp = 0

		# loop over intervals of time
		while timestamp <= numpy.floor(duration):

			# set video position and read a frame
			capture.set(cv2.CAP_PROP_POS_FRAMES, int(timestamp * fps))
			loaded, frame = capture.read()

			if not loaded:
				break

			if width is None or height is None:
				height = frame.shape[0]
				width = frame.shape[1]

				data['metadata']['resolution'] = {"height": height, "width": width}

			# pass data through DNN
			blob = cv2.dnn.blobFromImage(frame, 1 / 255, (416, 416), swapRB=True, crop=False)
			net.setInput(blob)
			outputs = net.forward(layers)
		
			confidences = []
			class_ids = []

			# process outputs
			for output in outputs:
				for detection in output:
					scores = detection[5:]
					class_id = numpy.argmax(scores)
					confidence = scores[class_id]

					# filter out weak detections
					if confidence > 0.5:
						confidences.append(float(confidence))
						class_ids.append(class_id)

			# update response
			objects = [labels[id] for id in class_ids]
			data['timestamps'].append({'objects': objects, 'confidences': confidences})

			for object in objects:
				if object not in data['objects']:
					data['objects'][object] = []

				if timestamp not in data['objects'][object]:
					data['objects'][object].append(timestamp)

			timestamp += 1

		capture.release()
		end = time.time()

		data['process']['success'] = True
		data['process']['duration'] = end - begin
		data['process']['threshold'] = 0.5

	except:
		data['process']['success'] = False

	finally:
		# output as dict
		return data
import cv2
import numpy as np

kamera=cv2.VideoCapture(0)

while True:
    ret,kare=kamera.read()      # checks whether the camera is working.

    bolge=kare[0:200,0:200]

    cv2.imshow("Video",kare)        
    cv2.imshow("Bolge",bolge)

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

kamera.release()
    

cv2.destroyAllWindows()
    def confirm_change_camera(self):
        path = self.rtsp_editline.text()
        self.cap = cv2.VideoCapture(path)
    def run(self):
        # capture from web cam
        cap = cv2.VideoCapture(0)
        sess = tf.Session()
        minsize = 25  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        factor = 0.709  # scale factor
        face_detected = False
        self.timer_started = False
        self.save_faces = False
        save_face_with_timer = False
        timer_start = 0
        sec = 0
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
            while True:
                ret, cv_img = cap.read()
                if not ret:
                    break
                img = cv_img[:, :, 0:3]
                boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                if boxes.shape[0] == 0:
                    face_detected = False
                else:
                    face_detected = True
                date_time = datetime.datetime.now()

                # timer part
                if self.timer_started:
                    timer_start = time.time()
                    self.timer_started = False
                if timer_start and not face_detected:
                    self.app.le.setText("FAIL")
                    timer_start = 0
                    self.timer_started = True
                if timer_start and (time.time() - timer_start) > sec:
                    self.app.le.setText("Wait for {} seconds...".format(5 - sec))
                    sec = sec + 1
                if timer_start and face_detected and (time.time() - timer_start) > 5.0:
                    timer_start = 0
                    save_face_with_timer = True

                for i in range(boxes.shape[0]):
                    pt1 = (int(boxes[i][0]), int(boxes[i][1]))
                    pt2 = (int(boxes[i][2]), int(boxes[i][3]))
                    crop_img = cv_img[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    clean_img = cv_img
                    if self.save_faces:
                        img_path = os.path.join(APP_FACES_DIR, 'face_{}_date_{}-{}-{}-{}-{}.png'.format(i + 1,
                                                                                                        date_time.month,
                                                                                                        date_time.day,
                                                                                                        date_time.hour,
                                                                                                        date_time.minute,
                                                                                                        date_time.second))
                        cv2.imwrite(img_path, crop_img)
                        if i + 1 == boxes.shape[0]:
                            self.save_faces = False
                    if save_face_with_timer and not i:
                        img_path = os.path.join(APP_TIMER_DIR, 'timer_face_date_{}-{}-{}-{}-{}.png'.format(date_time.month,
                                                                                                           date_time.day,
                                                                                                           date_time.hour,
                                                                                                           date_time.minute,
                                                                                                           date_time.second))
                        cv2.imwrite(img_path, clean_img)
                        self.app.le.setText("SUCCESS")
                        sec = 0
                    cv2.rectangle(cv_img, pt1, pt2, color=(255, 255, 0))
                if ret:
                    self.change_pixmap_signal.emit(cv_img)
                    if save_face_with_timer:
                        time.sleep(1)
                        save_face_with_timer = False
Example #19
import argparse
import time

import cv2
import imutils
from imutils.video import VideoStream

# parse the command-line arguments with argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
 
#we will use USB camera
if args.get("video", None) is None:
	vs = VideoStream(src=0).start()
	time.sleep(2.0)
 
# otherwise, read frames from the video file supplied on the command line
else:
	vs = cv2.VideoCapture(args["video"])
# initialize
firstFrame = None

while True:
	# read the current frame (the first frame read becomes the comparison frame)
	frame = vs.read()
	frame = frame if args.get("video", None) is None else frame[1]
	text = "Unoccupied"
	if frame is None:
		break
 
	# resize the frame and convert it to grayscale
	frame = imutils.resize(frame, width=500)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)
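	# Hedged sketch of the usual continuation of this motion-detection loop (the
	# classic imutils/OpenCV tutorial pattern; not necessarily the original code).
	if firstFrame is None:
		firstFrame = gray
		continue

	# difference between the current frame and the reference frame, thresholded
	# and dilated to fill in holes
	frameDelta = cv2.absdiff(firstFrame, gray)
	thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
	thresh = cv2.dilate(thresh, None, iterations=2)
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	for c in cnts:
		# ignore contours smaller than the --min-area argument
		if cv2.contourArea(c) < args["min_area"]:
			continue
		(x, y, w, h) = cv2.boundingRect(c)
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
		text = "Occupied"
	cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
		cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
	cv2.imshow("Security Feed", frame)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break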
stereocalib_flags |= cv2.CALIB_FIX_INTRINSIC
# stereocalib_flags |= cv2.CALIB_USE_INTRINSIC_GUESS
# stereocalib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
stereocalib_flags |= cv2.CALIB_FIX_FOCAL_LENGTH
# stereocalib_flags |= cv2.CALIB_FIX_ASPECT_RATIO
# stereocalib_flags |= cv2.CALIB_SAME_FOCAL_LENGTH
stereocalib_flags |= cv2.CALIB_ZERO_TANGENT_DIST
# stereocalib_flags |= cv2.CALIB_RATIONAL_MODEL
# stereocalib_flags |= cv2.CALIB_FIX_K1
# stereocalib_flags |= cv2.CALIB_FIX_K2
# stereocalib_flags |= cv2.CALIB_FIX_K3
# stereocalib_flags |= cv2.CALIB_FIX_K4
# stereocalib_flags |= cv2.CALIB_FIX_K5
# stereocalib_flags |= cv2.CALIB_FIX_K6

left = cv2.VideoCapture(4)
right = cv2.VideoCapture(2)


def nothing(x):
    pass


print("Ищем доску")
while len(r_imgs) + len(l_imgs) < len(photos_lst):
    r_img_name = str(img_num) + "_veb_r.png"
    l_img_name = str(img_num) + "_veb_l.png"

    img_r = cv2.imread(os.path.join(imgs_folder, r_img_name))
    img_l = cv2.imread(os.path.join(imgs_folder, l_img_name))
    r_imgs.append(img_r)
Example #21
import cv2
import numpy as np
import matplotlib.pyplot as plt


img= cv2.VideoCapture(0)
while True:
    ret, image = img.read()
    # convert BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # define the color range within HSV
    #lower_blue = np.array([160,0,0])
    #upper_blue = np.array([180,255,255])
    boundaries=[([20,0,0],[30,255,255])]
    # match the threshold values defined above within the image
    for (lower,upper) in boundaries:
        lower=np.array(lower,dtype="uint8")
        upper=np.array(upper,dtype="uint8")
        mask = cv2.inRange(hsv, lower, upper)
        # take the mask found above from the main image with a bitwise AND
        res = cv2.bitwise_and(image, image, mask=mask)
        # the three prepared images are displayed below
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)  # convert the masked image to grayscale
    blurred = cv2.GaussianBlur(res, (7, 7), 0)  # blur the image to reduce noise
    ret, th1 = cv2.threshold(res, 40, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # create a binary image from the grayscale image
    ret1, th2 = cv2.threshold(th1, 127, 255, cv2.THRESH_TOZERO)
    _, contours, hierarchy = cv2.findContours(th2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find contours in the image

    
    if len(contours)>0:
        centroid = max(contours, key=cv2.contourArea)  # the contour with the largest area is selected
Example #22
            (left, right, top, bottom) = (int(xmin * 500), int(xmax * 500), int(ymin * 500), int(ymax * 500))
            crop_img = image[top:bottom, left:right]
            crop_img = cv2.cvtColor(crop_img,cv2.COLOR_BGR2RGB)
        else:
            crop_img = None

    return crop_img

image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)

HOST='192.168.0.10'

c = socket(AF_INET, SOCK_STREAM)
c.connect((HOST,3000))
print('ok')

i=1
k=0
l =0
while(video.isOpened()):
    ret, frame = video.read()
    if not ret:
        break
    frame = Rotate(frame, 270)
    frame = cv2.resize(frame, dsize=(500,500), interpolation = cv2.INTER_AREA)
    frame_expanded = np.expand_dims(frame, axis=0)
Example #23
# Library imports
import numpy as np
import cv2

SumUmbral = 0

# Capture the video
cap = cv2.VideoCapture("./FravAttack/NIK/NIR/USER/USUARIO_000_NIR.avi")

# Call the method
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=200, nmixtures=5, backgroundRatio=0.7, noiseSigma=0)

# Disable OpenCL; if we don't do this it does not work
cv2.ocl.setUseOpenCL(False)

while (True):
    # Read the next frame
    ret, frame = cap.read()

    # If we have reached the end of the video, exit
    if not ret:
        break

    # Apply the algorithm
    fgmask = fgbg.apply(frame)

    # Copy the thresholded image to detect the contours
    contornosimg = fgmask.copy()

    # Look for contours in the image
    contornos, hierarchy = cv2.findContours(contornosimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
faceProto="opencv_face_detector.pbtxt"
faceModel="opencv_face_detector_uint8.pb"
ageProto="age_deploy.prototxt"
ageModel="age_net.caffemodel"
genderProto="gender_deploy.prototxt"
genderModel="gender_net.caffemodel"

MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746)
ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList=['Male','Female']

faceNet=cv2.dnn.readNet(faceModel,faceProto)
ageNet=cv2.dnn.readNet(ageModel,ageProto)
genderNet=cv2.dnn.readNet(genderModel,genderProto)

video=cv2.VideoCapture(args.image if args.image else 0)
padding=20
while cv2.waitKey(1)<0:
    hasFrame,frame=video.read()
    if not hasFrame:
        cv2.waitKey()
        break

    resultImg,faceBoxes=highlightFace(faceNet,frame)
    if not faceBoxes:
        print("No face detected")

    for faceBox in faceBoxes:
        face=frame[max(0,faceBox[1]-padding):
                   min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding)
                   :min(faceBox[2]+padding, frame.shape[1]-1)]
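        # Hedged sketch of how this loop usually continues (the standard
        # age/gender-net pattern; not necessarily the original code): build a blob
        # from the face crop and run the gender and age networks defined above.
        blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow("Detecting age and gender", resultImg)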
Example #25
import numpy as np
import cv2

import time

fire_cascade = cv2.CascadeClassifier('fire_detection.xml')
#fire_detection.xml file & this code should be in the same folder while running the code

cap = cv2.VideoCapture(0)
while 1:
    #ser1.write('0')
    ret, img = cap.read()
    #cv2.imshow('imgorignal',img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fire = fire_cascade.detectMultiScale(img, 1.2, 5)
    for (x, y, w, h) in fire:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        print('Fire is detected..!')

        time.sleep(0.2)

    cv2.imshow('img', img)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example #26
import numpy as np
import cv2

video = "http://*****:*****@192.168.1.7:8081/"  # 此处@后的ipv4 地址需要改为app提供的地址
cap = cv2.VideoCapture(video)


while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame',frame)
    cv2.imshow('gray',gray)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
import cv2

vidcap=cv2.VideoCapture('dati/interferenza.mp4')

conteggio=0
successo,immagine=vidcap.read()
while successo:
	cv2.imwrite("dati/fotogrammi/%d.jpg" % conteggio, immagine)
	if cv2.waitKey(10)==27: break
	conteggio=conteggio+1
	successo,immagine=vidcap.read()
Example #28
    indices = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD,
                               NMS_THRESHOLD)
    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        drawPred(classIds[i], confidences[i], left, top, left + width,
                 top + height)


outputFile = "output/yolo_output.avi"
if (args.video):
    cap = cv2.VideoCapture(args.video)
else:
    cap = cv2.VideoCapture(0)

vid_writer = cv2.VideoWriter(outputFile,
                             cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                             (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                              round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

while cv2.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        print('Done!')
        print("Output file is stored as ", outputFile)
        cv2.waitKey(3000)
        break
Example #29
import cv2, time

video = cv2.VideoCapture(0)

check, frame = video.read()

print("==check==")
print(check)

print("==frame==")
print(frame)

time.sleep(5)

print("Releasing Video")
video.release()
cv2.imshow("MyFrame", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #30
	def __init__(self, video):
		self.video = cv2.VideoCapture(video)
		self.frame = None
		self.faceDetector = FaceDetector()
		self.handTracker = HandTracker()