Example #1
    def Viewer(self):
        check_dst = Dst_Check()
        check_ipv4 = check_dst.Ip_Check(self.ipv4)
        if check_ipv4 == "Opened":
            port = "554"
            url = "rtsp://{0}:{1}@{2}:{3}".format(self.usr, self.pas,
                                                  self.ipv4, port)
            vcap = cv2.VideoCapture(url)
            if not vcap.isOpened():
                os.system("cls")
                print("[!] Wrong User&Password")

            else:

                while True:
                    data, frame = vcap.read()
                    if not data:
                        break
                    cv2.imshow("VIEWER", frame)
                    key = cv2.waitKey(1)
                    if key == 27:  # ESC closes the viewer
                        break
                vcap.release()
                cv2.destroyWindow("VIEWER")

        else:
            print("[!] Wrong IP Desination")
Example #2
 def testCut(cls):
     lImg = cv2.imread(cls._img)
     lPartImg = lImg[200:300,630:800]
     cv2.imshow('origin', lImg)
     cv2.imshow('cut', lPartImg)
     cv2.waitKey(0)
     cv2.destroyAllWindows()
Example #3
    def testRoiOption(cls):
        lImg = cv2.imread(cls._img)
        lEye = lImg[10:30, 10:30]  # 20x20 ROI
        lImg[40:60, 50:70] = lEye  # target slice must match the ROI shape

        cv2.imshow("rio", lEye)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #4
def main():
	print('Args:', sys.argv)
	ncam = 0  # default camera index if -c is not given
	for x in range(len(sys.argv)):
		if sys.argv[x] == '-c':
			ncam = int(sys.argv[x + 1])
	vs = VisionSystem(ncam)
	vs.vidcap.release()  # release the capture owned by the VisionSystem
	cv2.destroyAllWindows()
Example #5
def ConnectCamera():
	cap = cv2.VideoCapture(-1)
	while True:
		ret, frame = cap.read()
		if not ret:
			break
		cv2.imshow('original', frame)
		frame = frame[220:720, 100:1100]
		# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
		gray = ImageProcessing(frame)
		cv2.imshow('frame',gray)
		if cv2.waitKey(40) & 0xFF == ord('q'):
			break

	cap.release()
	cv2.destroyAllWindows()
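The NOTE in ConnectCamera is worth pinning down, because swapped slice axes fail silently. A minimal self-contained sketch (synthetic NumPy image, no camera required) of how row-first indexing maps to image coordinates:

import numpy as np

# A black image 100 px tall (rows) and 200 px wide (columns).
img = np.zeros((100, 200, 3), dtype=np.uint8)

# Rectangle with top-left corner (x=150, y=20), width w=40, height h=30.
# Rows (y) come first in the slice: img[y:y+h, x:x+w].
x, y, w, h = 150, 20, 40, 30
img[y:y + h, x:x + w] = (0, 255, 0)
print(img[y:y + h, x:x + w].shape)  # (30, 40, 3)

# Swapping the axes asks for rows 150:190 of a 100-row image, which
# silently yields an empty slice instead of the intended rectangle.
print(img[x:x + w, y:y + h].shape)  # (0, 30, 3)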
Example #6
 def runVideo():
     lVHandle = cv2.VideoCapture(0)
     if lVHandle.isOpened():
         lVHandle.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
         lVHandle.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
     while True:
         lRet, lFrame = lVHandle.read()
         if not lRet:
             break
         cv2.imshow('1080p', lFrame)
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
     lVHandle.release()
     cv2.destroyAllWindows()
Example #7
 def testSplit(cls):
     lImg = cv2.imread(cls._img)
     # OpenCV stores images as BGR: channel 0 = blue, 1 = green, 2 = red.
     lb = lImg[:, :, 0]
     # lg = lImg[:, :, 1]
     # lr = lImg[:, :, 2]
     lImg[:, :, 0] = 0  # zero the blue channel
     lImg[:, :, 1] = 0  # zero the green channel, leaving only red
     cv2.imshow('r', lImg)
     # cv2.imshow('g',lg)
     # cv2.imshow('b',lb)
     cv2.waitKey(0)
     cv2.destroyAllWindows()
Example #8
    def testWebm(cls):
        lVideo = os.path.join(cls._tarDir, u"ShareMedia/video/Japanin8K.webm")
        lVideo1 = os.path.join(cls._tarDir, u"ShareMedia/video/test1.mp4")
        lVHandle = cv2.VideoCapture(lVideo, cv2.CAP_FFMPEG)
        while True:

            lRet, lFrame = lVHandle.read()
            if not lRet:
                break
            lScope = cv2.resize(lFrame,(1280,720))
            cv2.imshow('8k', lScope)

            if cv2.waitKey(33) > -1:
                break
        lVHandle.release()
        cv2.destroyAllWindows()
Example #9
def showcam():
    img_file = "lenna.png"

    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    thresh_np = np.zeros_like(img)  # zero-filled image the same size as the original
    thresh_np[img > 127] = 255      # set only values greater than 127 to 255
    print(thresh_np)

    while True:
        cv2.imshow('gray2', img)
        cv2.imshow('thr', thresh_np)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC
            break
    cv2.destroyAllWindows()
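Building the mask by hand as above works, but OpenCV ships the same operation as cv2.threshold. A minimal equivalent sketch, assuming the same lenna.png grayscale input:

import cv2

img = cv2.imread("lenna.png", cv2.IMREAD_GRAYSCALE)
# THRESH_BINARY: pixels above the threshold 127 become 255, all others 0.
# The first return value is the threshold value that was actually applied.
ret, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
cv2.imshow("thr", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()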
Example #10
 def testVideoCut(cls):
     lVHandle = cv2.VideoCapture(cls._video, cv2.CAP_FFMPEG)
     while True:
         lRet, lFrame = lVHandle.read()
         if not lRet:
             break
         lFrame = cv2.rotate(lFrame, cv2.ROTATE_90_CLOCKWISE)
         lDstFrame, lW, lH = MainRun.CutImage(lFrame, cls._xLeft,
                                              cls._xRight, cls._yTop,
                                              cls._yButton)
         lMinRate = 0.8
         lScope = cv2.resize(lDstFrame,
                             (int(lW * lMinRate), int(lH * lMinRate)))
         cv2.imshow('video', lScope)
         if cv2.waitKey(18) > -1:
             break
     lVHandle.release()
     cv2.destroyAllWindows()
Example #11
    def detect_face(self,img):
        face_cascade = cv2.CascadeClassifier(self.CASE_PATH)

        image = cv2.imread(img)

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=10,
            minSize=(64, 64)
        )
        # placeholder for cropped faces
        face_imgs = np.empty((len(faces), self.face_size, self.face_size, 3))
        for i, face in enumerate(faces):
            face_img, cropped = self.crop_face(image, face, margin=10, size=self.face_size)
            (x, y, w, h) = cropped
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 200, 0), 2)
            face_imgs[i, :, :, :] = face_img
        if len(face_imgs) > 0:
            features_faces = self.model.predict(face_imgs)
            # P = imagenet_utils.decode_predictions(features_faces)
            # for (i, (imagenetID, label, prob)) in enumerate(P[0]):
            #    print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100))

            if features_faces[0][1] > 1:
                print("accuracy: 0.994569")
            else:
                print("accuracy:", features_faces[0][1])
            predicted_names = [self.identify_face(features_face) for features_face in features_faces]
        # draw results
        for i, face in enumerate(faces):
            label = "{}".format(predicted_names[i])
            self.draw_label(image, (face[0], face[1]), label)

        cv2.imshow('Keras Faces', image)

        cv2.waitKey(0)  # wait for a key press (e.g. ESC) before closing
        # A single image was read with cv2.imread, so there is no capture
        # to release; just close the window.
        cv2.destroyAllWindows()
Example #12
def videoSteam(cam=0):  #cam selects the active camera (0 front, 1 dig, 2 back)
    frontCam = cv2.VideoCapture(0)  #camera for maneuvering the field
    digCam = cv2.VideoCapture(1)  #camera to tell how digging is working
    backCam = cv2.VideoCapture(2)  #camera for backing up and docking to hopper
    while True:
        ret = None
        frame = None
        if cam == 2:
            ret, frame = backCam.read()  #capture a frame from the back cam
        elif cam == 1:
            ret, frame = digCam.read()
        else:
            ret, frame = frontCam.read()  #default is the front
        cv2.imshow(
            'frame',
            frame)  #TODO send frame over socket and display on laptop instead
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break  #remove this if statement when displaying on laptop
    frontCam.release()
    digCam.release()
    backCam.release()
    cv2.destroyAllWindows()
Example #13
def main():
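    # Assumed context, not shown in this snippet: cap is a cv2.VideoCapture,
    # hands is a MediaPipe Hands instance, and controller is the gesture
    # controller object; all live at module level in the source project.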
    running = True
    while running:
        suc, frame = cap.read()
        if suc:
            frame = cv2.flip(frame, 1)
            results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            hand_landmarks = results.multi_hand_landmarks
            if hand_landmarks and len(hand_landmarks) == 2:
                image_height, image_width, _ = frame.shape
                annotated_image = frame.copy()
                for hand_landmark in hand_landmarks:
                    controller.update(frame, hand_landmark)
            cv2.imshow('main',frame)
        key = cv2.waitKey(1)
        if key == ord('a'):
            break
        if key == 32:  # spacebar
            print(len(hand_landmarks))
    cap.release()
    cv2.destroyAllWindows()
Example #14
    def connect_Socket(self):
        IP = 'localhost'
        PORT = 5555

        try:
            self.con.connect((IP, PORT))
            print(self.con.getsockname())

            while True:
                length = self.recvall(self.con, 16)
                if length is None:
                    break

                buf = self.recvall(self.con, int(length))
                data = np.frombuffer(buf, dtype='uint8')
                decimg = cv2.imdecode(data, 1)

                cv2image = cv2.cvtColor(decimg, cv2.COLOR_BGR2RGBA)
                current_image = Image.fromarray(cv2image)
                current_image = current_image.resize([1000, 610],
                                                     Image.LANCZOS)
                imgtk = ImageTk.PhotoImage(image=current_image)

                self.panel.imgtk = imgtk
                self.panel.config(image=imgtk)
                self.panel.update()

                if (cv2.waitKey(30) & 0xFF == ord('q')):
                    self.con.send('Quit')
                    break
                else:
                    self.con.send('OK')

            self.con.close()
            cv2.destroyAllWindows()
        except Exception as e:
            print(e)
Example #15
def main():
    global refPt, tempPosition
    
    args = parse_args()
    model_file, num_layers, IMAGE_SIZE = loadConfig(args.cfg)
    
    transform_image = False
    use_webcam = False
    gpus = ''
    use_crop = False
    min_confidence_threshold = 0.5
    

    if args.image_file:
        image_file = args.image_file   
    if args.save_transform_image:
        transform_image = args.save_transform_image
    if args.use_webcam:
        use_webcam = args.use_webcam
    if args.gpus:
        gpus = args.gpus
    if args.use_crop_mode:
        use_crop = args.use_crop_mode
    if args.min_confidence_threshold:
        min_confidence_threshold = float(args.min_confidence_threshold)
        
    model = get_pose_net(num_layers, is_train=False)
    
    if model_file:
        print('=> loading model from {}'.format(model_file))
        model.load_state_dict(torch.load(model_file))
        if len(gpus) != 0:
            GPUS = [int(i) for i in gpus.split(',')]
            model = torch.nn.DataParallel(model, device_ids=GPUS).cuda()
    else:
        print('Error')
        return
        
    if not use_webcam:
        ## Load an image
        data_numpy = cv2.imread(image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        if data_numpy is None:
            raise ValueError('Fail to read image {}'.format(image_file))
        print(data_numpy.shape)
        
        if use_crop:
            cv2.namedWindow("image")
            cv2.setMouseCallback("image", click_and_crop)
            
            while True:
                key = cv2.waitKey(1) & 0xFF
             
                if len(refPt) == 2:
                    temp = data_numpy.copy()
                    cv2.rectangle(temp, refPt[0], refPt[1], (0, 255, 0), 2)
                    cv2.imshow("image", temp)
                    cv2.waitKey(1) & 0xFF
                    break
                elif len(refPt) == 1:
                    temp = data_numpy.copy()
                    cv2.rectangle(temp, refPt[0], tempPosition, (0, 255, 0), 2)
                    cv2.imshow("image", temp)
                else:
                    cv2.imshow("image", data_numpy)
                    
            data_numpy = data_numpy[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
            
        input = cv2.resize(data_numpy, (IMAGE_SIZE[0], IMAGE_SIZE[1]))

        # vis transformed image
        if transform_image:
            copyInput = input.copy()
            cv2.rectangle(copyInput, (int(IMAGE_SIZE[0]/2 + IMAGE_SIZE[0]/4), int(IMAGE_SIZE[1]/2 + IMAGE_SIZE[1]/4)),
                                     (int(IMAGE_SIZE[0]/2 - IMAGE_SIZE[0]/4), int(IMAGE_SIZE[1]/2 - IMAGE_SIZE[1]/4)), (255,0,0), 2)
            cv2.imwrite('transformed.jpg', copyInput)

        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225]),
            ])
        input = transform(input).unsqueeze(0)
        
        # switch to evaluate mode
        model.eval()
        with torch.no_grad():
            # compute output heatmap
            output = model(input)
            coords, maxvals = get_max_preds(output.clone().cpu().numpy())
            print(maxvals)
            cv2.waitKey(1000) & 0xFF
            image = data_numpy.copy()
            for i in range(coords[0].shape[0]):
                mat = coords[0,i]
                x, y = int(mat[0]), int(mat[1])
                if maxvals[0, i] >= min_confidence_threshold:
                    cv2.circle(image, (int(x*data_numpy.shape[1]/output.shape[3]),
                          int(y*data_numpy.shape[0]/output.shape[2])), 2, (0, 0, 255), 2)
                   
            cv2.imwrite('result.jpg', image)
            cv2.imshow('result.jpg', image)
            cv2.waitKey(2000) & 0xFF
        
        print('Success')
    else:
        sample = cv2.imread('sample.png', -1)
        alpha_s = sample[:, :, 3] / 255.0
        alpha_l = 1.0 - alpha_s
        cap = cv2.VideoCapture(0)
        while True:
            ret, data_numpy = cap.read()
            if not ret: break
                            
            input = cv2.resize(data_numpy, (IMAGE_SIZE[0], IMAGE_SIZE[1]))

            # vis transformed image
            if transform_image:
                copyInput = input.copy()
                cv2.rectangle(copyInput, (int(IMAGE_SIZE[0]/2 + IMAGE_SIZE[0]/4), int(IMAGE_SIZE[1]/2 + IMAGE_SIZE[1]/4)),
                                         (int(IMAGE_SIZE[0]/2 - IMAGE_SIZE[0]/4), int(IMAGE_SIZE[1]/2 - IMAGE_SIZE[1]/4)), (255,0,0), 2)
                cv2.imwrite('transformed.jpg', copyInput)

            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
                ])
            input = transform(input).unsqueeze(0)
            
            # switch to evaluate mode
            model.eval()
            with torch.no_grad():
                # compute output heatmap
                output = model(input)
                coords, maxvals = get_max_preds(output.clone().cpu().numpy())
                image = data_numpy.copy()
                badPoints = 0
                for i in range(coords[0].shape[0]):
                    mat = coords[0,i]
                    x, y = int(mat[0]), int(mat[1])
                    if maxvals[0, i] >= min_confidence_threshold:
                        cv2.circle(image, (int(x*data_numpy.shape[1]/output.shape[3]),
                              int(y*data_numpy.shape[0]/output.shape[2])), 2, (0, 0, 255), 2)
                    if maxvals[0, i] <= 0.4:
                        badPoints += 1
                if badPoints >= coords[0].shape[0]/3:
                    cv2.rectangle(image, (int(data_numpy.shape[1]/2 + data_numpy.shape[1]/4), int(data_numpy.shape[0]/2 + data_numpy.shape[0]/4)),
                                         (int(data_numpy.shape[1]/2 - data_numpy.shape[1]/4), int(data_numpy.shape[0]/2 - data_numpy.shape[0]/4)), (255,0,0), 2)
                    for c in range(0, 3):
                        image[10:10+sample.shape[0], 10:10+sample.shape[1], c] = (alpha_s * sample[:, :, c] +
                                  alpha_l * image[10:10+sample.shape[0], 10:10+sample.shape[1], c])
                cv2.imshow('result', image)
            
            cv2.waitKey(10)
            #if cv2.waitKey(1) & 0xFF == ord('q'): break

        cap.release()
        cv2.destroyAllWindows()
Example #16
 def test(cls):
     lImg = cv2.imread(cls._img)
     cv2.imshow('test', lImg)
     cv2.waitKey(0)
     cv2.destroyAllWindows()
Example #17
#image
'''
import cv2
face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img=cv2.imread('test.jpg')

gray =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces=face_cascade.detectMultiScale(gray,1.1,4)
for(x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),3)

cv2.imshow('img',img)
cv2.waitKey()
'''
#video
'''
import cv2
face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap=cv2.VideoCapture('testv.mp4')
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
    gray =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    faces=face_cascade.detectMultiScale(gray,1.1,4)
    for(x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),3)
    cv2.imshow('banu',img)
    if cv2.waitKey(1) & 0xFF==ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
'''
Example #18
def cleanup(cap):
 """
 Prepare for shutting down: release the given capture and close all windows.
 """
 cap.release()
 cv2.destroyAllWindows()
Example #19
def Destructor(cap):
	cap.release()
	cv2.destroyAllWindows()
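Both helpers take the capture as a parameter because release() is a method of the VideoCapture (or VideoWriter) object, not of the cv2 module. A minimal usage sketch, assuming the default webcam at index 0; the try/finally guarantees the device is freed even if the read loop raises:

import cv2

cap = cv2.VideoCapture(0)
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cleanup(cap)  # releases the camera and closes every window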
Example #20
def Capture_Webcam_Image():
    cam = cv2.VideoCapture(CAM_Index)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, Frame_Width_Resolution)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, Frame_Height_Resolution)

    #IF you want to train on video instead of webcam feed.
    #    cam = cv2.VideoCapture('GirlsLikeYou.mp4')
    #    cam.set(cv2.CAP_PROP_FRAME_WIDTH, Frame_Width_Resolution)
    #    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, Frame_Height_Resolution)

    image_counter = 0

    capture_time = time.time() + timer_delay_capture
    while True:
        ret, original_frame = cam.read()
        if not ret:  # check the read before touching the frame
            cam.release()
            cv2.destroyAllWindows()
            break
        #remove this line
        original_frame = cv2.resize(
            original_frame, (Frame_Width_Resolution, Frame_Height_Resolution),
            interpolation=cv2.INTER_LINEAR)

        #        cv2.imshow('frame',original_frame)
        frame = original_frame[:, :, ::-1]  # reversed channel order (BGR -> RGB)
        cv2.imshow("webcam image", original_frame)
        key = cv2.waitKey(1)

        #when escape is pressed it will exit the training.
        if key % 256 == 27:
            print("Escape pressed, closing....")
            cam.release()
            cv2.destroyAllWindows()
            print('connection closed')
            break

        #you can explicitly press spacebar to capture current frame and send it to the server.
        #Used for debugging purposes.
        elif key % 256 == 32:
            #Space pressed
            s = socket.socket()  # Create a socket object
            s.connect((SERVER_IP, port))

            img_name = host + str(image_counter) + ".jpg"
            cv2.imwrite(img_name, frame)
            image_counter = image_counter + 1
            print("capturing image: ", img_name)
            Send_Image_To_Server(img_name, s)
            s.close()

        #sending images through a timer
        currentTime = time.time()
        if (currentTime > capture_time):
            s = socket.socket()  # Create a socket object
            s.connect((SERVER_IP, port))
            img_name = host + str(image_counter) + ".jpg"
            image_save_path = dir_client_image_dump + "/" + img_name
            cv2.imwrite(image_save_path, frame)
            image_counter = image_counter + 1
            print("capturing image: ", img_name)
            Send_Image_To_Server(image_save_path, s)
            s.close()
            capture_time = currentTime + timer_delay_capture
Example #21
import cv2
cap = cv2.VideoCapture(0)
count = 0
while True:
    ret, frame = cap.read()
    if ret:
        cv2.imshow("window", frame)

    key = cv2.waitKey(1)
    # compare the low byte of the key code with the target character
    if key & 0xff == ord('q'):
        break
    if key & 0xff == ord('c'):
        cv2.imwrite("{}.png".format(count), frame)
        count += 1
cap.release()
cv2.destroyAllWindows()
Example #22
	def stop(self):
	    self.vidcap.release()
	    cv2.destroyAllWindows()
Example #23
 def close(self):
     # cv2 itself has no release(); release the capture this object owns
     # instead (e.g. self.vidcap.release()) before closing the windows.
     cv2.destroyAllWindows()
Example #24
def main():
    global refPt, tempPosition
    args = parse_args()

    transform_image = False
    use_webcam = False
    gpu = False
    use_crop = False
    min_confidence_threshold = 0.5

    if args.model_file:
        model_xml = args.model_file
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
    if args.image_file:
        image_file = args.image_file
    if args.save_transform_image:
        transform_image = args.save_transform_image
    if args.use_webcam:
        use_webcam = args.use_webcam
    if args.gpu:
        gpu = args.gpu
    if args.use_crop_mode:
        use_crop = args.use_crop_mode
    if args.min_confidence_threshold:
        min_confidence_threshold = float(args.min_confidence_threshold)

    if model_xml:
        print("Loading network files:\n\t{}\n\t{}".format(
            model_xml, model_bin))
        net = IENetwork(model=model_xml, weights=model_bin)
        net.batch_size = 1
    else:
        print('Error')
        return
    if gpu:
        plugin = IEPlugin('GPU')
    else:
        plugin = IEPlugin('CPU')

    # if plugin.device == "CPU":
    # supported_layers = plugin.get_supported_layers(net)
    # not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    # if len(not_supported_layers) != 0:
    # log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
    # format(plugin.device, ', '.join(not_supported_layers)))
    # log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
    # "or --cpu_extension command line argument")
    # sys.exit(1)
    # assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    # assert len(net.outputs) == 1, "Sample supports only single output topologies"

    input_blob = next(iter(net.inputs))
    print(net.inputs['input_1'].shape)
    print("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    print("Loaded")
    IMAGE_SIZE[0] = net.inputs['input_1'].shape[2]
    IMAGE_SIZE[1] = net.inputs['input_1'].shape[3]
    del net

    if not use_webcam:
        ## Load an image
        data_numpy = cv2.imread(
            image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        if data_numpy is None:
            raise ValueError('Fail to read image {}'.format(image_file))
        print(data_numpy.shape)

        if use_crop:
            cv2.namedWindow("image")
            cv2.setMouseCallback("image", click_and_crop)

            while True:
                key = cv2.waitKey(1) & 0xFF

                if len(refPt) == 2:
                    temp = data_numpy.copy()
                    cv2.rectangle(temp, refPt[0], refPt[1], (0, 255, 0), 2)
                    cv2.imshow("image", temp)
                    cv2.waitKey(1) & 0xFF
                    break
                elif len(refPt) == 1:
                    temp = data_numpy.copy()
                    cv2.rectangle(temp, refPt[0], tempPosition, (0, 255, 0), 2)
                    cv2.imshow("image", temp)
                else:
                    cv2.imshow("image", data_numpy)

            data_numpy = data_numpy[refPt[0][1]:refPt[1][1],
                                    refPt[0][0]:refPt[1][0]]

        input = cv2.resize(data_numpy, (IMAGE_SIZE[0], IMAGE_SIZE[1]))

        # vis transformed image
        if transform_image:
            copyInput = input.copy()
            cv2.rectangle(copyInput,
                          (int(IMAGE_SIZE[0] / 2 + IMAGE_SIZE[0] / 4),
                           int(IMAGE_SIZE[1] / 2 + IMAGE_SIZE[1] / 4)),
                          (int(IMAGE_SIZE[0] / 2 - IMAGE_SIZE[0] / 4),
                           int(IMAGE_SIZE[1] / 2 - IMAGE_SIZE[1] / 4)),
                          (255, 0, 0), 2)
            cv2.imwrite('transformed.jpg', copyInput)

        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        input = transform(input).unsqueeze(0)

        # switch to evaluate mode

        # compute output heatmap
        output = exec_net.infer(inputs={input_blob: input})['output1']
        coords, maxvals = get_max_preds(output)
        print(maxvals)
        cv2.waitKey(1000) & 0xFF
        image = data_numpy.copy()
        for i in range(coords[0].shape[0]):
            mat = coords[0, i]
            x, y = int(mat[0]), int(mat[1])
            if maxvals[0, i] >= min_confidence_threshold:
                cv2.circle(image,
                           (int(x * data_numpy.shape[1] / output.shape[3]),
                            int(y * data_numpy.shape[0] / output.shape[2])),
                           2, (0, 0, 255), 2)

        cv2.imwrite('result.jpg', image)
        cv2.imshow('result.jpg', image)
        cv2.waitKey(2000) & 0xFF

        print('Success')
    else:
        sample = cv2.imread('sample.png', -1)
        alpha_s = sample[:, :, 3] / 255.0
        alpha_l = 1.0 - alpha_s
        cap = cv2.VideoCapture(0)
        while True:
            ret, data_numpy = cap.read()
            if not ret: break

            input = cv2.resize(data_numpy, (IMAGE_SIZE[0], IMAGE_SIZE[1]))

            # vis transformed image
            if transform_image:
                copyInput = input.copy()
                cv2.rectangle(copyInput,
                              (int(IMAGE_SIZE[0] / 2 + IMAGE_SIZE[0] / 4),
                               int(IMAGE_SIZE[1] / 2 + IMAGE_SIZE[1] / 4)),
                              (int(IMAGE_SIZE[0] / 2 - IMAGE_SIZE[0] / 4),
                               int(IMAGE_SIZE[1] / 2 - IMAGE_SIZE[1] / 4)),
                              (255, 0, 0), 2)
                cv2.imwrite('transformed.jpg', copyInput)

            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
            input = transform(input).unsqueeze(0)

            # compute output heatmap
            output = exec_net.infer(inputs={input_blob: input})['output1']
            coords, maxvals = get_max_preds(output)
            image = data_numpy.copy()
            badPoints = 0
            for i in range(coords[0].shape[0]):
                mat = coords[0, i]
                x, y = int(mat[0]), int(mat[1])
                if maxvals[0, i] >= min_confidence_threshold:
                    cv2.circle(
                        image,
                        (int(x * data_numpy.shape[1] / output.shape[3]),
                         int(y * data_numpy.shape[0] / output.shape[2])), 2,
                        (0, 0, 255), 2)
                if maxvals[0, i] <= 0.4:
                    badPoints += 1
            if badPoints >= coords[0].shape[0] / 3:
                cv2.rectangle(
                    image,
                    (int(data_numpy.shape[1] / 2 + data_numpy.shape[1] / 4),
                     int(data_numpy.shape[0] / 2 +
                         data_numpy.shape[0] / 4)),
                    (int(data_numpy.shape[1] / 2 - data_numpy.shape[1] / 4),
                     int(data_numpy.shape[0] / 2 -
                         data_numpy.shape[0] / 4)), (255, 0, 0), 2)
                for c in range(0, 3):
                    image[10:10 + sample.shape[0], 10:10 + sample.shape[1],
                          c] = (alpha_s * sample[:, :, c] +
                                alpha_l * image[10:10 + sample.shape[0],
                                                10:10 + sample.shape[1], c])
            cv2.imshow('result', image)

            cv2.waitKey(10)
        #if cv2.waitKey(1) & 0xFF == ord('q'): break

        cap.release()
        cv2.destroyAllWindows()
Example #25
def remote(url, names):
    file1 = open("admin_files/logs.txt", "a+")
    file2 = open("admin_files/mobile_no.txt", "r")

    data = file2.read()
    file2.close()

    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer.yml')
    cascadePath = 'haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(cascadePath)

    font = cv2.FONT_HERSHEY_SIMPLEX

    id = 0

    #Variable to counter valid and invalid
    valid = 0
    invalid = 0

    flag = 0
    while flag == 0:

        site = requests.get(url)
        img_arr = np.array(bytearray(site.content), dtype=np.uint8)
        img = cv2.imdecode(img_arr, -1)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.2,
                                             minNeighbors=3,
                                             minSize=(10, 10))

        for (x, y, w, h) in faces:

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            text = ""
            if (confidence < 48):
                valid += 1
                text = names[id]
                if (valid >= 60):

                    cv2.putText(img, str("Logged to system"), (x + 5, y - 5),
                                font, 1, (255, 255, 255), 2)
                    cv2.putText(img, str("Paused for few minutes.."),
                                (x + 5, y + 5 + 270), font, 1, (255, 255, 255),
                                2)
                    cv2.imshow('camera', img)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        flag = 1
                        break

                    now = datetime.datetime.now()
                    timestamp = now.strftime("%m/%d/%Y, %H:%M:%S")
                    msg = "\n " + text + " logged at " + timestamp
                    file1.write(msg)
                    valid = 0
                    invalid = 0

                    time.sleep(3)

                else:
                    cv2.putText(img, str("Detected " + text), (x + 5, y - 5),
                                font, 1, (255, 255, 255), 2)
                    cv2.imshow('camera', img)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        flag = 1
                        break

            else:
                invalid += 1
                if (invalid >= 150):
                    cv2.putText(
                        img,
                        str("Cannot detect the face system will be alerted.."),
                        (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                    cv2.imshow('camera', img)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        flag = 1
                        break

                    alerts.alert(data)
                    invalid = 0
                    valid = 0
                else:
                    cv2.putText(img, str("Detecting.."), (x + 5, y - 5), font,
                                1, (255, 255, 255), 2)
                    cv2.imshow('camera', img)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        flag = 1
                        break

        cv2.imshow('camera', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Frames come from HTTP requests, so there is no capture to release.
    cv2.destroyAllWindows()

    file1.close()
Example #26
import rospy
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge, CvBridgeError

if __name__ == "__main__":

    rospy.init_node('VideoPublisher', anonymous=True)
    bridge = CvBridge()
    VideoRaw = rospy.Publisher('/camera/rgb/image_raw', Image, queue_size=2)
    rate = rospy.Rate(1)
    cam = cv.VideoCapture(
        '/home/ismayil/catkin_ws/src/ui_interpretation/Data/images/video.avi')
    if not cam.isOpened():
        print("Error opening video stream or file")
    while cam.isOpened():
        meta, frame = cam.read()
        if not meta:  # end of stream or failed read
            break

        try:
            # wrap the OpenCV BGR frame in a ROS Image message
            msg_frame = bridge.cv2_to_imgmsg(frame, "bgr8")
            VideoRaw.publish(msg_frame)
        except CvBridgeError as e:
            print(e)
        cv.imshow("goruntu", frame)
        cv.waitKey(3)
        #rate.sleep()

    cam.release()
    cv.destroyAllWindows()
Example #27
def detect(path):
    img = path
    cascade = cv2.CascadeClassifier("/home/epierce/Documents/haarcascade_frontalface_alt.xml")
    rects = cascade.detectMultiScale(img, 1.05, 4, cv2.CASCADE_SCALE_IMAGE, (20,20))

    if len(rects) == 0:
        return [], img
    rects[:, 2:] += rects[:, :2]
    return rects, img

def box(rects, img):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
    #cv2.imwrite('/home/epierce/Documents/detected.jpg', img);

# rects, img = detect("/home/epierce/Documents/faces.jpg")
# box(rects, img)
cap = cv2.VideoCapture(0)  # assumed source (not in the original fragment): default webcam
i = 0
while True:
    _, f = cap.read()
    if i % 5 == 0:  # run detection on every fifth frame only
        rects, img = detect(f)
        box(rects, img)
        cv2.imshow("Video", img)
    i = i + 1
    key = cv2.waitKey(20)

    if key == 27:
        break
 
cap.release()
cv2.destroyAllWindows()
Example #28
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc,20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:  # camera disconnected or stream ended
        break
    print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(frame)  # frame size must match the (640, 480) given to VideoWriter
    cv2.imshow('frame', gray)
    if cv2.waitKey(1)==ord('q'):
        break
cap.release()
out.release()
cv2.destroyAllWindows()
Example #29
''' How to start the webcam.
A webcam is not a single image: it needs an infinite loop that keeps
reading and showing live frames.
'''
import cv2

# Open the default camera (index 0).
cap = cv2.VideoCapture(0)

while True:  # infinite capture/display loop
	ret, frame = cap.read()  # ret reports whether the read succeeded
	if not ret:
		break

	cv2.imshow('Our Live sketch', frame)

	if cv2.waitKey(1) == 13:  # Enter key
		break  # stop the camera loop

cap.release()  # free the camera device for other programs

cv2.destroyAllWindows()