Example #1
def locate_thumbnail(thumbnail_filename, source_filename, display=False, save_visualization=False,
                     save_reconstruction=False, reconstruction_format="jpg"):
    thumbnail_basename, thumbnail_image = open_image(thumbnail_filename)
    source_basename, source_image = open_image(source_filename)

    logging.info("Attempting to locate %s within %s", thumbnail_filename, source_filename)
    kp_pairs = match_images(thumbnail_image, source_image)

    if len(kp_pairs) >= 4:
        title = "Found %d matches" % len(kp_pairs)
        logging.info(title)

        H, mask = find_homography(kp_pairs)

        new_thumbnail, corners, rotation = reconstruct_thumbnail(thumbnail_image, source_image, kp_pairs, H)

        print(json.dumps({
            "master": {
                "source": source_filename,
                "dimensions": {
                    "height": source_image.shape[0],
                    "width": source_image.shape[1],
                }
            },
            "thumbnail": {
                "source": thumbnail_filename,
                "dimensions": {
                    "height": thumbnail_image.shape[0],
                    "width": thumbnail_image.shape[1],
                }
            },
            "bounding_box": {
                "height": corners[0][1] - corners[0][0],
                "width": corners[1][1] - corners[1][0],
                "x": corners[1][0],
                "y": corners[0][0],
            },
            "rotation_degrees": rotation
        }))

        if save_reconstruction:
            new_filename = "%s.reconstructed.%s" % (thumbnail_basename, reconstruction_format)
            cv2.imwrite(new_filename, new_thumbnail)
            logging.info("Saved reconstructed thumbnail %s", new_filename)
    else:
        title = "Found only %d matches; skipping reconstruction" % len(kp_pairs)
        logging.warning(title)
        new_thumbnail = corners = H = mask = None

    if display or save_visualization:
        vis_image = visualize_matches(source_image, thumbnail_image, new_thumbnail, corners, kp_pairs, mask)

    if save_visualization:
        vis_filename = "%s.visualized%s" % os.path.splitext(thumbnail_filename)
        cv2.imwrite(vis_filename, vis_image)
        logging.info("Saved match visualization %s", vis_filename)

    if display:
        cv2.imshow(title, vis_image)
        cv2.waitKey()
        cv2.destroyAllWindows()
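match_images, find_homography, reconstruct_thumbnail and visualize_matches are project helpers that are not shown. As a minimal sketch (assuming kp_pairs is a list of (thumbnail_point, source_point) coordinate pairs), find_homography could wrap cv2.findHomography:

import numpy as np
import cv2

def find_homography(kp_pairs, reproj_threshold=5.0):
    # Split the matched keypoint pairs into source and destination point arrays.
    src_pts = np.float32([p1 for p1, p2 in kp_pairs]).reshape(-1, 1, 2)
    dst_pts = np.float32([p2 for p1, p2 in kp_pairs]).reshape(-1, 1, 2)
    # Estimate the homography with RANSAC; mask flags the inlier matches.
    return cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, reproj_threshold)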
Example #2
def main():

    for fname in glob("left/*/*/ein/sceneModel/model.yml"):
        print(fname)

        f = open(fname) 

        lines = []
        # ignore the %YAML:1.0, because the python parser doesn't handle 1.0.
        f.readline() 

        for line in f:
            # for some reason the python parser doesn't like this line either.
            if "background_pose" in line:
                continue
            lines.append(line)
        # each line already ends with a newline, so join directly
        data = "".join(lines)

        ymlobject = yaml.load(data, Loader=yaml.SafeLoader)
        #print ymlobject
        scene = ymlobject["Scene"]
        observed_map = GaussianMap.fromYaml(scene["observed_map"])
        image = observed_map.toImage()
        cv2.imwrite("observed.png", image)
        cv2.imshow("observed map", image)
        
        
        
        
        print "observed map: ", observed_map.width, "x", observed_map.height
        dimage = readMatFromYaml(scene["discrepancy_magnitude"])
        cv2.imshow("discrepancy magnitude", dimage)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
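GaussianMap.fromYaml and readMatFromYaml are helpers from the same project. A minimal sketch of readMatFromYaml, assuming the YAML node stores an OpenCV matrix in FileStorage style (rows, cols and a flat data list):

import numpy as np

def readMatFromYaml(node):
    # Reshape the flat 'data' list into a rows x cols array.
    return np.array(node["data"], dtype=np.float64).reshape(node["rows"], node["cols"])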
Example #3
def black_field(a, b, img):
    # Crop a 10x10 window centered on (a, b) and return its inverted mean intensity.
    cropped_img = img[b-5:b+5, a-5:a+5]
    cv2.imshow('cropped', cropped_img)
    return 255 - cv2.mean(cropped_img)[0]
Example #4
def find_target(image_to_analyze, trackbar_window,
                image_to_draw_on=None, indicator_color=(0, 0, 0),
                adjustment_ratio=1):
    lower, upper = trackbar_window.get_range()
    median = cv2.medianBlur(image_to_analyze, 15)
    mask = cv2.inRange(median, lower, upper)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, None, iterations=2)
    cv2.imshow(trackbar_window.window_name, mask)

    # Find contours in the mask and initialize the current (x, y) center of the ball
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    radius = None

    # Proceed only if at least one contour was found.
    if len(contours) > 0:
        # Find the largest contour in the mask, then use it to
        # compute the minimum enclosing circle and centroid.
        c = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        center = (int(adjustment_ratio*x), int(adjustment_ratio*y))
        radius = int(adjustment_ratio*radius)
    if center is not None and image_to_draw_on is not None:
        draw_largest_contour(image_to_draw_on, center, radius, color=indicator_color)
    return center, radius
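The [-2] index on the cv2.findContours result above is a version-compatibility trick: OpenCV 2.x and 4.x return (contours, hierarchy) while 3.x returns (image, contours, hierarchy), so the contour list is always the second-to-last element. The same idea as a tiny helper (equivalent in spirit to imutils.grab_contours):

def grab_contours(result):
    # Works for both the 2-tuple (OpenCV 2.x/4.x) and 3-tuple (OpenCV 3.x) forms.
    return result[-2]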
Example #5
def build_study_images():
    available_chars = string.digits

    captcha.get_captcha(chars=available_chars).save('captcha.png')
    img = cv2.imread('captcha.png')
    new_img, sub_images = split_symbols(img)
    cv2.imshow("all_img", new_img)

    output_dir = os.path.join(PROJECT_ROOT, "training")

    # Use a set: a bare map() object would be exhausted after the first
    # membership test in Python 3.
    available_keys = set(map(ord, available_chars))

    for sub_img in sub_images:
        cv2.imshow("char", sub_img)
        while True:
            key = cv2.waitKey(0) % 256

            if key == 27:  # ESC quits
                sys.exit()

            elif key in available_keys:
                char = chr(key)
                print(key, char)
                save_char(sub_img, char, output_dir)
                break
Example #6
def main():
    files = glob.glob("./scans/*.jpg")
    files += glob.glob("./scans/*.jpeg")
    for f in files:
        reset_stats()
        print "Processing: " + f.split("/")[len(f.split("/")) - 1]

        schedule = Schedule()
        schedule.load_data()
        if schedule.get_has_schedule():
            scan_image(f, schedule)

            print "Sheet ok? ",
            while True:
                cv2.imshow("image", cv2.resize(img, (446, 578)))
                cv2.moveWindow("image", 0, 0)
                # user_in = raw_input()
                key = cv2.waitKey(-1)
                if key == ord("y"):
                    print "Sheet ok... Dumping data"
                    dump_stats()
                    os.remove(f)
                    break
                elif key == ord("n"):
                    print "Marking to redo"
                    #os.rename(f, "./scans/redo/" + f.split("/")[len(f.split("/")) - 1])
                    break
                elif key == ord("q"):
                    exit(0)
                else:
                    continue
            cv2.destroyAllWindows()
        else:
            print "Unable to load schedule... Aborting"
Example #7
	def captureImage(self):
		position = [0, 0]
		velocity = [0, 0]

		frame = self.cap.read()[1]

		if frame is not None:
			frame = cv2.flip(frame, 1)

			self.frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

			threshold_mask = self.createMultipleThresholds(self.frame_hsv)


			contour = self.getLargestContour(threshold_mask)
			if type(contour) != int:
				cv2.drawContours(frame, contour, -1, (0, 255, 255), 2)
				position = self.getContourMoment(contour)
				cv2.circle(frame, (position[0], position[1]), 5, (0,0,255), -1)

			# calculate velocity
			velocity = [position[0] - self.oldPosition[0], position[1] - self.oldPosition[1]]
			# print velocity

			cv2.imshow("Frame", frame)
			cv2.waitKey(10)

		self.oldPosition = position
		return [position, velocity]
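createMultipleThresholds, getLargestContour and getContourMoment are methods of the surrounding class and are not shown. The type(contour) != int check suggests getLargestContour returns an int sentinel when nothing is found; a minimal sketch consistent with that:

def getLargestContour(self, mask):
    # [-2] keeps this compatible with both 2-tuple and 3-tuple findContours returns.
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not contours:
        return -1  # int sentinel, matching the type check in captureImage
    return max(contours, key=cv2.contourArea)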
Example #8
def main(argv):
    args = str(sys.argv[1])
    hogParams = {'hitThreshold': -.5, 'scale': 1.05}
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    video = cv2.VideoCapture(args)

    ret, frame = video.read()

    while ret:

        cimg = np.copy(frame)
        people, w = hog.detectMultiScale(frame, **hogParams)
        filtered = []
        for ri, r in enumerate(people):
            for qi, q in enumerate(people):
                if ri != qi and inside(r, q):
                    print("break")
                    break
            else:
                filtered.append(r)
        # draw_detections(frame, people)
        draw_detections(cimg, filtered, 1)
        cv2.imshow('detected people', cimg)
        cv2.waitKey(2)

        ret, frame = video.read()

    cv2.destroyAllWindows()
    video.release()
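inside and draw_detections are helpers in the style of OpenCV's peopledetect.py sample: inside tests whether one detection rectangle is nested in another so duplicates can be dropped. A sketch of that test:

def inside(r, q):
    # True when rectangle r = (x, y, w, h) lies strictly inside rectangle q.
    rx, ry, rw, rh = r
    qx, qy, qw, qh = q
    return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh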
Example #9
	def display(self):
		""" Displays current frame with rectangles and boxes"""
		allface = self.getFaces()
		for i in range(len(self.visibleFaceList)):
			if len(self.visibleFaceList[i].getPrevPositions()) > self.cleanThresh:
				self.showRectangle(self.visibleFaceList[i].getPosition(),self.visibleFaceList[i].getID())
		cv2.imshow("show", self.frameImage)
Example #10
    def draw_motion(self, im=None, draw_outliers=False):

        show_image = False

        if im is None:
            im = self.result_image
            show_image = True

        if draw_outliers:
            print("points shape: " + str(self.prev_points.shape[0]) + " outliers " +
                  str(self.outliers.shape))

        for i in range(0, self.prev_points.shape[0]):
            prev_pt = self.prev_points[i]
            next_pt = self.next_points[i]

            color = np.array([0, 255, 0])

            if draw_outliers:
                # Each point contributes two outlier flags (x and y residuals).
                idx = 2 * i
                if self.outliers[idx] or self.outliers[idx + 1]:
                    color = np.array([0, 0, 255])

            cv2.circle(im, (int(prev_pt[0]), int(prev_pt[1])), 2, color, -1)
            cv2.line(im, (int(prev_pt[0]), int(prev_pt[1])), (int(next_pt[0]), int(next_pt[1])),
                     np.array([255, 0, 0]), 1)

        if show_image:
            cv2.imshow("", im)
            cv2.waitKey(0)

        return im
Example #11
def main():
    cap = cv2.VideoCapture(0)
    disto = cycle(funcs)

    time = 0
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if time % 3 == 0:  # choose how long to keep the current distortion
            distfunc = next(disto)
        frm = distfunc(frame)

        # Our operations on the frame come here
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Display the resulting frame
        cv2.imshow('frame',frm)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time += int(random.random() * 10)  # add a random element to the distortion timing

        if time / 50 > 1:
            disto = cycle(funcs)  # recreate the cycle so the effects won't repeat so much
            time = 0

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #12
def show_img(data, suffix, show):
    global wnd_x
    global wnd_y

    if data is None:
        return

    image = data.astype(np.uint8)

    if image.shape[0] == 6:
        img = image.transpose(1,2,0)
        img1 = img[:,:,:3]
        img2 = img[:,:,3:]
    else:
        img1 = image[0]
        img2 = image[1]

    filename = 'img-1-' + suffix + '.jpg'
    cv2.imwrite(filename, img1)
    if show:
        cv2.imshow("IMG 1", img1)
        cv2.moveWindow("IMG 1", wnd_x, wnd_y)

    filename = 'img-2-' + suffix + '.jpg'
    cv2.imwrite(filename, img2)
    if show:
        cv2.imshow("IMG 2", img2)
        wnd_x += 300
        cv2.moveWindow("IMG 2", wnd_x, wnd_y)
        wnd_x = 10
        wnd_y += 300
Example #13
def colour_picker(colourSTR="(unspecified)", colourGrabWindowSize=5, colourHueWindowSize=40, colourSaturationWindowSize=40, colourValueWindowSize=40):
    global cam
    global hsv

    print("Right click the", colourSTR, "blob. Hit escape when done.")
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", mouseCallBack, param=None)
    try:
        while True:
            # Get image from webcam and convert to HSV
            ret, img = cam.read()
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

            cv2.imshow("image", img)

            # Clamp the sampling window around the last right-click position
            cMinY = max(0, rightButtonY - colourGrabWindowSize)
            cMaxY = min(len(hsv) - 1, rightButtonY + colourGrabWindowSize)
            cMinX = max(0, rightButtonX - colourGrabWindowSize)
            cMaxX = min(len(hsv[0]) - 1, rightButtonX + colourGrabWindowSize)

            # Average hue/saturation/value over the sampling window
            cHue = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 0]))
            cSaturation = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 1]))
            cValue = int(npy.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 2]))

            # Sleep infinite loop for ~10ms; exit if user presses <Esc>
            if cv2.waitKey(10) == 27:
                break

    finally:
        cv2.destroyWindow("image")
        return npy.array([cHue, cSaturation, cValue])
Example #14
def erode(imageName, iterations):
    """Reads the named image, erodes it, and displays input and output"""
    image = cv2.imread(imageName)
    eroded = cv2.erode(image, None, iterations=iterations)
    cv2.imshow("OUTPUT", eroded)
    cv2.imshow("INPUT", image)
    cv2.waitKey(0)
Example #15
def verb_showfilters(argv):
	"""Display the filter banks stored in a session snapshot"""
	
	f  = KITNNFile(argv[2])
	s  = f.getSession(argv[3]).d["snap/1"]
	
	fine   = s["data/2" ][...]
	medium = s["data/8" ][...]
	coarse = s["data/14"][...]
	
	w  = 3+(16*7+15*3)+3
	h  = 3+( 9*7+ 8*3)+3
	
	img= np.zeros((h,w,3), dtype="uint8")
	
	for i in range(9):
		for j in range(16):
			n     = i*16+j
			if i in [0,1,2]:
				card  = fine  [n- 0]
			elif i in [3,4,5]:
				card  = medium[n-48]
			elif i in [6,7,8]:
				card  = coarse[n-96]
			card -= np.min(card)
			card /= np.max(card)
			card  = card.transpose(1,2,0)
			
			img[3+i*10:3+i*10+7, 3+j*10:3+j*10+7] = 255*card
	
	img = cv2.resize(img, (0,0), None, 8, 8, cv2.INTER_NEAREST)
	cv2.imshow("Filters", img)
	cv2.imwrite("Filters.png", img)
	cv2.waitKey()
Example #16
    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            vis = self.frame.copy()
            if playing:
                tracked = self.tracker.track(self.frame)
                for tr in tracked:
                    cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
                    for (x, y) in np.int32(tr.p1):
                        cv2.circle(vis, (x, y), 2, (255, 255, 255))

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear()
            if ch == 27:
                break
Example #17
def coolBlack():
    IMAGE_WEIGHT = 0.5

    image = cv2.imread("G:/Filters/wasim.jpg",0)
    black = cv2.imread("G:/Filters/black5.jpg",0)
    black = cv2.resize(black, image.shape[::-1])

    res1 = cv2.addWeighted(image, IMAGE_WEIGHT, black, 1 - IMAGE_WEIGHT, 1)


    #NORMALIZE IMAGES
    image = np.float32(image)
    black = np.float32(black)

    image /= 255
    black /= 200

    res = image*black

    cv2.imshow("RES", res)
    cv2.waitKey(0)

    fname = "G:/Filtes/temp.jpg"
    cv2.imwrite(fname, res)
    res = cv2.imread(fname, 0)

    cv2.imshow("BLACK", res)
    cv2.waitKey(0)
Example #18
def get_images_and_labels(path):
    # Collect the absolute image paths in image_paths.
    # Images with the .sad (sad face) extension are excluded from the training
    # set; they are used later to test the accuracy of the training.
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]
    # images will contain the face images
    images = []
    # labels will contain the label assigned to each image
    labels = []
    for image_path in image_paths:
        # Read the image and convert to grayscale
        image_pil = Image.open(image_path).convert('L')
        # Convert the image format into numpy array
        image = np.array(image_pil, 'uint8')
        # Get the label of the image
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject",""))
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image,
                                             scaleFactor=1.5,
                                             minNeighbors=6,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)
        # If face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            labels.append(nbr)
            cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
            cv2.waitKey(50)
    # return the images list and labels list
    return images, labels
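The returned lists are in the format expected by the face recognizers in opencv-contrib. A typical usage sketch (the ./yalefaces path is a placeholder):

import numpy as np
import cv2

images, labels = get_images_and_labels("./yalefaces")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(images, np.array(labels))
# predict() returns the best-matching label and a distance-style confidence
label, confidence = recognizer.predict(images[0])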
Example #19
def main():

    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Capture could not be opened successfully.") 

    while True:
        
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        _, img = cap.read()

        alpha = cv2.getTrackbarPos('Contrast', 'image')
        # Sliders can't go below 0, so the slider starts at 50 and we subtract it
        beta = cv2.getTrackbarPos('Brightness', 'image') - 50
        print(beta)

        toggle = cv2.getTrackbarPos(switch, 'image')
        segmented = toggle != 0
        
        #trans_img = cv2.add(mul_img, b_array)
        trans_img = (alpha * img)       
        #trans_img = np.where(trans_img + beta >= 0, trans_img + beta, 0)
        

        if segmented:
            gray = cv2.cvtColor(trans_img, cv2.COLOR_BGR2GRAY)
            _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            cv2.imshow('image', binary)
        else:
            cv2.imshow('image', trans_img)
Example #20
    def plot_place_cell_id_on_map(self, map_data, place_cell_id):
        # Plot a red box where the vehicle is
        min_x = place_cell_id[1].min()
        min_y = place_cell_id[2].min()
        ptp_y = place_cell_id[2].ptp()

        map_out = np.copy(map_data)  # force a copy so it doesn't keep old moves
        map_out = self.flip_rotate_color_image(map_out, 0, False)
        # Loop through each place id
        for current_place in range(0, place_cell_id[0].size):
            # sq = self.squares_grid[place_cell_id[1][current_place]-min_x, place_cell_id[2][current_place]-min_y, :]
            # Flipping this in the y-plane
            sq = self.squares_grid[place_cell_id[1][current_place] - min_x,
                                   np.absolute(place_cell_id[2][current_place] - min_y - ptp_y), :]
            # Place the number at the bottom middle of the square
            x_pos = sq[0]  # +np.round(np.diff([sq[2], sq[0]])/2)
            y_pos = self.pixel_width - sq[1] + np.round(np.diff([sq[3], sq[1]]) / 2)
            cv2.putText(map_out, str(int(place_cell_id[0][current_place])), (int(x_pos), int(y_pos)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)

        textsize = cv2.getTextSize('N', cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
        # cv2.fillConvexPoly(map_out, np.abs(np.array([[self.pixel_width, 0], [self.pixel_width, 0], [self.pixel_width, 0]]) - self.arrow[('heading')]), (0, 0, 255))
        cv2.putText(map_out, 'N', (self.pixel_width - int((textsize[0][1] / 2) + 10), int(30 + (textsize[1] / 2))),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

        cv2.imshow(self.place_cell_map, map_out)
        return map_out
Example #21
	def convertData(self, bagfile):
		seq = 0
		bridge = CvBridge()
		while(True):
			# Capture frame-by-frame
			ret, cvImage = self.capture.read()
			try:
				imageMsg = bridge.cv2_to_imgmsg(cvImage, "bgr8") # TODO: format spec as cmd option?
			except CvBridgeError as e:
				print(e)

			# creating ros message
			seq = seq + 1

			imageMsg.header.seq = seq
			# TODO: temporary hack, time sync/source is needed
			imageMsg.header.stamp =  rospy.Time.from_sec(time.time()) 

			# write message to bag file
			bagfile.write(self.topic, imageMsg, imageMsg.header.stamp)

			# this is not so important for conversion
			if self.showFrames:
				cv2.imshow('frame', cvImage)
				if cv2.waitKey(1) & 0xFF == ord('q'):
					break
Example #22
	def run(self):
		runFlag = True
		cv2.namedWindow("TurtleCam 9000", 1)
		while(runFlag):
			image, timesImageServed = self.robot.getImage()
			with self.lock:
				if timesImageServed > 30:
					if not self.stalled:
						print("Camera Stalled!")
					self.stalled = True
				else:
					self.stalled = False


			frame = self.mcs.update(image.copy())
			cv2.imshow("TurtleCam 9000", frame)

			code = chr(cv2.waitKey(10) & 255)

			if code == 't':
				cv2.imwrite("/home/macalester/catkin_ws/src/speedy_nav/res/captures/cap-" + str(datetime.now()) + ".jpg", image)
				print "Image saved!"
			if code == 'q':
				break

			with self.lock:
				runFlag = self.runFlag
Example #23
    def object_detection(self, depth_array):
        """
        Function to detect objects from the depth image given
        :return:
        """
        self.detect_arm()

        # Perform thresholding on the image to remove all objects behind a plane
        ret, bin_img = cv2.threshold(depth_array, 0.3, 1, cv2.THRESH_BINARY_INV)

        # Erode the image a few times in order to separate close objects
        element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        err_img = cv2.erode(bin_img, element, iterations=20)

        # Create a new array of type uint8 for the findContours function
        con_img = np.array(err_img, dtype=np.uint8)

        # Find the contours of the image and then draw them on
        contours, hierarchy = cv2.findContours(con_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(con_img, contours, -1, (128, 255, 0), 3)

        # Draw a bounding rectangle around each detected object
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(con_img, (x, y), (x + w, y + h), (255, 0, 127), thickness=5, lineType=8, shift=0)

        # Show the colour images of the objects
        # self.show_colour(contours)

        # Show the Depth image and objects images
        cv2.imshow('Contours', con_img)
        cv2.imshow("Depth", bin_img)
        cv2.waitKey(3)
Example #24
def dewarp(imagedir):
    # Loading from json file
    C = CameraParams.fromfile(os.path.join(imagedir, "params.json"))
    K = C.K
    D = C.D
    print("Loaded camera parameters from " + os.path.join(imagedir, "params.json"))

    for f in file_list(imagedir, ['jpg', 'jpeg', 'png']):
        print(f)
        colour = cv2.imread(f)
        grey = cv2.cvtColor(colour, cv2.COLOR_BGR2GRAY)

        h, w = grey.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h))
        mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, newcameramtx, (w,h), 5)
        dewarped = cv2.remap(grey, mapx, mapy, cv2.INTER_LINEAR)

        x, y, w, h = roi
        dewarped = dewarped[y:y+h, x:x+w]
        grey = cv2.resize(grey, (0, 0), fx=0.5, fy=0.5)
        dewarped = cv2.resize(dewarped, (0, 0), fx=0.5, fy=0.5)

        cv2.imshow("Original", grey)
        cv2.imshow("Dewarped", dewarped)
        cv2.waitKey(-1)
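file_list is a project helper that is not shown; a plausible sketch matching how it is called above:

import glob
import os

def file_list(imagedir, extensions):
    # Collect image paths under imagedir with any of the given extensions.
    files = []
    for ext in extensions:
        files.extend(glob.glob(os.path.join(imagedir, "*." + ext)))
    return sorted(files)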
Example #25
def get_info_part(li):
    li_info = li.get_information_part()
    if li_info is not None:
        li_info = cv2.resize(li_info, (info_size, info_size))
        cv2.imshow('li_info', li_info)
        cv2.moveWindow('li_info', int(1280*FRAME_SIZE_FACTOR), 0)
    return li_info
Example #26
def realisticTexturemap(H_G_M, scale):
    map_img = cv2.imread('Images/ITUMap.bmp')
    point = getMousePointsForImageWithParameter(map_img, 1)[0]

    texture = cv2.imread('Images/ITULogo.jpg')
    #texture = cv2.cvtColor(texture,cv2.COLOR_BGR2GRAY)
    H_T_M = np.zeros(9).reshape(3,3)
    H_T_M[0][0] = scale
    H_T_M[1][1] = scale

    H_T_M[0][2] = point[0]
    H_T_M[1][2] = point[1]

    H_T_M[2][2] = 1

    H_M_G = np.linalg.inv(H_G_M)

    H_T_G = np.dot(H_M_G, H_T_M)

    fn = "GroundFloorData/sunclipds.avi"
    cap = cv2.VideoCapture(fn)
    #load Tracking data
    running, frame = cap.read()
    while running:
        h, w, d = frame.shape

        warped_texture = cv2.warpPerspective(texture, H_T_G, (w, h))

        result = cv2.addWeighted(frame, .6, warped_texture, .4, 50)

        cv2.imshow("Result", result)
        cv2.waitKey(0)

        # Read the next frame last, so a failed read ends the loop before frame is used
        running, frame = cap.read()
Example #27
def textureMapGroundFloor():
    #create H_T_G from first frame of sequence
    texture = cv2.imread('Images/ITULogo.jpg')

    fn = "GroundFloorData/sunclipds.avi"
    sequence = cv2.VideoCapture(fn)
    running, frame = sequence.read()

    h_t_g, calibration_points = SIGBTools.getHomographyFromMouse(texture, frame, -4)
    print(h_t_g)
    #fig = figure()
    while running:
        running, frame = sequence.read()

        if not running:
            return

        #texture map
        h,w,d = frame.shape
        warped_texture = cv2.warpPerspective(texture, h_t_g,(w, h))
        result = cv2.addWeighted(frame, .7, warped_texture, .3, 50)

        #display
        cv2.imshow("Texture Mapping", result)
        cv2.waitKey(1)
Example #28
    def show_colour(self, cnt):
        """
        Use the objects found to show them in colour
        :return:
        """
        # Go through each rectangle and display the RGB crop
        length = len(cnt)

        # Pre-fill with placeholders so a failed crop keeps the list aligned
        crop_rgb = [1] * length

        for i in range(length):
            x, y, w, h = cv2.boundingRect(cnt[i])

            # Crop the rgb image for each box; index by the loop counter,
            # not by the rectangle's x coordinate
            try:
                crop_rgb[i] = self.rgb_img[y:y + h, x:x + w]
            except:
                pass

        for i in range(length):
            cv2.imshow("Cropped " + str(i), crop_rgb[i])
        cv2.waitKey(3)
Example #29
def calibrateSharpening():
    frame = cv2.imread("failed_frame_224.png")
    new_frame = sharpen(frame)
    found, _ = cv2.findChessboardCorners(new_frame, (9,6))
    print(found)
    cv2.imshow("sharpened", new_frame)
    cv2.waitKey(0)
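sharpen is not shown; a hypothetical stand-in using unsharp masking (add back the difference between the image and a blurred copy):

import cv2

def sharpen(frame):
    blurred = cv2.GaussianBlur(frame, (0, 0), 3)
    # 1.5x original minus 0.5x blur boosts edges without shifting brightness.
    return cv2.addWeighted(frame, 1.5, blurred, -0.5, 0)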
Example #30
def main():
    cv.NamedWindow("original", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow("keyboard", cv.CV_WINDOW_AUTOSIZE)

    cam = cv2.VideoCapture(0)
    cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, 600)
    cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    cam.set(cv.CV_CAP_PROP_FPS, 24)
    ret, frame = cam.read()

    player = sound.SoundPlayer('./resources/s1.sf2')
    
    while True:
        ret, frame = cam.read()

        keyboard_image = KeyboardRecognizer(frame).get_keyboard()
        if keyboard_image is not None:
            notes = KeysRecognizer(keyboard_image, 'keyboard').get_pressed_keys()
            player.play_notes(notes)

        cv2.imshow('original', frame)
        
        if cv2.waitKey(1) == 27:  # Escape code
            break

    cv2.destroyAllWindows()
Example #31
def show_images(images):
    for i, image in enumerate(images):
        # Give each window a distinct name so multiple images stay visible
        cv2.imshow("image " + str(i), image)
    # waitKey(0) blocks until any keypress
    cv2.waitKey(0)
Example #32
# disable(model, "all", 4)
# disable(model, 1, 7)
# disable(model, 2, 7)
# disable(model, 3, 6)

# disable(model, all, 7)
# disable_weights(model, 0, 7)
# disable_weights(model, 1, 7)
# disable_weights(model, 2, 7)
# disable_weights(model, 3, 7)
# disable_weights(model, 4, 7)
# disable_weights(model, 5, 7)

print("\nCustom performance")
testPerformance(model, data[INDICES_TEST], iClasses[INDICES_TEST])
##################

torch.save(model.state_dict(), MODELDIR + "/modified.model")

img = renderKernels(model)
cv2.imwrite(MODELDIR + "/modified.png", img)
cv2.imshow("Kernels of model", img)

weights = model.fc.weight.data.numpy()
weights = weights.reshape((NCLASSES, NCHANNELS, 4 * 4))
img = render3dBarCharts(weights, 10, 2, 200)
cv2.imwrite(MODELDIR + "/modifiedWeights.png", img)
cv2.imshow("Weights of model", img)

cv2.waitKey(0)
Example #33
import glob
import time

import cv2
import numpy as np

images = glob.glob("/home/pi/Desktop/cone/real/*.jpg")
#print images

images.sort()
tmp_list = images[0].split('/')
title = tmp_list[-1]

image = cv2.imread(images[0])

height, width, layers = image.shape

video = cv2.VideoWriter('/home/pi/Desktop/cone/video.avi',
                        cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 1, (640, 240))

for name in images:
    tmp_list = name.split('/')
    title = tmp_list[-1]
    image_real = cv2.imread("/home/pi/Desktop/cone/real/" + title)
    image_filtered = cv2.imread("/home/pi/Desktop/cone/filtered/" + title)
    vis = np.concatenate((image_real, image_filtered), axis=1)
    time.sleep(0.1)

    cv2.imshow("vis", vis)
    key = cv2.waitKey(1) & 0xFF
    video.write(vis)

video.release()
Example #34
dt = DecisionTreeClassifier()
training_classifier(dt, train_data, test_data, atributes, labels)

# Computing accuracy on the test dataset
'''predictions = gnb.predict(test_data[atributes])
accuracy = metrics.accuracy_score(predictions, test_data[labels])
confusion_matrix = metrics.confusion_matrix(test_data[labels], predictions)
print('Accuracy: {}'.format(accuracy))
print('Matrix: {}'.format(confusion_matrix))'''

img = cv2.imread('images/image1.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
blur3 = cv2.GaussianBlur(hsv, (3,3), 0)
blur5 = cv2.GaussianBlur(hsv, (5,5), 0)
blur7 = cv2.GaussianBlur(hsv, (7,7), 0)
height, width, channels = hsv.shape
mask = np.zeros_like(img)

for j in range(height):
    for i in range(width):
        pixel = [np.array([hsv.item(j,i,0), hsv.item(j,i,1), hsv.item(j,i,2), blur3.item(j,i,0), blur5.item(j,i,0), blur7.item(j,i,0)], dtype=np.float32)]
        results = dt.predict(pixel)
        if(results == 1):
            mask[j, i] = [0, 0, 255]

mask = cv2.medianBlur(mask, 5)
img = cv2.addWeighted(img, 0.8, mask, 0.5, 0.2)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
        # h,mask = cv2.findHomography(srcPoints=pts1,dstPoints=pts2,method=cv2.RANSAC, ransacReprojThreshold=5.0)
        # height, width, channels = res.shape
        # im1Reg = cv2.warpPerspective(note, h, (width, height))
        # mask2 = np.zeros(res.shape, dtype=np.uint8)
        # roi_corners2 = np.int32(positions2)
        # channel_count2 = res.shape[2]
        # ignore_mask_color2 = (255,) * channel_count2
        # cv2.fillConvexPoly(mask2, roi_corners2, ignore_mask_color2)
        # mask2 = cv2.bitwise_not(mask2)
        # masked_image2 = cv2.bitwise_and(res, mask2)
        # res = cv2.bitwise_or(im1Reg, masked_image2)
        # --------------------------------------------------------------------------

    # Result output

    cv2.imshow(win_name, res)
    key = cv2.waitKey(1)
    if key == 27:  # Esc: quit
        break
    elif key == ord(' '):  # Space bar: select an ROI and set it as img1
        x,y,w,h = cv2.selectROI(win_name, frame, False)
        if w and h:
            img1 = frame[y:y+h, x:x+w]
            gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            kp, desc = detector.detectAndCompute(gray1, None)
            tup = [kp,desc,img1]
            templates.append(tup)



else:
Example #36
    ax2.plot(x_axis, speed_right, 'b-')
    ax3.plot(x_axis, speed_left, 'r-')
    ax3.plot(x_axis, speed_right, 'b-')
    #ax4.bar(index, optical_direction, color='r', label='degree')
    plt.draw()
    
    # update cx_neurons
    velocity = np.array([sl, sr])
    tl2, cl1, tb1, tn1, tn2, memory, cpu4, cpu1, motor = update_cells(
            heading=heading_list[frame_num]/180.0*np.pi, velocity=velocity, tb1=tb1, memory=memory, cx=cx)
    angle, distance = cx.decode_cpu4(cpu4)
    angle_list[frame_num] = angle/np.pi*180.0
    distance_list[frame_num] = distance

    # show frames
    cv2.imshow('video', cv2.resize(draw_flow(next, flow), (0, 0), fx=3.0, fy=3.0))
    #print('Frame number: ', frame_num)
    ch = cv2.waitKey(5) & 0xFF 
    if ch == ord('q'):
        break
    elif ch == ord(' '):
        while True:
            ch = cv2.waitKey(5) & 0xFF
            if ch == ord(' ') or ch == ord('q'):
                break
            time.sleep(0.2)
    prvs = next
    start_time = time.time()


ax1.clear()
Example #37
model_cl.load_weights(model_path)
logits = model_cl.output[0]
logits = tf.argmax(logits, -1)
model = models.Model(model_cl.input, logits)
cam = cv2.VideoCapture("test.mp4")

start1 = time.time()
counter = 0
while True:
    counter += 1
    start = time.time()
    ret = cam.grab()
    # Decode only every fourth grabbed frame to save time
    if counter % 4 == 0:
        ret, frame = cam.retrieve()
    else:
        continue
    # frame = frame[150:,...]
    img = cv2.resize(frame, (800,288),interpolation =cv2.INTER_NEAREST)
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # pred = model(np.expand_dims(gray, 0), training=False)
    pred = model.predict(np.expand_dims(img-train_mean_channels, 0))
    print(np.shape(pred))
    
    
    cv2.imshow("pred", np.uint8(pred[0]) * 255)
    cv2.imshow("image", img)
    key = cv2.waitKey(3)
    if key == 27:
        cv2.destroyAllWindows()
        break
    print((time.time()-start))
Example #38
def StationEntryView(request, station_no):

    if request.method == 'POST':
        try:
            entry = request.POST.get("first")
            entry = int(entry)
            return redirect('core:station_entry', entry)
        except:
            pass

        try:
            exit = request.POST.get("second")
            exit = int(exit)
            return redirect('core:station_exit', exit)
        except:
            pass
    station = models.Station.objects.get(station_no=station_no)
    pics = list(models.Person.objects.all().values_list('pic', flat=True))
    known_face_names = list(models.Person.objects.all().values_list('name', flat=True))
    uids = list(models.Person.objects.all().values_list('uid', flat=True))
    known_face_encodings = []
    for pic in pics:
        image = face_recognition.load_image_file(os.path.join(base_dir, str(pic)))
        image_encoding = face_recognition.face_encodings(image)[0]
        known_face_encodings.append(image_encoding)

    i = 0

    video_capture = cv2.VideoCapture(0)
    face_locations = []
    face_encodings = []
    face_names = []
    face_uids = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        try:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # small_frame = frame

            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]

            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

                face_names = []
                face_uids = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
                    name = "Unknown"
                    uid = -1
                    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                    best_match_index = np.argmin(face_distances)
                    if matches[best_match_index]:
                        name = known_face_names[best_match_index]
                        # print("pehchaan liya", name)
                        uid = uids[best_match_index]
                        person = models.Person.objects.get(uid=uid)
                        log = models.Log.objects.get_or_create(person=person, entry_station=station, status=0)
                    face_names.append(name)
                    face_uids.append(uid)
            i += 1
            process_this_frame = not process_this_frame

            # Display the results
            for (top, right, bottom, left), name, uid in zip(face_locations, face_names, face_uids):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, str(name + ':' + str(uid)), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        except:
            # print(frame)
            pass
        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

    return render(request, 'core/index.html')
Example #39
def show_cam(camera=0, bw=False, mirrorlr=False, mirrorud=False, hsv=False, detect_region=False):
    cam = cv2.VideoCapture(camera)
    try:
        while True:
            img_available, img = cam.read()


            if img_available:
                # convert the image to grayscale
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img = cv2.normalize(img,img,0,255,cv2.NORM_MINMAX)
                img = cv2.addWeighted( img, 1.5, np.zeros(img.shape, img.dtype), 0, 0)

                
                if detect_region:
                    box = detect_qr(img)
                    # draw a bounding box around the detected QR code
                    # cv2.drawContours(img, [box], -1, (0, 255, 0), 3)

                    # Set area outside region of interest to white
                    mask = np.zeros_like(img) # Create mask where white is what we want, black otherwise
                    for bx in box:
                        cv2.fillConvexPoly(mask, np.array(bx), 255)
                    out = 255*np.ones_like(img)
                    out[mask == 255] = img[mask == 255]
                    img = out


                if hsv:
                    img = cv2.normalize(img,img,0,255,cv2.NORM_MINMAX)
                    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)


                    # define range of bw color in HSV
                    sat_limit = 40
                    lower_bw = np.array([0,0,0])
                    upper_bw = np.array([255,sat_limit,255])

                    # Threshold the HSV image to get only white colors
                    mask = cv2.inRange(img_hsv, lower_bw, upper_bw)
                    # Bitwise-AND mask and original image
                    img = cv2.bitwise_and(img,img, mask= mask)
                
                # Cast to BW image
                if bw:
                    # Up contrast
                    img = cv2.addWeighted(img, 3, np.zeros(img.shape, img.dtype), 0, 0)

                    # blur noise
                    img = cv2.bilateralFilter(img,5,75,75)

                    
                    # blur = cv2.GaussianBlur(img,(5,5),0)
                    # _, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
                    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
                    # _, img = cv2.threshold(img, img.mean()/2, 255, cv2.THRESH_TOZERO)
                    # _, img = cv2.threshold(img, 200, , cv2.THRESH_TOZERO_INV)



                decoded = decode(img)  # decode() from a QR/barcode library such as pyzbar
                if len(decoded) > 0:
                    print(decoded)

                # Flip image if necessary
                if mirrorlr:
                    img = cv2.flip(img,1)
                if mirrorud:
                    img = cv2.flip(img,0)

                cv2.imshow('my cam', img)


            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cam.release()
        cv2.destroyAllWindows()
Example #40
# Remove small contours to suppress noise
count = [x for x in count if cv2.contourArea(x) > 90]

# Get reference object dimensions
ref_object = count[0]
surroundings = cv2.minAreaRect(ref_object)
surroundings = cv2.boxPoints(surroundings)
surroundings = np.array(surroundings, dtype="int")
surroundings = perspective.order_points(surroundings)
(tl, tr, br, bl) = surroundings
pixel_dist = euclidean(tl, tr)
# The leftmost (reference) object is assumed to be 2.0 cm across; adjust as needed
dist_in_cm = 2.0
# Pixels-per-centimetre scale used for all measurements
pixel_per_cm = pixel_dist / dist_in_cm

# Draw all contours that were counted
drawCoutour(count, pixel_per_cm, image)
# See utils for the definition
show_images([image])
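With pixel_per_cm known, the remaining contours can be measured the same way (a sketch reusing the imutils perspective and scipy euclidean helpers assumed above):

for c in count[1:]:
    box = perspective.order_points(
        np.array(cv2.boxPoints(cv2.minAreaRect(c)), dtype="int"))
    (tl, tr, br, bl) = box
    width_cm = euclidean(tl, tr) / pixel_per_cm
    height_cm = euclidean(tr, br) / pixel_per_cm
    print("object: {:.1f}cm x {:.1f}cm".format(width_cm, height_cm))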

# Can be extended to video capture eventually
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()  #returns ret and the frame
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #41
    def read_images(self, folder):
        i = 0
        images = []
        images_fail = []
        for file in os.listdir(folder):
            if re.search(r'.*_left', file) is None:
                continue

            image1 = cv2.imread(folder + "/" + file)
            if image1 is None:
                break

            file_right = re.sub(r'_left', '_right', file)
            image2 = cv2.imread(folder + "/" + file_right)
            if image2 is None:
                break

            gray_l = cv2.extractChannel(image1, 1)
            gray_r = cv2.extractChannel(image2, 1)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(
                gray_l, (8, 6), None, cv2.CALIB_CB_ADAPTIVE_THRESH)
            if not ret_l:
                print("left fail")
                images_fail.append(file)
                continue
            ret_r, corners_r = cv2.findChessboardCorners(
                gray_r, (8, 6), None, cv2.CALIB_CB_ADAPTIVE_THRESH)
            if not ret_r:
                print("right fail")
                images_fail.append(file)
                continue

            images.append(file)
            if ret_l and ret_r:
                # If found, add object points, image points (after refining them)
                self.objpoints.append(self.objp)

                rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_l.append(corners_l)

                # Draw and display the corners
                cv2.drawChessboardCorners(gray_l, (8, 6), corners_l, ret_l)

                rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                cv2.drawChessboardCorners(gray_r, (8, 6), corners_r, ret_r)

                cv2.imshow("Image Left", gray_l)
                cv2.imshow("Image Right", gray_r)

            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            if key == ord('a'):
                return

        img_shape = gray_r.shape
        self.shape = img_shape

        print(f"Fails: {images_fail}", file=sys.stderr)

        print("Starting camera calibration", file=sys.stderr)
        flags = 0
        # flags |= cv2.CALIB_FIX_INTRINSIC
        # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
        # flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        # flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        # flags |= cv2.CALIB_FIX_ASPECT_RATIO
        # flags |= cv2.CALIB_ZERO_TANGENT_DIST
        # flags |= cv2.CALIB_RATIONAL_MODEL
        # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        #flags |= cv2.CALIB_FIX_K3
        #flags |= cv2.CALIB_FIX_K4
        #flags |= cv2.CALIB_FIX_K5
        #flags |= cv2.CALIB_FIX_K6

        rt, self.M1, self.d1, self.r1, self.t1, sdi, sde, pve = cv2.calibrateCameraExtended(
            self.objpoints, self.imgpoints_l, img_shape, None, None)
        print("Reprojection error left: " + str(rt), file=sys.stderr)
        j = 0
        for image in images:
            print(f"{image}: {pve[j,0]}", file=sys.stderr)
            j += 1
        rt, self.M2, self.d2, self.r2, self.t2, sdi, sde, pve = cv2.calibrateCameraExtended(
            self.objpoints, self.imgpoints_r, img_shape, None, None)
        print("Reprojection error right: " + str(rt), file=sys.stderr)
        j = 0
        for image in images:
            print(f"{image}: {pve[j,0]}", file=sys.stderr)
            j += 1

        print("Starting stereo camrea calibration", file=sys.stderr)
        self.camera_model = self.stereo_calibrate(img_shape)
Example #42
def StationExitView(request, station_no):
    if request.method == 'POST':
        try:
            entry = request.POST.get("first")
            entry = int(entry)
            return redirect('core:station_entry', entry)
        except:
            pass

        try:
            exit = request.POST.get("second")
            exit = int(exit)
            return redirect('core:station_exit', exit)
        except:
            pass
    station = models.Station.objects.get(station_no=station_no)
    pics = list(models.Person.objects.all().values_list('pic', flat=True))
    known_face_names = list(models.Person.objects.all().values_list('name', flat=True))
    uids = list(models.Person.objects.all().values_list('uid', flat=True))
    known_face_encodings = []
    for pic in pics:
        # print(pic)
        image = face_recognition.load_image_file(os.path.join(base_dir, str(pic)))
        image_encoding = face_recognition.face_encodings(image)[0]
        known_face_encodings.append(image_encoding)

    i = 0

    video_capture = cv2.VideoCapture(0)
    face_locations = []
    face_encodings = []
    face_names = []
    face_uids = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        try:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]

            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

                face_names = []
                face_uids = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
                    name = "Unknown"
                    uid = -1
                    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                    best_match_index = np.argmin(face_distances)
                    if matches[best_match_index]:
                        name = known_face_names[best_match_index]
                        uid = uids[best_match_index]
                        person = models.Person.objects.get(name=name)
                        try:
                            log = models.Log.objects.get(person=person, status=0)
                            log.exit_station = station
                            log.exit_datetime = timezone.now()
                            log.status = 1
                            log.fare = 20
                            log.save()
                            try:
                                subject = 'Thanks for Travelling in Yugant Express'
                                message = person.name + ', thanks for travelling.\n From Station : {} \n Entry time : {} \n To station : {}\n Exit Time : {} \n Your Fare : {}'.format(
                                    log.entry_station, log.entry_datetime, log.exit_station, log.exit_datetime, log.fare)
                                from_email = settings.EMAIL_HOST_USER
                                to_email = [person.email]
                                email = EmailMessage(subject=subject, from_email=from_email, to=to_email, body=message)
                                email.send()
                                # print('email sent')
                            except:
                                # print('failed to send email')
                                pass
                        except:
                            # print('They never entered... this one is cheating!! collect a fine')
                            pass
                    # print(name)
                    face_names.append(name)
                    face_uids.append(uid)
            i += 1
            process_this_frame = not process_this_frame

            # Display the results
            for (top, right, bottom, left), name, uid in zip(face_locations, face_names, face_uids):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, str(name + ':'+ str(uid)), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        except:
            # print("Cannot read frame")
            pass
        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

    return render(request, 'core/index.html')
Example #43
import cv2

cap = cv2.VideoCapture(0)
# 0 selects the default camera

while cap.isOpened():
	ret, frame = cap.read()
	# ret is True while frames keep arriving

	if ret:
		cv2.imshow('FrameTitle', frame)

		print("WIDTH:HEIGHT:", cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
	else:
		print("Cannot Capture")
		break
else:
	print("Path is wrong")

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #44
# Contours are used for shape analysis
import numpy as np
import cv2


img = cv2.imread('opencv-logo.png')
img1 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


ret, thresh = cv2.threshold(img1, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("Number of contours: " + str(len(contours)))
print(contours[0])

cv2.drawContours(img,contours,-1,(0,255,0),3)


cv2.imshow('Image',img)
cv2.imshow('Image GRAY',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #45
webcam.start()

while True:
    # get image from webcam
    img = webcam.get_current_frame()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # adapted to inner corners
    ret, corners = cv2.findChessboardCorners(gray, (NROW, NCOL), None)

    if ret:
        print('I find you')
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # Find the rotation and translation vectors.
        try:
            # OpenCV 2.4 returns (rvec, tvec, inliers) here; newer versions return
            # (retval, rvec, tvec, inliers), so this unpack raises and we fall
            # back to plain solvePnP below.
            rvecs, tvecs, _ = cv2.solvePnPRansac(objp, corners, mtx, dist)
        except:
            _, rvecs, tvecs = cv2.solvePnP(objp, corners, mtx, dist)

        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        img = drawcube(img, corners, imgpts)
        if img is not None:
            cv2.imshow('img', img)
            cv2.waitKey(10)
        else:
            print('but I cannot display!')
    else:
        cv2.imshow('img', img)
        cv2.waitKey(10)
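drawcube is not shown; a sketch modeled on the OpenCV pose-estimation tutorial's draw function, assuming axis holds the eight corners of a cube so imgpts has eight projected points:

import numpy as np
import cv2

def drawcube(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # Floor in green (filled), pillars in blue, top in red.
    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)
    for i, j in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3)
    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    return img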

Example #46
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    k = cv2.waitKey(1)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('FaceDetection', frame)

    if k % 256 == 27:  # ESC Pressed
        break
    elif k % 256 == 32:
        # SPACE pressed
        img_name = "facedetect_webcam_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
Example #47
            cv2.line(img, (puntos[i%L], puntos[(i+1)%L]), (puntos[(i+2)%L], puntos[(i+3)%L]), (0, 255, 0))
    cv2.imshow('puntos', img)

def Callback(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        puntos.append(x)
        puntos.append(y)
        DibujarPuntos()
        print(f'({x}, {y})')

def MostrarPuntos():
    print(puntos)

cv2.namedWindow('puntos')
cv2.setMouseCallback('puntos', Callback)
cv2.imshow('puntos', img_original)

key = chr(cv2.waitKey(0))
while key != 'q':
    if key == 'm':
        MostrarPuntos()
    elif key == 'r':
        puntos = []
        cv2.imshow('puntos', img_original)
    elif key == 'd':
        if len(puntos) > 0:
            puntos = puntos[:-2]
            DibujarPuntos()

    key = chr(cv2.waitKey(0))
Example #48
import cv2
capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
capture.set(cv2.CAP_PROP_FRAME_WIDTH,  3840)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

writer_1 = cv2.VideoWriter('3840x1080_cam12.mp4', fourcc, 30.0, (3840,1080))

while True:
	ret, frame = capture.read()
	image = frame.copy()
	image_1 = image[0:1080,	0:1920]
	image_2 = image[0:1080, 1920:3840]

	writer_1.write(image)

	image_1 = cv2.resize(image_1, (960, 540))
	image_2 = cv2.resize(image_2, (960, 540))

	cv2.imshow("video1", image_1)
	cv2.imshow("video2", image_2)

	if cv2.waitKey(1) > 0: break

capture.release()
cv2.destroyAllWindows()
Example #49
import cv2
import numpy as np 


# Load the image
img = cv2.imread('./python/a.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_green = np.array([35, 43, 46])
upper_green = np.array([80, 255, 255])
mask = cv2.inRange(hsv, lower_green, upper_green)
res = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('img',res)
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Print the image size, which helps when tuning minRadius and maxRadius
print(img.shape)
cv2.imshow('gray',gray)
 
th2 = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
            cv2.THRESH_BINARY,11,2)
cv2.imshow('binary', th2)
 
ret,thresh1 = cv2.threshold(gray,200,255,cv2.THRESH_BINARY)
# cv2.imshow('thresh1', thresh1)
 
canny = cv2.Canny(thresh1,40,80)
cv2.imshow('Canny', canny)
 
 
canny=cv2.blur(canny,(3,3))
cv2.imshow('blur',canny)
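The comment about tuning minRadius and maxRadius suggests circle detection comes next; a plausible continuation (all parameter values here are placeholders to tune):

circles = cv2.HoughCircles(canny, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=80, param2=30, minRadius=10, maxRadius=100)
if circles is not None:
    for x, y, r in np.around(circles[0]).astype(int):
        cv2.circle(img, (int(x), int(y)), int(r), (0, 0, 255), 2)
cv2.imshow('circles', img)
cv2.waitKey(0)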
Example #50
                default=True,
                help="turn on bounding box")
args = vars(ap.parse_args())

detector = yoloDetection(args["yolo"], args["input"], args["confidence"],
                         args["threshold"], args["bbox"])

detector.prepareModel('ayush')

imgPath = 'images/'
images = os.listdir(imgPath)
print(images)

# for image in images:
# 	img = cv2.imread(imgPath+image)

# 	out = detector.runInference(img)

# 	if args["bbox"] is True:
# 		cv2.imshow(''.format(image), out)
# 	else:
# 		print(out)

img = cv2.imread(imgPath + images[2])
out = detector.runInference(img)
if args["bbox"] is True:
    cv2.imshow('frame', out)
    cv2.waitKey()
else:
    print(out)
Example #51
    
    # Extract edges
    canny_edges = cv2.Canny(img_gray_blur, 10, 70)
    
    # Invert-binarize the edge image
    ret, mask = cv2.threshold(canny_edges, 70, 255, cv2.THRESH_BINARY_INV)
    return mask


# Initialize webcam, cap is the object provided by VideoCapture
# It contains a boolean indicating if it was successful (ret)
# It also contains the images collected from the webcam (frame)
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:  # guard against a failed frame grab
        break
    cv2.imshow('Our Live Sketcher', sketch(frame))
    if cv2.waitKey(1) == 13: #13 is the Enter Key
        break
        
# Release camera and close windows
cap.release()
cv2.destroyAllWindows()
Ejemplo n.º 52
0
# Reconstructed loop head (assumed): the snippet begins mid-loop; `vs`,
# `detector`, and `predictor` are created earlier (see the sketch after
# this example).
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
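The loop above assumes `detector`, `predictor`, and `vs` already exist. A minimal setup sketch following the usual dlib/imutils pattern; the predictor filename is an assumption and must point at a downloaded dlib 68-point landmark model:

import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream

# Assumed initialization for the loop above; the model path is hypothetical
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
vs = VideoStream(src=0).start()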
Ejemplo n.º 53
0
import cv2
import glob

# Resize every JPEG in the current directory to 100x100 and save a copy
images = glob.glob("*.jpg")
for image in images:
    print(image)
    img = cv2.imread(image, 0)  # 0 = read as grayscale
    resized_im = cv2.resize(img, (100, 100))
    cv2.imshow("Hey", resized_im)
    cv2.imwrite("resized_" + image, resized_im)
    cv2.waitKey(500)  # show each result for half a second
cv2.destroyAllWindows()
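A small variant, in case forcing a 100x100 square distorts the images: scale the longer side to 100 pixels instead. This approach is an assumption, not part of the original example:

import cv2
import glob

# Hypothetical variant: preserve aspect ratio by scaling the longer side
# to 100 pixels instead of forcing a square
for image in glob.glob("*.jpg"):
    img = cv2.imread(image, 0)
    h, w = img.shape[:2]
    scale = 100.0 / max(h, w)
    resized_im = cv2.resize(img, (int(w * scale), int(h * scale)))
    cv2.imwrite("resized_" + image, resized_im)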
Ejemplo n.º 54
0
# Reconstructed assignment (the snippet begins mid-statement): the number
# of table columns comes from the page annotations.
NO_OF_COLS = annotate_dict['page 1']['ncols']
start_of_table = int(annotate_dict['page 1']['Start of Table'][1] * 1000 / h)

new_lst = list()
new_lst2 = list()

for x in new_crd:
    if colfilter(x, rgb, NO_OF_COLS, start_of_table) == int(NO_OF_COLS):
        new_lst.append(x)
    else:
        new_lst2.append(x)

#print(new_lst)
tmp3 = np.copy(rgb)
for crds in new_lst:
    x, y, x1, y1 = crds
    sub_image = tmp3[y - 2:y1 + 2, x - 2:x1 + 2]
    d = pytesseract.image_to_data(sub_image,
                                  output_type=Output.DICT,
                                  lang='eng',
                                  config='--psm 6')
    cv2.rectangle(tmp3, (x - 1, y - 1), (x1 + 1, y1 + 1), (0, 0, 255), 1)
    for t in d['text']:
        print(t, end='  ')
    print()
cv2.imshow("Img", tmp3)
cv2.waitKey()
cv2.imwrite('output.png', tmp3)

get_text(annotate_dict, np.copy(rgb), w, h)
Ejemplo n.º 55
0
import cv2
import ktb

k = ktb.Kinect()

while True:
    color_frame = k.get_frame()

    cv2.imshow('frame', color_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
Ejemplo n.º 56
0
def visual(
    image_id,
    flag,
    pairs_info,
    score_HOI,
    score_interact,
    score_obj_box,
    score_per_box,
    score_REL,
    score_HOI_pair,
    ground_truth,
):
    start = 0
    for batch in range(len(image_id)):
        this_image = int(image_id[batch])
        a = helpers_pre.get_compact_detections(this_image, flag)
        person_bbxn = a['person_bbx']
        obj_bbxn = a['objects_bbx']
        this_batch_pers = int(pairs_info[batch][0])
        this_batch_objs = int(pairs_info[batch][1])
        increment = this_batch_pers * this_batch_objs
        ground_truth_this_batch = ground_truth[start:start + increment]
        score_HOI_this_batch = score_HOI[start:start + increment]
        start += increment
        if flag == 'train':
            cur_obj_path_s = OBJ_PATH_train_s \
                + 'COCO_train2014_%.12i.json' % this_image
            image_dir_s = image_dir_train + '/COCO_train2014_%.12i.jpg' \
                % this_image
        elif flag == 'test':
            cur_obj_path_s = OBJ_PATH_test_s \
                + 'COCO_val2014_%.12i.json' % this_image
            image_dir_s = image_dir_test + '/COCO_val2014_%.12i.jpg' \
                % this_image
        elif flag == 'val':
            cur_obj_path_s = OBJ_PATH_train_s \
                + 'COCO_train2014_%.12i.json' % this_image
            image_dir_s = image_dir_val + '/COCO_train2014_%.12i.jpg' \
                % this_image
        with open(cur_obj_path_s) as fp:
            detections = json.load(fp)
        img_H = detections['H']
        img_W = detections['W']
        person_bbx = np.array([img_W, img_H, img_W, img_H],
                              dtype=float) * person_bbxn
        obj_bbx = np.array([img_W, img_H, img_W, img_H], dtype=float) \
            * obj_bbxn
        img = cv2.imread(image_dir_s, 3)
        start_index = 0
        for person_box in person_bbx:
            for object_box in obj_bbx:
                ground_truth_this_sample = \
                    ground_truth_this_batch[start_index]
                score_HOI_this_sample = \
                    score_HOI_this_batch[start_index]
                print(score_HOI_this_sample)
                pred = [('GROUND_TRUTH', [
                    (ID2VERB[ind],
                     float('%.2f' % ground_truth_this_sample[ind]))
                    for ind in np.argsort(ground_truth_this_sample)[-5:][::-1]
                ])]
                pred.append(('TOTAL_PREDICTION', [
                    (ID2VERB[ind], float('%.2f' % score_HOI_this_sample[ind]))
                    for ind in np.argsort(score_HOI_this_sample)[-5:][::-1]
                ]))
                prediction = pd.DataFrame(pred, columns=['Name', 'Prediction'])

                img = cv2.imread(image_dir_s, 3)
                (x, y, w, h) = (int(person_box[0]), int(person_box[1]),
                                int(person_box[2] - person_box[0]),
                                int(person_box[3] - person_box[1]))
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
                (x, y, w, h) = (int(object_box[0]), int(object_box[1]),
                                int(object_box[2] - object_box[0]),
                                int(object_box[3] - object_box[1]))
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)

                print(this_image)
                print('Predictions (Five Highest Confidence Class): {}'
                      .format(prediction))
                # if ground_truth_this_sample[VERB2ID['catch']] == 1:
                #     cv2.imwrite('/home/d9/Documents/VSGNet/new_test_loss_best/result/catch/'+'%.12i' % this_image + str(start_index) + '.jpg', img)
                #     prediction.to_csv('/home/d9/Documents/VSGNet/new_test_loss_best/result/catch/'+'%.12i' % this_image + str(start_index) + '.csv')
                cv2.imshow('image', img)
                start_index += 1
                k = cv2.waitKey(0)
                if k == 27:  # ESC exits the visualization early
                    cv2.destroyAllWindows()
                    return

    cv2.destroyAllWindows()
Ejemplo n.º 57
0
def detectPlatesInScene(imgOriginalScene):
    listOfPossiblePlates = []  # this will be the return value

    height, width, numChannels = imgOriginalScene.shape

    imgGrayscaleScene = np.zeros((height, width, 1), np.uint8)
    imgThreshScene = np.zeros((height, width, 1), np.uint8)
    imgContours = np.zeros((height, width, 3), np.uint8)

    cv2.destroyAllWindows()

    if Main.showSteps:  # show steps
        cv2.imshow("0", imgOriginalScene)
    # end if # show steps

    imgGrayscaleScene, imgThreshScene = Preprocess.preprocess(
        imgOriginalScene)  # preprocess to get grayscale and threshold images

    if Main.showSteps:  # show steps
        cv2.imshow("1a", imgGrayscaleScene)
        cv2.imshow("1b", imgThreshScene)
    # end if # show steps

    # find all possible chars in the scene,
    # this function first finds all contours, then only includes contours that could be chars (without comparison to other chars yet)
    listOfPossibleCharsInScene = findPossibleCharsInScene(imgThreshScene)

    if Main.showSteps:  # show steps
        print("step 2 - len(listOfPossibleCharsInScene) = " +
              str(len(listOfPossibleCharsInScene)))  # 131 with MCLRNF1 image

        imgContours = np.zeros((height, width, 3), np.uint8)

        contours = []

        for possibleChar in listOfPossibleCharsInScene:
            contours.append(possibleChar.contour)
        # end for

        cv2.drawContours(imgContours, contours, -1, Main.SCALAR_WHITE)
        cv2.imshow("2b", imgContours)
    # end if # show steps

    # given a list of all possible chars, find groups of matching chars
    # in the next steps each group of matching chars will attempt to be recognized as a plate
    listOfListsOfMatchingCharsInScene = DetectChars.findListOfListsOfMatchingChars(
        listOfPossibleCharsInScene)

    if Main.showSteps:  # show steps
        print("step 3 - listOfListsOfMatchingCharsInScene.Count = " + str(
            len(listOfListsOfMatchingCharsInScene)))  # 13 with MCLRNF1 image

        imgContours = np.zeros((height, width, 3), np.uint8)

        for listOfMatchingChars in listOfListsOfMatchingCharsInScene:
            intRandomBlue = random.randint(0, 255)
            intRandomGreen = random.randint(0, 255)
            intRandomRed = random.randint(0, 255)

            contours = []

            for matchingChar in listOfMatchingChars:
                contours.append(matchingChar.contour)
            # end for

            cv2.drawContours(imgContours, contours, -1,
                             (intRandomBlue, intRandomGreen, intRandomRed))
        # end for

        cv2.imshow("3", imgContours)
    # end if # show steps

    for listOfMatchingChars in listOfListsOfMatchingCharsInScene:  # for each group of matching chars
        possiblePlate = extractPlate(
            imgOriginalScene, listOfMatchingChars)  # attempt to extract plate

        if possiblePlate.imgPlate is not None:  # if plate was found
            listOfPossiblePlates.append(
                possiblePlate)  # add to list of possible plates
        # end if
    # end for

    print("\n" + str(len(listOfPossiblePlates)) +
          " possible plates found")  # 13 with MCLRNF1 image

    if Main.showSteps:  # show steps
        print("\n")
        cv2.imshow("4a", imgContours)

        for i in range(len(listOfPossiblePlates)):
            p2fRectPoints = cv2.boxPoints(
                listOfPossiblePlates[i].rrLocationOfPlateInScene)
            # cv2.line needs integer pixel coordinates, so round the float
            # corner points that cv2.boxPoints returns
            p2fRectPoints = [tuple(int(round(c)) for c in point)
                             for point in p2fRectPoints]

            cv2.line(imgContours, p2fRectPoints[0], p2fRectPoints[1],
                     Main.SCALAR_RED, 2)
            cv2.line(imgContours, p2fRectPoints[1], p2fRectPoints[2],
                     Main.SCALAR_RED, 2)
            cv2.line(imgContours, p2fRectPoints[2], p2fRectPoints[3],
                     Main.SCALAR_RED, 2)
            cv2.line(imgContours, p2fRectPoints[3], p2fRectPoints[0],
                     Main.SCALAR_RED, 2)

            cv2.imshow("4a", imgContours)

            print("possible plate " + str(i) +
                  ", click on any image and press a key to continue . . .")

            cv2.imshow("4b", listOfPossiblePlates[i].imgPlate)
            cv2.waitKey(0)
        # end for

        print(
            "\nplate detection complete, click on any image and press a key to begin char recognition . . .\n"
        )
        cv2.waitKey(0)
    # end if # show steps

    return listOfPossiblePlates
Ejemplo n.º 58
0
import cv2

# Read the image as grayscale (flag 0)
img = cv2.imread("caro.PNG", 0)

cv2.imshow("window name", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

cv2.imwrite("novo.png", img)
Ejemplo n.º 59
0
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from webcam input
while True:

    ret, frame = cap.read()
    if not ret:  # skip failed frame grabs
        continue

    face = face_extractor(frame)  # call once and reuse the result
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        # Save file in specified directory with unique name
        file_name_path = '/webapp/uploads/cyber' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)

        # Put count on images and display live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)

    else:
        print("Face not found")

    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")
Ejemplo n.º 60
0
import cv2

img = cv2.imread('C://Users//47463//Desktop//2//cat.jpg')
print(img.shape[:2])

height, width = img.shape[:2]

# Enlarge 2x (INTER_CUBIC suits upscaling) and shrink to half size
reSize1 = cv2.resize(img, (2 * width, 2 * height), interpolation=cv2.INTER_CUBIC)
reSize2 = cv2.resize(img, (width // 2, height // 2), interpolation=cv2.INTER_CUBIC)

cv2.imshow('reSize1', reSize1)
cv2.imshow('reSize2', reSize2)

cv2.waitKey()
cv2.destroyAllWindows()
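One note on the downscale above: OpenCV's documentation recommends cv2.INTER_AREA for shrinking, while INTER_CUBIC suits enlarging. A quick side-by-side comparison, reusing the path from the example:

import cv2

img = cv2.imread('C://Users//47463//Desktop//2//cat.jpg')
height, width = img.shape[:2]

# INTER_AREA is the documented choice for shrinking; compare it with the
# INTER_CUBIC downscale used in the example above
small_area = cv2.resize(img, (width // 2, height // 2), interpolation=cv2.INTER_AREA)
small_cubic = cv2.resize(img, (width // 2, height // 2), interpolation=cv2.INTER_CUBIC)

cv2.imshow('INTER_AREA', small_area)
cv2.imshow('INTER_CUBIC', small_cubic)
cv2.waitKey()
cv2.destroyAllWindows()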