def find_closest_auto(demofile, new_xyz):
    if args.parallel:
        from joblib import Parallel, delayed
    demo_clouds = [asarray(seg["cloud_xyz"]) for seg in demofile.values()]
    keys = demofile.keys()
    if args.parallel:
        costs = Parallel(n_jobs=3,verbose=100)(delayed(registration_cost)(demo_cloud, new_xyz) for demo_cloud in demo_clouds)
    else:
        costs = []
        for (i,ds_cloud) in enumerate(demo_clouds):
            costs.append(registration_cost(ds_cloud, new_xyz))
            print "completed %i/%i"%(i+1, len(demo_clouds))
    
    print "costs\n",costs
    if args.show_neighbors:
        nshow = min(5, len(keys))
        import cv2, rapprentice.cv_plot_utils as cpu
        sortinds = np.argsort(costs)[:nshow]
        near_rgbs = [asarray(demofile[keys[i]]["rgb"]) for i in sortinds]
        bigimg = cpu.tile_images(near_rgbs, 1, nshow)
        cv2.imshow("neighbors", bigimg)
        print "press any key to continue"
        cv2.waitKey()
        
    ibest = np.argmin(costs)
    return keys[ibest]
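# registration_cost is defined elsewhere in the project; a hedged stand-in (an assumption,
# not the original cost function) scores two (N, 3) point clouds by their symmetric mean
# nearest-neighbour distance, assuming SciPy is available.
from scipy.spatial import cKDTree

def registration_cost_stub(demo_cloud, new_cloud):
    # Mean distance from each cloud to its nearest neighbours in the other cloud.
    d_demo_to_new = cKDTree(new_cloud).query(demo_cloud)[0].mean()
    d_new_to_demo = cKDTree(demo_cloud).query(new_cloud)[0].mean()
    return d_demo_to_new + d_new_to_demo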
Example #2
 def image_core(self):
   #time1 = time.time()
   val,im = self.vid.read()
   #cv2.imshow("image2",im)
   posX,posY=0,0
   if val:
       im2=self.image_filter(im)
       #r,im1=cv2.threshold(im2,90,255,1)
       contours,hierarchy = cv2.findContours(im2,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
       print contours
       for h,cnt in enumerate(contours):
           area = cv2.contourArea(cnt)  # contourArea can fail on some OpenCV builds; 2.4.2 (ideally on Linux) was suggested as a workaround
          if area > 1000:
           posX = int((cv2.moments(cnt)['m10']) / (cv2.moments(cnt)['m00']))
           posY = int((cv2.moments(cnt)['m01']) / (cv2.moments(cnt)['m00']))
           '''moments = cv2.moments(cnt)
           moment00 = moments['m00']
           moment10=moments['m10']
           moment01=moments['m01']
           posX = int(moment10/moment00)
           posY = int(moment01/moment00)'''
           cv2.circle(im,(int((posX)),int((posY))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX+5)),int((posY+5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX-5)),int((posY-5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX+5)),int((posY-5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX-5)),int((posY+5))),40,(0,0,255),2,1)
          else:
           posX,posY=0,0
       im1=cv.fromarray(im)
       #cv2.imshow("image1",im)
       cv2.waitKey(10)
       #time2 = time.time()
       #print ((time2-time1)*1000.0)
       return im1,posX,posY
Example #3
	def captureImage(self):
		position = [0, 0]
		velocity = [0, 0]

		frame = self.cap.read()[1]

		if frame is not None:
			frame = cv2.flip(frame, 1)

			self.frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

			threshold_mask = self.createMultipleThresholds(self.frame_hsv)


			contour = self.getLargestContour(threshold_mask)
			if type(contour) != int:
				cv2.drawContours(frame, contour, -1, (0, 255, 255), 2)
				position = self.getContourMoment(contour)
				cv2.circle(frame, (position[0], position[1]), 5, (0,0,255), -1)

			# calculate velocity
			velocity = [position[0] - self.oldPosition[0], position[1] - self.oldPosition[1]]
			# print velocity

			cv2.imshow("Frame", frame)
			cv2.waitKey(10)

		self.oldPosition = position
		return [position, velocity]
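# createMultipleThresholds, getLargestContour and getContourMoment are methods of the same
# class and are not shown here. A plausible sketch of getContourMoment (an assumption: it
# returns the contour's centre of mass from its image moments), written as a free function:
def getContourMoment(contour):
    m = cv2.moments(contour)
    if m["m00"] == 0:          # guard against degenerate contours with zero area
        return [0, 0]
    return [int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])]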
def find_hottest_points(cv_image):
  
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3,3))
  #gray = clahe.apply(img)
  gray = clahe.apply(cv_image)
  gray = cv2.GaussianBlur (gray, (21,21), 0)

  min_thresh = cv2.threshold(gray, min_th, 255, cv2.THRESH_BINARY)[1]
  max_thresh = cv2.threshold(gray, max_th, 255, cv2.THRESH_BINARY_INV)[1]

  thresh = cv2.bitwise_and(min_thresh, max_thresh)

  thresh = cv2.dilate(thresh, None, iterations = 2)
  (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)

  for c in cnts:
    if cv2.contourArea(c) > min_area and cv2.contourArea(c) < max_area:
      
      (x,y,w,h) = cv2.boundingRect(c)
#      cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
      cv2.rectangle(cv_image, (x, y), (x+w, y+h), 0, 2)
      continue


  cv2.imshow("region_detector", cv_image)
  cv2.moveWindow("region_detector",900,0)
  cv2.imshow("band_threshold_image", thresh)
  cv2.moveWindow("band_threshold_image",900,400)
  cv2.waitKey(1)
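# min_th, max_th, min_area and max_area are module-level globals that are not shown above.
# Hypothetical values (assumptions, not from the original source) for an 8-bit image:
min_th, max_th = 200, 254        # keep pixels whose intensity lies between the two thresholds
min_area, max_area = 500, 50000  # accepted contour-area band, in pixels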
Example #5
def main():

    for fname in glob("left/*/*/ein/sceneModel/model.yml"):
        print fname

        f = open(fname) 

        lines = []
        # ignore the %YAML:1.0, because the python parser doesn't handle 1.0.
        f.readline() 

        for line in f:
            # for some reason the python parser doesn't like this line either.
            if "background_pose" in line:
                continue
            lines.append(line)
        data = "\n".join(lines)
        
        ymlobject = yaml.load(data)
        #print ymlobject
        scene = ymlobject["Scene"]
        observed_map = GaussianMap.fromYaml(scene["observed_map"])
        image = observed_map.toImage()
        cv2.imwrite("observed.png", image)
        cv2.imshow("observed map", image)
        
        
        
        
        print "observed map: ", observed_map.width, "x", observed_map.height
        dimage = readMatFromYaml(scene["discrepancy_magnitude"])
        cv2.imshow("discrepancy magnitude", dimage)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
def main(argv):
    args = str(sys.argv[1])
    hogParams = {'hitThreshold': -.5, 'scale': 1.05}
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    video = cv2.VideoCapture(args)

    ret, frame = video.read()

    while(ret):

        cimg = np.copy(frame)
        people, w = hog.detectMultiScale(frame, **hogParams)
        filtered = []
        for ri, r in enumerate(people):
            for qi, q in enumerate(people):
                if ri != qi and inside(r, q):
                    print "break"
                    break
            else:
                filtered.append(r)
        # draw_detections(frame, people)
        draw_detections(cimg, filtered, 1)
        cv2.imshow('detected people', cimg)
        cv2.waitKey(2)

        ret, frame = video.read()

    cv2.destroyAllWindows()
    video.release()
Example #7
def edge_detect(img):
    BLUR_SIZE = 51
    TRUNC_RATIO = 0.75
    CLOSING_SIZE = 5

    # denoised = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # too_bright=np.logical_and(img[:,:,1]<50, img[:,:,2]>200)
    # np.set_printoptions(threshold=np.nan)
    # np.savetxt('conconcon',img[:,:,1],'%i')
    # img[:,:,1]=np.where(too_bright, np.sqrt(img[:,:,1])+70, img[:,:,1])
    # img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.blur(gray, (BLUR_SIZE, BLUR_SIZE))

    edge = np.floor(0.5 * gray + 0.5 * (255 - blur)).astype('uint8')

    hist,bins = np.histogram(edge.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max() / cdf.max()
    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    cdf = np.ma.filled(cdf_m, 0).astype('uint8')
    equ = cdf[edge]
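    # Note: assuming the masked-CDF detail above is not essential, the same global histogram
    # equalization can be obtained with the built-in call:
    #   equ = cv2.equalizeHist(edge)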

    hist,bins = np.histogram(equ.flatten(),256,[0,256])
    max_idx = np.argmax(hist);
    hist_clean = np.where(equ > TRUNC_RATIO * max_idx, 255, equ)

    kernel = np.ones((CLOSING_SIZE, CLOSING_SIZE), np.uint8)
    closing = cv2.morphologyEx(hist_clean, cv2.MORPH_CLOSE, kernel)
    plt.imshow(closing, cmap='Greys_r')
    plt.show()
    cv2.waitKey(100)
Example #8
    def draw_motion(self, im=None, draw_outliers=False):

        show_image = False

        if im is None:
            im = self.result_image
            show_image = True

        if draw_outliers:
            print("points shape: " + str(self.prev_points.shape[0]) + " outliers " +
                  str(self.outliers.shape))

        for i in range(0, self.prev_points.shape[0]):
            prev_pt = self.prev_points[i]
            next_pt = self.next_points[i]

            color = np.array([0, 255, 0])

            if draw_outliers:
                id = 2*i
                if (self.outliers[id] or self.outliers[id+1]) and draw_outliers:
                    color = np.array([0, 0, 255])
                else:
                    color = np.array([0, 255, 0])

            cv2.circle(im, (prev_pt[0], prev_pt[1]), 2, color, -1)
            cv2.line(im, (prev_pt[0], prev_pt[1]), (next_pt[0], next_pt[1]), np.array([255, 0, 0]), 1)

        if show_image:
            cv2.imshow("", im)
            cv2.waitKey(0)

        return im
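# prev_points, next_points and outliers are populated elsewhere in the class. A hedged sketch
# (illustrative names, not the project's API) of how such point pairs are commonly produced,
# using Shi-Tomasi corners plus pyramidal Lucas-Kanade optical flow:
cap = cv2.VideoCapture(0)
_, prev_frame = cap.read()
_, next_frame = cap.read()
prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
next_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(prev_gray, maxCorners=200, qualityLevel=0.01, minDistance=7)
p1, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None)
prev_points = p0[status == 1].reshape(-1, 2).astype(int)  # tracked points in the first frame
next_points = p1[status == 1].reshape(-1, 2).astype(int)  # matching points in the second frame
cap.release()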
Example #9
def main():
	# version 3.0.0
	# version 2.4.11
	print cv2.__version__

	imgPlate = cv2.imread('plate_judge.jpg',cv2.IMREAD_COLOR)

	PlateLocater.m_debug = False
	Result = PlateLocater.fuzzyLocate(imgPlate)

	print type(Result)
	print 'number of candidate plates:',len(Result)
	print Result[0].shape

	platesJudge(Result)

	# imgGray = cv2.cvtColor(imgPlate,cv2.COLOR_BGR2GRAY)
	# cv2.imshow('src',imgGray)
	# imgEqulhist = cv2.equalizeHist(imgGray)
	# cv2.imshow('equal',imgEqulhist)
	cv2.waitKey(0)
	cv2.destroyAllWindows()


	# box = cv2.boxPoints(mr)  # if you are using opencv 3.0.0
	# box = cv2.cv.boxPoints(mr) # if you are using opencv 2.4.11

	# SVM reference
	# http://answers.opencv.org/question/5713/save-svm-in-python/
	#

	# problems encountered
	# http://answers.opencv.org/question/55152/unable-to-find-knearest-and-svm-functions-in-cv2/

	return None
Example #10
def erode(imageName,iter):
    """Takes image name of image and erodes it"""
    image = cv2.imread(imageName)
    eroded = cv2.erode(image, None, iterations=iter)
    cv2.imshow("OUTPUT", eroded)
    cv2.imshow("INPUT", image)
    cv2.waitKey(0)
Example #11
def verb_showfilters(argv):
	"""Dump source code of session"""
	
	f  = KITNNFile(argv[2])
	s  = f.getSession(argv[3]).d["snap/1"]
	
	fine   = s["data/2" ][...]
	medium = s["data/8" ][...]
	coarse = s["data/14"][...]
	
	w  = 3+(16*7+15*3)+3
	h  = 3+( 9*7+ 8*3)+3
	
	img= np.zeros((h,w,3), dtype="uint8")
	
	for i in xrange(9):
		for j in xrange(16):
			n     = i*16+j
			if i in [0,1,2]:
				card  = fine  [n- 0]
			elif i in [3,4,5]:
				card  = medium[n-48]
			elif i in [6,7,8]:
				card  = coarse[n-96]
			card -= np.min(card)
			card /= np.max(card)
			card  = card.transpose(1,2,0)
			
			img[3+i*10:3+i*10+7, 3+j*10:3+j*10+7] = 255*card
	
	img = cv2.resize(img, (0,0), None, 8, 8, cv2.INTER_NEAREST)
	cv2.imshow("Filters", img)
	cv2.imwrite("Filters.png", img)
	cv2.waitKey()
def get_images_and_labels(path):
    # Append all the absolute image paths in a list image_paths
    # Images with the .sad (sad face) extension are not used for training;
    # instead, they are kept aside to test the accuracy of the training
    image_paths = [os.path.join(path, f) for f in os.listdir(path)]
    # images will contains face images
    images = []
    # labels will contains the label that is assigned to the image
    labels = []
    for image_path in image_paths:
        # Read the image and convert to grayscale
        image_pil = Image.open(image_path).convert('L')
        # Convert the image format into numpy array
        image = np.array(image_pil, 'uint8')
        # Get the label of the image
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject",""))
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image,
                                             scaleFactor=1.5,
                                             minNeighbors=6,
                                             minSize=(30, 30),
                                             flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
        # If face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            labels.append(nbr)
            cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
            cv2.waitKey(50)
    # return the images list and labels list
    return images, labels
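# A hedged usage sketch: the returned lists can train an LBPH recognizer. This assumes
# opencv-contrib-python and a hypothetical ./yalefaces folder; older OpenCV 2.4 builds expose
# cv2.createLBPHFaceRecognizer() instead of cv2.face.LBPHFaceRecognizer_create().
images, labels = get_images_and_labels("./yalefaces")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(images, np.array(labels))
predicted_label, confidence = recognizer.predict(images[0])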
Example #13
def coolBlack():
    IMAGE_WEIGHT = 0.5

    image = cv2.imread("G:/Filters/wasim.jpg",0)
    black = cv2.imread("G:/Filters/black5.jpg",0)
    black = cv2.resize(black, image.shape[::-1])

    res1 = cv2.addWeighted(image, IMAGE_WEIGHT, black, 1 - IMAGE_WEIGHT, 1)


    #NORMALIZE IMAGES
    image = np.float32(image)
    black = np.float32(black)

    image /= 255
    black /= 200

    res = image*black

    cv2.imshow("RES", res)
    cv2.waitKey(0)

    fname = "G:/Filtes/temp.jpg"
    cv2.imwrite(fname, res)
    res = cv2.imread(fname, 0)

    cv2.imshow("BLACK", res)
    cv2.waitKey(0)
Example #14
    def object_detection(self, depth_array):
        """
        Function to detect objects from the depth image given
        :return:
        """
        self.detect_arm()

        # Perform thresholding on the image to remove all objects behind a plain
        ret, bin_img = cv2.threshold(depth_array, 0.3, 1, cv2.THRESH_BINARY_INV)

        # Erode the image a few times in order to separate close objects
        element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        err_img = cv2.erode(bin_img, element, iterations=20)

        # Create a new array of type uint8 for the findContours function
        con_img = np.array(err_img, dtype=np.uint8)

        # Find the contours of the image and then draw them on
        contours, hierarchy = cv2.findContours(con_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(con_img, contours, -1, (128, 255, 0), 3)

        for x in range(0, len(contours)):
            x, y, w, h = cv2.boundingRect(contours[x])
            cv2.rectangle(con_img, (x, y), ((x+w), (y+h)), (255, 0, 127), thickness=5, lineType=8, shift=0)

        # Show the colour images of the objects
        # self.show_colour(contours)

        # Show the Depth image and objects images
        cv2.imshow('Contours', con_img)
        cv2.imshow("Depth", bin_img)
        cv2.waitKey(3)
Example #15
    def show_colour(self, cnt):
        """
        Use the objects found to show them in colour
        :return:
        """
        # Go through each rectangle and display the rgb
        length = len(cnt)

        # Create an array of size the amount of rectangles
        crop_rgb = []
        for i in range(0, length):
            crop_rgb.append(1)

        for x in range(0, length):
            x, y, w, h = cv2.boundingRect(cnt[x])

            # Try to crop the rgb image for each box
            try:
                crop_rgb[x] = self.rgb_img[y:y+h, x:x+w]
            except:
                pass

        for x in range(0, length):
            name = "Cropped " + str(x)
            cv2.imshow(name, crop_rgb[x])
        cv2.waitKey(3)
 def CLQFilterDemo(self, img):
     # Constrained least square filter
     img = np.int16(img)
     noise = self.noiseGenerator.GuassNoise(img, 0, 10, np.int32(img.size))
     nImg = img + noise
     H = self.GenerateHDemo(img.shape)
     fImg = np.fft.fftshift(np.fft.fft2(img))
     gImg = np.fft.ifft2(np.fft.ifftshift(fImg * H))
     gImg += noise
     fgImg = np.fft.fftshift(np.fft.fft2(gImg))
     gamma = 0.3
     l = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
     p = np.zeros(img.shape)
     p[0:3, 0:3] = l
     P = np.fft.fftshift(np.fft.fft2(p))
     N = 512 * 512 * (np.std(noise) ** 2.0 - np.mean(noise) ** 2.0)
     a = 0.1
     ggImg = self.CLSFilterOptimal(fgImg, gamma, H, P, N, a)
     #ggImg = self.CLSFilter(fgImg, gamma, H, P)
     cv2.namedWindow("orig")
     cv2.imshow("orig", np.uint8(img))
     cv2.namedWindow("g")
     cv2.imshow("g", np.uint8(gImg))
     cv2.namedWindow("CLQ filter restore")
     cv2.imshow("CLQ filter restore", np.uint8(ggImg))
     cv2.waitKey(0)
Example #17
def dewarp(imagedir):
    # Loading from json file
    C = CameraParams.fromfile(os.path.join(imagedir, "params.json"))
    K = C.K
    D = C.D
    print("Loaded camera parameters from " + os.path.join(imagedir, "params.json"))

    for f in file_list(imagedir, ['jpg', 'jpeg', 'png']):
        print(f)
        colour = cv2.imread(f)
        grey = cv2.cvtColor(colour, cv2.COLOR_BGR2GRAY)

        h, w = grey.shape[:2]
        newcameramtx, roi=cv2.getOptimalNewCameraMatrix(K, D, (w,h), 1, (w,h))
        mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, newcameramtx, (w,h), 5)
        dewarped = cv2.remap(grey, mapx, mapy, cv2.INTER_LINEAR)

        x, y, w, h = roi
        dewarped = dewarped[y:y+h, x:x+w]
        grey = cv2.resize(grey, (0,0), fx=0.5, fy=0.5) 
        dewarped = cv2.resize(dewarped, (0,0), fx=0.5, fy=0.5) 

        cv2.imshow("Original", grey )
        cv2.imshow("Dewarped", dewarped)
        cv2.waitKey(-1)
Example #18
def realisticTexturemap(H_G_M, scale):
    map_img = cv2.imread('Images/ITUMap.bmp')
    point = getMousePointsForImageWithParameter(map_img, 1)[0]

    texture = cv2.imread('Images/ITULogo.jpg')
    #texture = cv2.cvtColor(texture,cv2.COLOR_BGR2GRAY)
    H_T_M = np.zeros(9).reshape(3,3)
    H_T_M[0][0] = scale
    H_T_M[1][1] = scale

    H_T_M[0][2] = point[0]
    H_T_M[1][2] = point[1]

    H_T_M[2][2] = 1

    H_M_G = np.linalg.inv(H_G_M)

    H_T_G = np.dot(H_M_G, H_T_M)

    fn = "GroundFloorData/sunclipds.avi"
    cap = cv2.VideoCapture(fn)
    #load Tracking data
    running, frame = cap.read()
    while running:
        running, frame = cap.read()
        h,w,d = frame.shape

        warped_texture = cv2.warpPerspective(texture, H_T_G,(w, h))

        result = cv2.addWeighted(frame, .6, warped_texture, .4, 50)

        cv2.imshow("Result", result)
        cv2.waitKey(0)
Example #19
def textureMapGroundFloor():
    #create H_T_G from first frame of sequence
    texture = cv2.imread('Images/ITULogo.jpg')

    fn = "GroundFloorData/sunclipds.avi"
    sequence = cv2.VideoCapture(fn)
    running, frame = sequence.read()

    h_t_g, calibration_points = SIGBTools.getHomographyFromMouse(texture, frame, -4)
    print h_t_g
    #fig = figure()
    while running:
        running, frame = sequence.read()

        if not running:
            return

        #texture map
        h,w,d = frame.shape
        warped_texture = cv2.warpPerspective(texture, h_t_g,(w, h))
        result = cv2.addWeighted(frame, .7, warped_texture, .3, 50)

        #display
        cv2.imshow("Texture Mapping", result)
        cv2.waitKey(1)
Example #20
def locate_thumbnail(thumbnail_filename, source_filename, display=False, save_visualization=False,
                     save_reconstruction=False, reconstruction_format="jpg"):
    thumbnail_basename, thumbnail_image = open_image(thumbnail_filename)
    source_basename, source_image = open_image(source_filename)

    logging.info("Attempting to locate %s within %s", thumbnail_filename, source_filename)
    kp_pairs = match_images(thumbnail_image, source_image)

    if len(kp_pairs) >= 4:
        title = "Found %d matches" % len(kp_pairs)
        logging.info(title)

        H, mask = find_homography(kp_pairs)

        new_thumbnail, corners, rotation = reconstruct_thumbnail(thumbnail_image, source_image, kp_pairs, H)

        print(json.dumps({
            "master": {
                "source": source_filename,
                "dimensions": {
                    "height": source_image.shape[0],
                    "width": source_image.shape[1],
                }
            },
            "thumbnail": {
                "source": thumbnail_filename,
                "dimensions": {
                    "height": thumbnail_image.shape[0],
                    "width": thumbnail_image.shape[1],
                }
            },
            "bounding_box": {
                "height": corners[0][1] - corners[0][0],
                "width": corners[1][1] - corners[1][0],
                "x": corners[1][0],
                "y": corners[0][0],
            },
            "rotation_degrees": rotation
        }))

        if save_reconstruction:
            new_filename = "%s.reconstructed.%s" % (thumbnail_basename, reconstruction_format)
            cv2.imwrite(new_filename, new_thumbnail)
            logging.info("Saved reconstructed thumbnail %s", new_filename)
    else:
        logging.warning("Found only %d matches; skipping reconstruction", len(kp_pairs))
        new_thumbnail = corners = H = mask = None

    if display or save_visualization:
        vis_image = visualize_matches(source_image, thumbnail_image, new_thumbnail, corners, kp_pairs, mask)

    if save_visualization:
        vis_filename = "%s.visualized%s" % os.path.splitext(thumbnail_filename)
        cv2.imwrite(vis_filename, vis_image)
        logging.info("Saved match visualization %s", vis_filename)

    if display:
        cv2.imshow(title, vis_image)
        cv2.waitKey()
        cv2.destroyAllWindows()
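# match_images, reconstruct_thumbnail and visualize_matches are project helpers not shown here.
# A plausible sketch of find_homography, assuming kp_pairs is a list of
# (thumbnail_keypoint, source_keypoint) tuples, is a RANSAC homography fit:
import numpy as np

def find_homography(kp_pairs):
    src_pts = np.float32([kp.pt for kp, _ in kp_pairs]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp.pt for _, kp in kp_pairs]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    return H, mask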
Example #21
def calibrateSharpening():
    frame = cv2.imread("failed_frame_224.png")
    new_frame = sharpen(frame)
    found, _ = cv2.findChessboardCorners(new_frame, (9,6))
    print found
    cv2.imshow("sharpened", new_frame)
    cv2.waitKey(0)
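# sharpen() is not shown above; a minimal sketch, assuming a plain 3x3 sharpening kernel
# applied with cv2.filter2D (the original helper may differ):
import numpy as np

def sharpen(frame):
    kernel = np.array([[ 0, -1,  0],
                       [-1,  5, -1],
                       [ 0, -1,  0]], dtype=np.float32)
    return cv2.filter2D(frame, -1, kernel)  # ddepth=-1 keeps the input depth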
def _generate_training_set(img, image_file):
    save_location = "images/training/"
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    _, regions = cv2.connectedComponents(img, img)

    if not os.path.exists("../images/cc"):
        os.makedirs("../images/cc")

    cv2.imwrite("../images/cc/cc.png", regions)
    cc = cv2.imread("../images/cc/cc.png", 0)
    _, cc_vis = cv2.threshold(cc, 1, 255, cv2.THRESH_BINARY)

    _, contours, hierarchy = cv2.findContours(cc_vis, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    idx = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 50 or area > 1000:
            continue
        if len(cnt) < 5:
            continue
        idx += 1
        x, y, w, h = cv2.boundingRect(cnt)
        roi = img[y: y + h, x: x + w]
        name = image_file.split('.')[0]
        inverted = (255 - roi)
        cv2.imwrite(save_location + name + str(idx) + '.jpg', inverted)
    cv2.waitKey(0)
Example #23
def camactivate ():

	with picamera.PiCamera() as camera:
		camera.resolution = (512,512)
		time.sleep(2)
		camera.capture('im1.jpg')
		time.sleep(2)
		camera.capture('im2.jpg')
		time.sleep(2)
		camera.capture('im3.jpg')
		time.sleep(2)
		camera.capture('im4.jpg')

	im1=cv2.imread('im1.jpg',1)
	im2=cv2.imread('im2.jpg',1)
	im3=cv2.imread('im3.jpg',1)
	im4=cv2.imread('im4.jpg',1)

	cv2.putText(im1,'Cam1',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im2,'Cam2',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im3,'Cam3',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im4,'Cam4',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)


	cv2.namedWindow('Catenated Images',cv2.WINDOW_NORMAL)
	concat=np.zeros((1024,1024,3),np.uint8)
	concat[0:512,0:512,:]=im1
	concat[0:512,512:1024,:]=im2
	concat[512:1024,0:512,:]=im3
	concat[512:1024,512:1024,:]=im4

	cv2.imshow('Catenated Images',concat)
	cv2.imwrite('concat.jpg',concat)
	cv2.waitKey(0)
Example #24
def detect_face(MaybeImage):
  if MaybeImage.success:
    image = MaybeImage.result
  else:
    return MaybeImage

  # Make image grayscale for processing
  gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

  # Detect faces in the image
  # faces will be an iterable object
  faces = faceCascade.detectMultiScale(
      image=gray_image,
      scaleFactor=1.1,
      minNeighbors=5,
      minSize=(40, 40),
      flags = cv2.cv.CV_HAAR_SCALE_IMAGE
  )

  if len(faces) == 1:
    face = faces[0]
    return Maybe(True, face)

  else:
    # If we're run as main we can show a box around the faces.
    # Otherwise it's nicer if we just spit out an error message.
    if __name__ == '__main__':
      # Draw a box around the faces
      for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

      cv2.imshow("{:d} Faces found. Remove other faces. Press any key to quit.".format(len(faces)) ,image)
      cv2.waitKey(0)
    return Maybe(False, "Expected 1 face, found {:d} faces. Please make sure your face is in frame, and remove any other things detected as a face from the frame.".format(len(faces)))
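# faceCascade is a module-level global created elsewhere. With a modern opencv-python build it
# could be constructed as below; note that flags=cv2.cv.CV_HAAR_SCALE_IMAGE above is the old
# 2.4 constant, spelled cv2.CASCADE_SCALE_IMAGE in later versions.
faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")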
Example #25
 def display( self ):
     '''Display the image (analyze must be run first)'''
     if self.test_image is None:
         raise Exception('The image is not tested')
     cv2.namedWindow("Image")   
     cv2.imshow("Image", self.test_image)   
     cv2.waitKey (0)  
Example #26
    def draw_walls(self):
        left_wall_points = np.array([self.transform(point) for point in self.left_wall_points])
        right_wall_points = np.array([self.transform(point) for point in self.right_wall_points])

        rect = cv2.minAreaRect(left_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        rect = cv2.minAreaRect(right_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        # So I dont have to comment abunch of stuff out for debugging
        dont_display = True
        if dont_display:
            return

        # Bob Ross it up (just for display)
        left_f, right_f = self.transform(self.left_f), self.transform(self.right_f)
        left_b, right_b = self.transform(self.left_b), self.transform(self.right_b)

        boat = self.transform(self.boat_pos)
        target = self.transform(self.target)

        cv2.circle(self.grid, tuple(boat[:2].astype(np.int32)), 8, 255)
        cv2.circle(self.grid, tuple(target[:2].astype(np.int32)), 15, 255)
        cv2.circle(self.grid, tuple(self.transform(self.mid_point)[:2].astype(np.int32)), 5, 255)
        cv2.circle(self.grid, tuple(left_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(right_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(left_b[:2].astype(np.int32)), 3, 125)
        cv2.circle(self.grid, tuple(right_b[:2].astype(np.int32)), 3, 128)
        cv2.imshow("test", self.grid)
        cv2.waitKey(0)
def listener():
    global fnobj
    rospy.init_node('reconocimiento', anonymous=True) 
    rospy.Subscriber("chatter", coordenadas, callback)
    
    rate = rospy.Rate(50) #hz
    cap = cv2.VideoCapture(0)
    fnobj = 'logo.png'
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'sift')

    while not rospy.is_shutdown():
  
        ret, frame = cap.read()
        crop_img = frame[y:h, x:w]
        cv2.imwrite("full.png", crop_img)

        img1 = cv2.imread('full.png', 0)
        img2 = cv2.imread(fnobj, 0)
        detector, matcher = init_feature(feature_name)
        if detector is not None:
            print 'using', feature_name
        else:
            print 'unknown feature:', feature_name
            sys.exit(1)

        kp1, desc1 = detector.detectAndCompute(img1, None)
        kp2, desc2 = detector.detectAndCompute(img2, None)
        print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))

        match_and_draw('analisis', matcher, desc1, desc2, kp1, kp2, img1, img2)
        cv2.waitKey(1)
Example #28
    def show_results(self, image, results, imshow=True, deteted_boxes_file=None,
                     detected_image_file=None):
        """Show the detection boxes"""
        img_cp = image.copy()
        if deteted_boxes_file:
            f = open(deteted_boxes_file, "w")
        #  draw boxes
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3]) // 2
            h = int(results[i][4]) // 2
            if self.verbose:
                print("   class: %s, [x, y, w, h]=[%d, %d, %d, %d], confidence=%f" % (results[i][0],
                            x, y, w, h, results[i][-1]))

                cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
                cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
                cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, y - h - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            if deteted_boxes_file:
                f.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' +
                        str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
        if imshow:
            cv2.imshow('YOLO_small detection', img_cp)
            cv2.waitKey(1)
        if detected_image_file:
            cv2.imwrite(detected_image_file, img_cp)
        if deteted_boxes_file:
            f.close()
def test_color_block_finder_01():
    '''
    Color block detection test case 1: read an image from a file and detect the blocks
    '''
    # image path
    img_path = "demo-pic.png"
    # lower colour threshold (HSV)
    lowerb = (96, 210, 85)
    # upper colour threshold (HSV)
    upperb = (114, 255, 231)

    # read the source image (BGR)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    # check that the image was read successfully
    if img is None:
        print("Error: please check the image file path")
        exit(1)

    # detect colour blocks and get the array of bounding rectangles
    rects = color_block_finder(img, lowerb, upperb)
    # draw the rectangles of the detected colour blocks
    canvas = draw_color_block_rect(img, rects)
    # show the final result in a HighGUI window
    cv2.namedWindow('result', flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.imshow('result', canvas)

    # wait for any key press
    cv2.waitKey(0)
    # close all windows
    cv2.destroyAllWindows()
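# color_block_finder and draw_color_block_rect come from the same project and are not shown.
# A hedged sketch of color_block_finder (the original may filter differently): HSV in-range
# mask plus bounding rectangles of the external contours.
def color_block_finder(img, lowerb, upperb, min_area=100):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lowerb, upperb)
    # [-2] picks the contour list under both the OpenCV 3 and OpenCV 4 return signatures
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    return [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) >= min_area]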
Example #30
def main():
    imgOriginal = cv2.imread(r'C:\Users\dbsnail\ImageFolder\images.jpg')               # open image

    if imgOriginal is None:                             # if image was not read successfully
        print "error: image not read from file \n\n"        # print error message to std out
        os.system("pause")                                  # pause so user can see error message
        return                                              # and exit function (which exits program)
    
    imgGrayscale = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)        # convert to grayscale

    imgBlurred = cv2.GaussianBlur(imgGrayscale, (5, 5), 0)              # blur
    
    imgCanny = cv2.Canny(imgBlurred, 100, 200)                          # get Canny edges

    cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE)        # create windows, use WINDOW_AUTOSIZE for a fixed window size
    cv2.namedWindow("imgCanny", cv2.WINDOW_AUTOSIZE)           # or use WINDOW_NORMAL to allow window resizing

    cv2.imshow("imgOriginal", imgOriginal)         # show windows
    cv2.imshow("imgCanny", imgCanny)

    cv2.waitKey()                               # hold windows open until user presses a key

    cv2.destroyAllWindows()                     # remove windows from memory

    return
            coords[i] = (x, y)
            i += 1
        # for index in range(len(coords2[e]):
        #    print(e,":",coords2, sep = "\n")

        #nose
        coords3 = np.zeros((68, 2), dtype="float")
        print(coords3)
        for f in range(27, 35):

            x = float(landmarks.part(f).x / width)
            y = float(landmarks.part(f).y / height)
            coords3[f] = (x, y)
            #for index in range(len(coords3[f]):
            #  print(f,":",coords3, sep = "\n")

        #mouth
        coords4 = np.zeros((68, 2), dtype="float")
        for g in range(48, 59):

            x = float(landmarks.part(g).x / width)
            y = float(landmarks.part(g).y / height)
            coords4[g] = (x, y)
            #for index in range(len(coords4[f]):
            #    print(g,":",coords4, sep = "\n")

elif nrFace <= 0:
    print("no faces found")
if cv2.waitKey(0):
    cv2.destroyAllWindows()
Example #32
import cv2

import time

fire_cascade = cv2.CascadeClassifier('fire_detection.xml')
#fire_detection.xml file & this code should be in the same folder while running the code

cap = cv2.VideoCapture(0)
while 1:
    #ser1.write('0')
    ret, img = cap.read()
    #cv2.imshow('imgorignal',img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    fire = fire_cascade.detectMultiScale(img, 1.2, 5)
    for (x, y, w, h) in fire:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        print('Fire is detected..!')

        time.sleep(0.2)

    cv2.imshow('img', img)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example #33

print("Predicting images...")

# load test images
test_img1 = cv2.imread("test-data/test1.JPG")
test_img2 = cv2.imread("test-data/test2.jpg")
test_img3 = cv2.imread("test-data/test3.JPG")

# perform a prediction
predicted_img1 = predict(test_img1)
predicted_img2 = predict(test_img2)
predicted_img3 = predict(test_img3)
print("Prediction complete")

# display both images
# cv2.imshow(subjects[1], predicted_img1)
# cv2.imshow(subjects[2], predicted_img2)

titles = [subjects[1], subjects[2], subjects[3]]
images = [predicted_img1, predicted_img2, predicted_img3]

for i in range(3):
    plt.subplot(3, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #34
def superm2(image):
    mimage = np.fliplr(image)
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(mimage, None)
    for p, mp in zip(kp1, kp2):
        p.angle = np.deg2rad(p.angle)
        mp.angle = np.deg2rad(mp.angle)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    houghr = np.zeros(len(matches))
    houghth = np.zeros(len(matches))
    weights = np.zeros(len(matches))
    i = 0
    good = []
    for match, match2 in matches:
        point = kp1[match.queryIdx]
        mirpoint = kp2[match.trainIdx]
        mirpoint2 = kp2[match2.trainIdx]
        mirpoint2.angle = np.pi - mirpoint2.angle
        mirpoint.angle = np.pi - mirpoint.angle
        if mirpoint.angle < 0.0:
            mirpoint.angle += 2 * np.pi
        if mirpoint2.angle < 0.0:
            mirpoint2.angle += 2 * np.pi
        mirpoint.pt = (mimage.shape[1] - mirpoint.pt[0], mirpoint.pt[1])
        if very_close(point.pt, mirpoint.pt):
            mirpoint = mirpoint2
            good.append(match2)
        else:
            good.append(match)
        theta = angle_with_x_axis(point.pt, mirpoint.pt)
        xc, yc = midpoint(point.pt, mirpoint.pt)
        r = xc * np.cos(theta) + yc * np.sin(theta)
        Mij = reisfeld(point.angle, mirpoint.angle, theta) * S(
            point.size, mirpoint.size
        )
        houghr[i] = r
        houghth[i] = theta
        weights[i] = Mij
        i += 1
    # matches = sorted(matches, key = lambda x:x.distance)
    good = sorted(good, key=lambda x: x.distance)

    def draw(r, theta):
        if np.pi / 4 < theta < 3 * (np.pi / 4):
            for x in range(len(image.T)):
                y = int((r - x * np.cos(theta)) / np.sin(theta))
                if 0 <= y < len(image.T[x]):
                    image[y][x] = 255
        else:
            for y in range(len(image)):
                x = int((r - y * np.sin(theta)) / np.cos(theta))
                if 0 <= x < len(image[y]):
                    image[y][x] = 255
    print(houghr)
    print(houghth)
    img3 = cv2.drawMatches(image, kp1, mimage, kp2, good[:15], None, flags=2)

    def hex():
        polys = plt.hexbin(houghr, houghth, bins=200, gridsize=image.shape[1])
        # plt.colorbar()
        # plt.show()
        hvals = polys.get_array()
        hcoords = polys.get_offsets()
        return hcoords[hvals.argmax()]


    best_coords = hex()
    print(best_coords)
    # draw(2.8, 2.4)
    draw(*best_coords)
    cv2.imshow('a', image); cv2.waitKey(0);
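# very_close, midpoint, angle_with_x_axis, reisfeld and S are small helpers defined elsewhere
# in the original symmetry-detection script. Hedged sketches of the two geometric ones:
def midpoint(p, q):
    # Midpoint of two (x, y) keypoint locations.
    return (p[0] + q[0]) / 2.0, (p[1] + q[1]) / 2.0

def angle_with_x_axis(p, q):
    # Angle of the segment from p to q, measured from the x axis, in radians.
    return np.arctan2(q[1] - p[1], q[0] - p[0])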
Example #35
    
    if len(contours)>0:
        centroid=max(contours,key=cv2.contourArea) # pick the largest contour
        M=cv2.moments(centroid)
        area=cv2.contourArea(centroid)
        if area>3000:
            cx=int(M['m10']/M['m00']) # find the x and y coordinates of the centroid
            cy=int(M['m01']/M['m00'])

            cv2.line(image,(cx,0),(cx,720),(255,0,0),1) # draw guide lines at the found coordinates
            cv2.line(image,(0,cy),(1280,cy),(255,0,0),1)
            cv2.line(image,(320,0),(320,640),(255,0,0),1)
            cv2.circle(image,(cx,cy),3,(0,0,255),-1)
            cv2.drawContours(image,contours,-1,(0,255,0),2) # draw the contour
            if cx<500:
                print("turn right") # if the reference value (x coordinate) is below 500, turn right
            if cx>520:
                print("TURN LEFT") # if the reference value is above 520, turn left
            if 500<cx<520:
                print("go straight") # if the reference value is between 500 and 520, go straight
    cv2.imshow('Processed Image',image) # display the processed frames
    cv2.imshow('th1',th1)
    cv2.imshow('th2',th2)
    cv2.imshow("input", res)
    key = cv2.waitKey(10) # close all frames if 'ESC' is pressed
    if key == 27:
        break
cv2.destroyAllWindows() # destroy all windows
cv2.VideoCapture(0).release() # release the video capture
Example #36
	thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
 
	# Dilate the thresholded image to fill in gaps before finding contours
	thresh = cv2.dilate(thresh, None, iterations=2)
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] if imutils.is_cv2() else cnts[1]

	for c in cnts:
		
		if cv2.contourArea(c) < args["min_area"]:
			continue
		(x, y, w, h) = cv2.boundingRect(c)
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
		text = "Occupied"
	# 
	cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
		cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
	cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
		(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
	cv2.imshow("Security Feed", frame)
	cv2.imshow("Thresh", thresh)
	cv2.imshow("Frame Delta", frameDelta)
	key = cv2.waitKey(1) & 0xFF

	if key == ord("q"):
		break
 
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
Example #37
inHeight = 400
inWidth = int(((aspect_ratio*inHeight)*8)//8)

vid_writer = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame.shape[1],frame.shape[0]))

net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
k = 0
while 1:
    background = cv2.imread('back.jpg')
    background = cv2.resize(background, (inWidth, inHeight))
    k+=1
    t = time.time()
    hasFrame, frame = cap.read()
    frameCopy = np.copy(frame)
    
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        break

    inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                              (0, 0, 0), swapRB=False, crop=False)

    net.setInput(inpBlob)

    output = net.forward()

    print("forward = {}".format(time.time() - t))

    # Empty list to store the detected keypoints
    points = []
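    # The snippet stops after creating the empty points list. The usual continuation in
    # OpenPose-style examples locates each part at the maximum of its heatmap, roughly like
    # this (nPoints and the 0.1 confidence threshold are assumptions):
    nPoints = 18
    for i in range(nPoints):
        probMap = output[0, i, :, :]
        _, prob, _, point = cv2.minMaxLoc(probMap)
        # Scale the heatmap location back to the original frame size
        x = int(frame.shape[1] * point[0] / output.shape[3])
        y = int(frame.shape[0] * point[1] / output.shape[2])
        points.append((x, y) if prob > 0.1 else None)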
Example #38
        drawPred(classIds[i], confidences[i], left, top, left + width,
                 top + height)


if (args.video):
    outputFile = "output/yolo_output.avi"
    cap = cv2.VideoCapture(args.video)
else:
    cap = cv2.VideoCapture(0)

vid_writer = cv2.VideoWriter(outputFile,
                             cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                             (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                              round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

while cv2.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        print('Done!!')
        print("Output file is stored as ", outputFile)
        cv2.waitKey(3000)
        break

    blob = cv2.dnn.blobFromImage(frame,
                                 1 / 255, (INP_WIDTH, INP_HEIGHT), [0, 0, 0],
                                 1,
                                 crop=False)

    #SETS THE INPUTS FOR THE DARKNET NETWORK
    net.setInput(blob)
import cv2
import numpy as np

kamera=cv2.VideoCapture(0)

while True:
    ret,kare=kamera.read()      # ret reports whether the camera is delivering frames

    bolge=kare[0:200,0:200]

    cv2.imshow("Video",kare)        
    cv2.imshow("Bolge",bolge)

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

kamera.release()
    

cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
"""
Created on Fri Mar  5 09:17:31 2021

@author: Aditya Manwar
"""


import cv2
from pyzbar import pyzbar
import numpy as np

cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_PLAIN

while True:
    _, frame = cap.read()
    
    decodedObj = pyzbar.decode(frame)
    for obj in decodedObj:
        print("Data", obj.data)
        cv2.putText(frame, str(obj.data), (50,50), font, 3, (255,0,0),3)
    
    cv2.imshow('Frame',frame)
    
    key = cv2.waitKey(1)
    if key == 27:
        break
Example #41
        raise ("IO Error")
    #cv2.namedWindow("Capture", cv2.WINDOW_AUTOSIZE)
    sensor = Sensor('EdgeSensor')
    feature1 = Feature('F1', sensor, [[0, 0, 1], [0, 1, 0], [1, 0, 0]])
    feature2 = Feature('F2', sensor, [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
    #feature3 = Feature('F3',sensor,[[0,1,0],[1,1,1],[0,1,0]])
    feature4 = Feature('F4', sensor, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    feature5 = Feature('F5', sensor, [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
    sensor.addFeature(feature1)
    sensor.addFeature(feature2)
    #sensor.addFeature(feature3)
    sensor.addFeature(feature4)
    sensor.addFeature(feature5)
    x = 200

    cv2.namedWindow("Sensor")
    cv2.moveWindow("Sensor", 40, 530)

    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        height, width, channels = image.shape
        sensor.read(image)
        cv2.imshow("World", sensor.world)
        sensor.run()
        cv2.imshow("Sensor", sensor.output)
        if cv2.waitKey(33) >= 0:
            break

    cv2.destroyAllWindows()
            xmax = int(min(imW,(boxes[i][3] * imW)))
            
            cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)

            # Draw label
            object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text

    # Draw framerate in corner of frame
    cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Calculate framerate
    t2 = cv2.getTickCount()
    time1 = (t2-t1)/freq
    frame_rate_calc= 1/time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
videostream.stop()
Example #43
					nearDistance = distance

				lightSource = image[y:y + h, x:x + w]

			if (nearX>0 or nearY>0) and nearDistance>10:
				cv2.rectangle(image,(nearX,nearY),(nearX+nearW,nearY+nearH),(0,255,0),2)
				cv2.line(image,(nearX+nearW/2,nearY+nearH),(refPtX,refPtY),(0,255,0))
				moveMotor(getAngle((nearX+nearW/2),(nearY+nearH/2),refPtX,refPtY))
			# frame = cv2.bitwise_and(frame, mask)
			cv2.imshow("Canny", canny)
			cv2.imshow("Frame", image)
			cv2.imshow("Gray", gray)
			cv2.imshow("Thresh", thresh)
			
		# if the 'q' key is pressed, stop the loop
		if cv2.waitKey(1) & 0xFF == ord("q"):
			break

except KeyboardInterrupt:
	# code you want to run before the program
	# exits when you press CTRL+C
	print "\n\n Keyboard interrupt detected"

# except:
    # catches ALL other exceptions including errors.
    # won't get any error messages for debugging
    # so only use it once your code is working
    # print "Other error or exception occurred!"

finally:
	print "\n Cleanup GPIO"
Example #44
    def run(self):
        args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
        try:
            video_src = video_src[0]
        except:
            video_src = 0
        args = dict(args)
        
        cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
        cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
        
        rectang = np.array([0, 0, 0, 0])
        time = 0.0
        while True:
            t = clock() 
            dt = 0.0 
#            ret, img = self.cam.read()
            _ret, self.frame = self.cam.read()
            
            gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) #translate image to grey, get it ready for detection
            gray = cv.equalizeHist(gray)
            vis = self.frame.copy()
            
            
            rects = detect(gray, cascade)
            
#            if len(rects):
##                self.selection = (xmin, ymin, xmax, ymax)
#                self.selection = (rects[0][0], rects[0][1], rects[0][2], rects[0][3])
##                self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
#                self.track_window = (rects[0][0], rects[0][1], rects[0][2] - rects[0][0], rects[0][3] - rects[0][1])
#            else:
#                self.selection = None
#                self.track_window = None

            if rectang.all() == 0 or time > 3: # the first time we launch the detection, or we've lost track of the face
            
                rects = detect(gray, cascade) #detect the face in the whole image
            
                if len(rects) != 0 :  #if detects a face
                
                    rectang = np.array([rects[0][0] + 1, rects[0][1] + 1, rects[0][2] - 1, rects[0][3] - 1]) #restrict and restore the detecting region
                    dt = clock() - t
                    time = 0.0 #reset the chronoscope when detecting the face
                
                else:
                    rectang = np.array([0, 0, 0, 0]) #if we lose the track of face, initialize the detecting-rectangle to null
                
                
            else: #restrict the detecting-region when we have detected the face
            
                gray = gray[rectang[1]:rectang[3], rectang[0]:rectang[2]] 
                vis = vis[rectang[1]:rectang[3], rectang[0]:rectang[2]] 
                rects = detect(gray.copy(), cascade) #detect the face in marked rectangle
                
                if len(rects) != 0 : #if detects a face 
                    
                    rectang = np.array([rects[0][0] + 1, rects[0][1] + 1, rects[0][2] - 1, rects[0][3] - 1]) #restrict and restore the detecting region
                    dt = clock() - t
                    time = 0.0 #reset the chronoscope when detecting the face
                
                else:
                    rectang = np.array([0, 0, 0, 0])
                    
            if rectang.all() != 0:
                dt = (clock() - t) * 1000 #translate the unit of chronoscope to ms
                time = time + dt
            
            self.selection = (rectang[0],rectang[1],rectang[2],rectang[3])
            self.track_window = (rectang[0],rectang[1],rectang[2]-rectang[0],rectang[3]-rectang[1])

            
            
            
            
            hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                x0, y0, x1, y1 = self.selection
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.show_hist()

                vis_roi = vis[y0:y1, x0:x1]
                cv.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
                self.selection = None
                prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                
                prob[rects[0][1]:rects[0][3],rects[0][0]:rects[0][2]] = 0
                    
                term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
                track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)

                if self.show_backproj:
                    vis[:] = prob[...,np.newaxis]
                try:
                    cv.ellipse(vis, track_box, (0, 0, 255), 2)
#                    V=vis
#                    print(self.track_window)
#                    break
                except:
                    print(track_box)

            cv.imshow('camshift', vis)

            ch = cv.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv.destroyAllWindows()
Example #45
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (cbcol, cbrow),None)
    if ret == True:
        objpoints.append(objp)

        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        imgpoints.append(corners2)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(frame, (cbcol, cbrow), corners2,ret)
        cv2.imshow('frame',img)
        cv2.waitKey(250)
    else :
        cv2.imshow('frame',frame)
        cv2.waitKey(250)

    if len(imgpoints) >= 150:
        print "calibrating camera now"
        cv2.destroyAllWindows()
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        np.savez("../calibration_files/MBP", ret=ret, mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
import cv2 as cv 

cap = cv.VideoCapture(0)
while True:
    ret,frame = cap.read()
    gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray,100,200)
    cv.imshow('edges',edges)
    if cv.waitKey(10) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
Example #47
def main():
    img1_car =None
    img1_person = None
    img2_car = None
    img2_person = None
    img2_bicycle = None
    img3_car = None
    img1 = cv2.imread('../data/test/left/004945.jpg')
    img2 = cv2.imread('../data/test/left/004964.jpg')
    img3 = cv2.imread('../data/test/left/005002.jpg')
    detector_matrix = []
    depth_matrix = []
    detector_matrix_list = read_all_csv(detector_csv_path, detector_matrix)
    depth_matrix_list = read_all_csv(depth_csv_path, depth_matrix)

    for detector_matrix in detector_matrix_list:
        if detector_matrix.shape[0] == 6:
            img1_car = detector_matrix
        if detector_matrix.shape[0] == 2:
            img1_person = detector_matrix
        if detector_matrix.shape[0] == 1 and detector_matrix[0][0]>1000:
            img2_bicycle = detector_matrix
        if detector_matrix.shape[0] == 4:
            img2_car = detector_matrix
        if detector_matrix.shape[0] == 1 and detector_matrix[0][0]<1000:
            img2_person = detector_matrix
        if detector_matrix.shape[0] == 3:
            img3_car = detector_matrix


    img1_depth = depth_matrix_list[2]
    img2_depth = depth_matrix_list[1]
    img3_depth = depth_matrix_list[0]


    print("finish reading!")


    car_count = img1_rows_lists[0]
    person_count = img1_rows_lists[1]
    bicycle_count = img1_rows_lists[2]

    img1, img1_car_center, car_dis_list = calculate_mass_center(img1_car, img1_depth, img1, "car")
    np.savetxt("../data/test/results/004945_car_mass_center.csv", img1_car_center, delimiter=",")
    print("image 1 car's mass center is: ")
    print(img1_car_center)
    print()

    img_1, img1_person_center, person_dis_list = calculate_mass_center(img1_person, img1_depth, img1, "person")
    np.savetxt("../data/test/results/004945_person_mass_center.csv", img1_person_center, delimiter=",")
    print("image 1 person's mass center is: ")
    print((img1_person_center))
    print()

    total_list = car_dis_list + person_dis_list
    closest_index = np.argsort(total_list)
    str =  ""
    if closest_index[0] < len(car_dis_list):
        mass_center = img1_car_center[closest_index[0]]
        str = "the closest object is car, position for you is {}.".format(mass_center)
    if len(car_dis_list)<= closest_index[0] < len(person_dis_list+car_dis_list):
        mass_center = img1_person_center[closest_index[0] - car_count]
        str = "the closest object is person, position for you is {}.".format(mass_center)
    # if len(person_dis_list+car_dis_list)<= closest_index < len(bicycle_dis_list+person_dis_list+car_dis_list):
    #     str = "the closest object is bicycle."


    cv2.putText(img1, "Car number is: {}, Person number is: {} and Bicycle number is: {}"
                .format(car_count, person_count, bicycle_count), (30, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(img1, str, (30, 40), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow("img", img1)

    cv2.waitKey(0)
    cv2.destroyAllWindows()



    car_count = img2_rows_lists[0]
    person_count = img2_rows_lists[1]
    bicycle_count = img2_rows_lists[2]

    img2, img2_car_center, car_dis_list = calculate_mass_center(img2_car, img2_depth, img2, "car")
    np.savetxt("../data/test/results/004964_car_mass_center.csv", img2_car_center, delimiter=",")
    print("image 2 car's mass center is: ")
    print(img2_car_center)
    print()

    img2, img2_person_center, person_dis_list = calculate_mass_center(img2_person, img2_depth, img2, "person")
    np.savetxt("../data/test/results/004964_person_mass_center.csv", img2_person_center, delimiter=",")
    print("image 2 person's mass center is: ")
    print(img2_person_center)
    print()

    img2, img2_bicycle_center, bicycle_dis_list = calculate_mass_center(img2_bicycle, img2_depth, img2, "bicycle")
    np.savetxt("../data/test/results/004964_bicycle_mass_center.csv", img2_bicycle_center, delimiter=",")
    print("image 2 bicycle's mass center is: ")
    print(img2_bicycle_center)
    print()

    total_list = car_dis_list + person_dis_list
    closest_index = np.argsort(total_list)
    str =  ""
    if closest_index[0] < len(car_dis_list):
        mass_center = img2_car_center[closest_index[0]]
        str = "the closest object is car, position for you is {}.".format(mass_center)
    if len(car_dis_list)<= closest_index[0] < len(person_dis_list+car_dis_list):
        mass_center = img2_person_center[closest_index[0]-car_count]
        str = "the closest object is person, position for you is {}.".format(mass_center)
    if len(person_dis_list+car_dis_list)<= closest_index[0] < len(bicycle_dis_list+person_dis_list+car_dis_list):
        mass_center = img2_bicycle_center[closest_index[0]-car_count-person_count]
        str = "the closest object is bicycle, position for you is {}.".format(mass_center)

    cv2.putText(img2, "Car number is: {}, Person number is: {} and Bicycle number is: {}"
                .format(car_count, person_count, bicycle_count), (30, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(img2, str, (30, 40), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow("img", img2)

    cv2.waitKey(0)
    cv2.destroyAllWindows()



    car_count = img3_rows_lists[0]
    person_count = img3_rows_lists[1]
    bicycle_count = img3_rows_lists[2]

    img3, img3_car_center, car_dis_list = calculate_mass_center(img3_car, img3_depth, img3, "car")
    np.savetxt("../data/test/results/005002_car_mass_center.csv", img3_car_center, delimiter=",")
    print("image 3 car's mass center is: ")
    print(img3_car_center)
    print()

    total_list = car_dis_list + person_dis_list
    closest_index = np.argsort(total_list)
    str =  ""
    if closest_index[0] < len(car_dis_list):
        mass_center = img3_car_center[closest_index[0]]
        str = "the closest object is car, position for you is {}.".format(mass_center)
    # if len(car_dis_list)<= closest_index < len(person_dis_list+car_dis_list):
    #     str = "the closest object is person."
    # if len(person_dis_list+car_dis_list)<= closest_index < len(bicycle_dis_list+person_dis_list+car_dis_list):
    #     str = "the closest object is bicycle."

    cv2.putText(img3, "Car number is: {}, Person number is: {} and Bicycle number is: {}"
                .format(car_count, person_count, bicycle_count), (30, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    cv2.putText(img3, str, (30, 40), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)

    cv2.imshow("img", img3)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #48
0
def displayImage(img, label='Img', debug=False):
    cv2.imshow(label, img)
    if not debug:
        cv2.waitKey(0)
def detectAndTrackMultipleFaces():
	#Open the first webcam device
	capture = cv2.VideoCapture(0)

	#Create two opencv named windows
	#cv2.namedWindow("base-image", cv2.WINDOW_AUTOSIZE)
	cv2.namedWindow("result-image", cv2.WINDOW_AUTOSIZE)

	#Position the windows next to each other
	#cv2.moveWindow("base-image",0,100)
	cv2.moveWindow("result-image",400,100)

	#Start the window thread for the two windows we are using
	cv2.startWindowThread()

	#The color of the rectangle we draw around the face
	rectangleColor = (0,165,255)

	#variables holding the current frame number and the current faceid
	frameCounter = 0
	currentFaceID = 0

	#Variables holding the correlation trackers and the name per faceid
	faceTrackers = {}
	faceNames = {}

	try:
		while True:
			#Retrieve the latest image from the webcam
			rc,baseImage = capture.read()

			#Optionally resize the image to 320x240 (disabled here; the full-size frame is used)
			#baseImage = cv2.resize( fullSizeBaseImage, ( 320, 240))

			#Check if a key was pressed and if it was Q, then break
			#from the infinite loop
			pressedKey = cv2.waitKey(2)
			if pressedKey == ord('Q'):
				break



			#Result image is the image we will show the user, which is a
			#combination of the original image from the webcam and the
			#overlayed rectangle for the largest face
			resultImage = baseImage.copy()




			#STEPS:
			# * Update all trackers and remove the ones that are not 
			#   relevant anymore
			# * Every 10 frames:
			#       + Use face detection on the current frame and look
			#         for faces. 
			#       + For each found face, check if centerpoint is within
			#         existing tracked box. If so, nothing to do
			#       + If centerpoint is NOT in existing tracked box, then
			#         we add a new tracker with a new face-id


			#Increase the framecounter
			frameCounter += 1 



			#Update all the trackers and remove the ones for which the update
			#indicated the quality was not good enough
			fidsToDelete = []
			for fid in faceTrackers.keys():
				trackingQuality = faceTrackers[ fid ].update( baseImage )

				#If the tracking quality is not good enough, we schedule
				#this tracker for removal
				if trackingQuality < 7:
					fidsToDelete.append( fid )

			for fid in fidsToDelete:
				print("Removing fid " + str(fid) + " from list of trackers")
				faceTrackers.pop( fid , None )




			#Every 10 frames, we will have to determine which faces
			#are present in the frame
			if (frameCounter % 10) == 0:



				#For the face detection, we need to make use of a gray
				#colored image so we will convert the baseImage to a
				#gray-based image
				gray = cv2.cvtColor(baseImage, cv2.COLOR_BGR2GRAY)
				#Now use the haar cascade detector to find all faces
				#in the image
				#faces = faceCascade.detectMultiScale(gray, 1.3, 5)
				# detect faces in the image
				faces = detector.detect_faces(baseImage)
				#print("Faces: ", faces)

				#Loop over all faces and check if the area for this
				#face is the largest so far
				#We need to convert it to int here because of the
				#requirement of the dlib tracker. If we omit the cast to
				#int here, you will get cast errors since the detector
				#returns numpy.int32 and the tracker requires an int
				for person in faces:
					bounding_box = person['box']
					#The MTCNN 'box' is (x, y, width, height)
					(startX, startY, boxW, boxH) = (bounding_box[0], bounding_box[1],
												bounding_box[2], bounding_box[3])
					x = int(startX)
					y = int(startY)
					w = int(boxW)
					h = int(boxH)


					#calculate the centerpoint
					x_bar = x + 0.5 * w
					y_bar = y + 0.5 * h



					#Variable holding the faceid we
					#matched with (if any)
					matchedFid = None

					#Now loop over all the trackers and check if the 
					#centerpoint of the face is within the box of a 
					#tracker
					for fid in faceTrackers.keys():
						tracked_position =  faceTrackers[fid].get_position()

						t_x = int(tracked_position.left())
						t_y = int(tracked_position.top())
						t_w = int(tracked_position.width())
						t_h = int(tracked_position.height())


						#calculate the centerpoint
						t_x_bar = t_x + 0.5 * t_w
						t_y_bar = t_y + 0.5 * t_h

						#check if the centerpoint of the face is within the 
						#rectangle of a tracker region. Also, the centerpoint
						#of the tracker region must be within the region 
						#detected as a face. If both of these conditions hold
						#we have a match
						if ( ( t_x <= x_bar   <= (t_x + t_w)) and 
							 ( t_y <= y_bar   <= (t_y + t_h)) and 
							 ( x   <= t_x_bar <= (x   + w  )) and 
							 ( y   <= t_y_bar <= (y   + h  ))):
							matchedFid = fid


					#If no matched fid, then we have to create a new tracker
					if matchedFid is None:

						print("Creating new tracker " + str(currentFaceID))

						#Create and store the tracker 
						tracker = dlib.correlation_tracker()
						tracker.start_track(baseImage,
											dlib.rectangle( x,
															y,
															x+w,
															y+h))

						faceTrackers[ currentFaceID ] = tracker

						#Start a new thread that is used to simulate 
						#face recognition. This is not yet implemented in this
						#version :)
						t = threading.Thread( target = doRecognizePerson ,
											   args=(faceNames, currentFaceID, baseImage, person))
						t.start()
						#doRecognizePerson(faceNames, currentFaceID, baseImage, person)
						#Increase the currentFaceID counter
						currentFaceID += 1




			#Now loop over all the trackers we have and draw the rectangle
			#around the detected faces. If we 'know' the name for this person
			#(i.e. the recognition thread is finished), we print the name
			#of the person, otherwise the message indicating we are detecting
			#the name of the person
			for fid in faceTrackers.keys():
				tracked_position =  faceTrackers[fid].get_position()

				t_x = int(tracked_position.left())
				t_y = int(tracked_position.top())
				t_w = int(tracked_position.width())
				t_h = int(tracked_position.height())

				cv2.rectangle(resultImage, (t_x, t_y),
										(t_x + t_w , t_y + t_h),
										rectangleColor ,2)


				if fid in faceNames.keys():
					cv2.putText(resultImage, faceNames[fid] , 
								(int(t_x + t_w/2), int(t_y)), 
								cv2.FONT_HERSHEY_SIMPLEX,
								0.5, (255, 255, 255), 2)
				else:
					cv2.putText(resultImage, "Detecting..." , 
								(int(t_x + t_w/2), int(t_y)), 
								cv2.FONT_HERSHEY_SIMPLEX,
								0.5, (255, 255, 255), 2)






			#Since we want to show something larger on the screen than the
			#captured frame, we resize the image
			#
			#Note that it would also be possible to keep the large version
			#of the baseimage and make the result image a copy of this large
			#base image and use the scaling factor to draw the rectangle
			#at the right coordinates.
			largeResult = cv2.resize(resultImage,
									 (OUTPUT_SIZE_WIDTH,OUTPUT_SIZE_HEIGHT))

			#Finally, we want to show the images on the screen
			#cv2.imshow("base-image", baseImage)
			cv2.imshow("result-image", largeResult)


			if cv2.waitKey(20) & 0xFF == ord("q"):
				break

	#To ensure we can also deal with the user pressing Ctrl-C in the console
	#we have to check for the KeyboardInterrupt exception and break out of
	#the main loop
	except KeyboardInterrupt as e:
		pass

	#Destroy any OpenCV windows and exit the application
	cv2.destroyAllWindows()
	exit(0)
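#A minimal sketch of the globals the function above assumes (the sizes and the
#recognition stub are assumptions, not the original author's code; detect_faces
#matches the mtcnn package API):
import threading
import time
import dlib
from mtcnn import MTCNN

OUTPUT_SIZE_WIDTH = 775   # assumed display width
OUTPUT_SIZE_HEIGHT = 600  # assumed display height
detector = MTCNN()

def doRecognizePerson(faceNames, fid, baseImage, person):
    #Placeholder for the face recognition step (not implemented in the original)
    time.sleep(2)
    faceNames[fid] = "Person " + str(fid)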
              if areaRatio<27:
                    cv2.putText(frame,'3',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
              else:
                    cv2.putText(frame,'ok',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
                    
        elif l==4:
            cv2.putText(frame,'4',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
            
        elif l==5:
            cv2.putText(frame,'5',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
            
        elif l==6:
            cv2.putText(frame,'reposition',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
            
        else :
            cv2.putText(frame,'reposition',(10,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
            
        cv2.imshow('mask',mask)
        cv2.imshow('frame',frame)
    except:
        pass
        
    
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
    
cv2.destroyAllWindows()
vid.release()    
    
Example #51
0
#Convert the input picture to grayscale
img = cv.cvtColor(img_org, cv.COLOR_BGR2GRAY)
fromCenter = False
Boundaries = []

# Select the ROI that contains our object; choose only one object for this trial, since the system processes a single object
ROI_bounding = cv.selectROI("Image", img, fromCenter)

# Finish the selection process by cropping the selected region.
ROI = img[int(ROI_bounding[1]):int(ROI_bounding[1] + ROI_bounding[3]),
      int(ROI_bounding[0]):int(ROI_bounding[0] + ROI_bounding[2])]
cv.imshow("ROI" + str(ROI_bounding), ROI)
cv.imwrite("ROI" + str(ROI_bounding) + ".jpg", ROI)

#To give enough time to show the above images
cv.waitKey(10)

# At this point we have the object
# Apply a few filters to remove noise; we suggest adding opening and closing operations as well (see the sketch at the end of this example).
img = cv.medianBlur(ROI, 5)
blur1 = cv.GaussianBlur(img, (5, 5), 10)
blur1 = cv.GaussianBlur(blur1, (5, 5), 10)

# Showing the result and saving it.
cv.imshow("blur", blur1)
cv.imwrite("Blur" + str(ROI_bounding) + ".jpg", blur1)

# Change the domain to BW, now we are able to apply more morphological filters.
b, thresh_OTSU = cv.threshold(blur1, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

# At this point we should add some morphological operations such as opening and closing to get a cleaner object mask.
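#A minimal sketch of the suggested opening/closing step (the 5x5 kernel size is
#an assumption; it operates on the OTSU-thresholded image from above):
kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
opened = cv.morphologyEx(thresh_OTSU, cv.MORPH_OPEN, kernel)   # remove small specks
cleaned = cv.morphologyEx(opened, cv.MORPH_CLOSE, kernel)      # fill small holes
cv.imshow("cleaned", cleaned)
cv.imwrite("Cleaned" + str(ROI_bounding) + ".jpg", cleaned)
cv.waitKey(10)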
Example #52
0
# Create a VideoCapture object and read from input file
cap = cv2.VideoCapture('Output.avi')
# Check if the video file opened successfully
if not cap.isOpened():
    print("Error opening video file")
# Read until video is completed
while (cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        # Display the resulting frame
        cv2.imshow('Frame', frame)
        # Press q on keyboard to exit
        # Change the waitKey delay to control playback speed; lower values play faster
        # (a sketch after this loop derives the delay from the source frame rate)
        if cv2.waitKey(60) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break
# When everything done, release
# the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
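# A variation on the playback loop above (a sketch, not part of the original code):
# derive the waitKey delay from the file's reported frame rate so playback roughly
# matches the source speed, assuming the container provides valid FPS metadata.
cap2 = cv2.VideoCapture('Output.avi')
fps = cap2.get(cv2.CAP_PROP_FPS)
delay = int(1000 / fps) if fps and fps > 0 else 33  # fall back to ~30 fps
while cap2.isOpened():
    ret, frame = cap2.read()
    if not ret:
        break
    cv2.imshow('Frame', frame)
    if cv2.waitKey(delay) & 0xFF == ord('q'):
        break
cap2.release()
cv2.destroyAllWindows()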

# Alternatively, open the file with the system's default player to view it at
# its original speed; if you use this approach, comment out the lines related
# to deleting the video
#import subprocess, sys  # standard library
#opener = "open" if sys.platform == "darwin" else "xdg-open"
    input_image = cv2.imread(args.input_image, 1).astype(np.float32)

    input_image = cv2.resize(input_image, (input_shape[3], input_shape[2]))
    input_image = input_image.transpose((2, 0, 1))
    input_image = np.asarray([input_image])

    out = net.forward_all(**{net.inputs[0]: input_image})

    prediction = net.blobs['deconv6_0_0'].data[0].argmax(axis=0)

    prediction = np.squeeze(prediction)
    prediction = np.resize(prediction, (3, input_shape[2], input_shape[3]))
    prediction = prediction.transpose(1, 2, 0).astype(np.uint8)

    prediction_rgb = np.zeros(prediction.shape, dtype=np.uint8)
    label_colours_bgr = label_colours[..., ::-1]
    cv2.LUT(prediction, label_colours_bgr, prediction_rgb)

    #    cv2.imshow("ENet", prediction_rgb)
    key = cv2.waitKey(0)

    if args.out_dir is not None:
        input_path_ext = args.input_image.split(".")[-1]
        input_image_name = args.input_image.split("/")[-1:][0].replace(
            '.' + input_path_ext, '')
        out_path_im = args.out_dir + input_image_name + '_enet' + '.' + input_path_ext
        out_path_gt = args.out_dir + input_image_name + '_enet_gt' + '.' + input_path_ext

        cv2.imwrite(out_path_im, prediction_rgb)
        # cv2.imwrite(out_path_gt, prediction) #  label images, where each pixel has an ID that represents the class
Example #54
0
    def FaceDetect(self):
        import cv2
        import numpy as np

        cv2.namedWindow('frame')
        cv2.namedWindow('dist')

        # the classifier that will be used in the cascade
        faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

        # capture video stream from camera source. 0 refers to first camera, 1 refers to 2nd and so on.
        cap = cv2.VideoCapture(1)

        triggered = False
        sdThresh = 10
        font = cv2.FONT_HERSHEY_SIMPLEX

        _, frame1 = cap.read()
        _, frame2 = cap.read()
        facecount = 0
        while (True):
            _, frame3 = cap.read()
            rows, cols, _ = np.shape(frame3)
            cv2.imshow('dist', frame3)
            dist = distMap(frame1, frame3)

            frame1 = frame2
            frame2 = frame3

            # apply Gaussian smoothing
            mod = cv2.GaussianBlur(dist, (9, 9), 0)

            # apply thresholding
            _, thresh = cv2.threshold(mod, 100, 255, 0)

            # compute the standard deviation of the smoothed difference as a motion measure
            _, stDev = cv2.meanStdDev(mod)

            cv2.imshow('dist', mod)
            cv2.putText(
                frame2, "Standard Deviation - {}".format(round(stDev[0][0],
                                                               0)), (70, 70),
                font, 1, (255, 0, 255), 1, cv2.LINE_AA)

            if stDev > sdThresh:
                # the cascade is implemented in grayscale mode
                gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

                # begin face cascade
                faces = faceCascade.detectMultiScale(gray,
                                                     scaleFactor=2,
                                                     minSize=(20, 20))
                facecount = 0
                # draw a rectangle over detected faces
                for (x, y, w, h) in faces:
                    print(x, y)
                    self.actionFollower(x, y)
                    facecount = facecount + 1
                    cv2.rectangle(frame2, (x, y), (x + w, y + h), (0, 255, 0),
                                  1)
                    cv2.putText(frame2, "No of faces {}".format(facecount),
                                (50, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
                else:
                    if facecount > 0:
                        #print("Face count:")
                        #print(facecount)
                        facecount = 0
                cv2.imshow('frame', frame2)

            if cv2.waitKey(1) & 0xFF == 27:
                break

        cap.release()
        cv2.destroyAllWindows()
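#A minimal sketch of the module-level distMap() helper the method above relies on
#(an assumption based on how it is called; the original definition is not shown
#in this snippet): a per-pixel color distance between two frames, scaled to 8-bit.
import numpy as np

def distMap(frame1, frame2):
    diff = np.float32(frame1) - np.float32(frame2)
    # Euclidean distance over the three color channels, normalized to [0, 255]
    norm = np.sqrt(np.sum(diff ** 2, axis=2)) / np.sqrt(3 * 255 ** 2)
    return np.uint8(norm * 255)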
Example #55
0
import cv2

original_img = cv2.imread('supremacy.jpg', 0)
template = cv2.imread('template_from_original.png', 0)
w, h = template.shape[::-1]

method = cv2.TM_CCOEFF

result = cv2.matchTemplate(
    original_img, template, method
)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

# For TM_CCOEFF (and other non-SQDIFF methods) the best match is at the maximum
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)

cv2.imshow('Original', original_img)

cv2.rectangle(original_img, top_left, bottom_right, 255, 2)
cv2.imshow('Supremacy Detection', original_img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
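# For reference (a sketch, not part of the original example): with a squared-
# difference method the convention flips and the best match is at the minimum.
# It runs on a fresh copy of the image, since a rectangle was drawn above.
clean_img = cv2.imread('supremacy.jpg', 0)
sq_result = cv2.matchTemplate(clean_img, template, cv2.TM_SQDIFF_NORMED)
_, _, sq_min_loc, _ = cv2.minMaxLoc(sq_result)
sq_top_left = sq_min_loc  # lower score means a better match for TM_SQDIFF*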
Example #56
0
    tiempo = time.time()
    with picamera.PiCamera(resolution=(2592, 1944),
                           framerate=2,
                           sensor_mode=0,
                           clock_mode='reset') as camera:
        camera.exposure_mode = 'sports'
        #with picamera.array.PiRGBArray(camera) as output:
        with PiRGBAArray(camera) as output:
            while contador < 1000:
                #if contador == 1:
                #    print(camera.sensor_mode)
                #lrs = piCameraStream.__next__()
                #imageArray = lrs.array
                #lowResCap.truncate(0)
                GPIO.output(pinInput, GPIO.HIGH)
                tiempoCapture1 = time.time()
                camera.capture(output, format='rgba', use_video_port=True)
                print('Capture only: ', time.time() - tiempoCapture1)
                GPIO.output(pinInput, GPIO.LOW)
                #print('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))
                output.truncate(0)
                #print(type(output.array))
                cv2.imshow('Imagen', cv2.resize(output.array, (320, 240)))
                #cv2.imwrite(rutaDeGuardado+'imagen_{}.jpg'.format(contador), imageArray)
                print('Full time: ', time.time() - tiempo)
                tiempo = time.time()
                contador += 1
                ch = 0xFF & cv2.waitKey(1)
                if ch == ord('q'):
                    break
Example #57
0
    canny_output = cv.Canny(image, t, t * 2)
    cv.imshow("canny_output", canny_output)
    cv.imwrite("../result/canny_output.png", canny_output)
    return canny_output


src = cv.imread("../images/4.jpeg")
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)
binary = canny_demo(src)
k = np.ones((3, 3), dtype=np.uint8)
binary = cv.morphologyEx(binary, cv.MORPH_DILATE, k)

# Contour detection
contours, hierarchy = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for c in range(len(contours)):
    # x, y, w, h = cv.boundingRect(contours[c]);
    # cv.drawContours(src, contours, c, (0, 0, 255), 2, 8)
    # cv.rectangle(src, (x, y), (x+w, y+h), (0, 0, 255), 1, 8, 0);
    rect = cv.minAreaRect(contours[c])
    cx, cy = rect[0]
    box = cv.boxPoints(rect)
    box = np.int0(box)
    cv.drawContours(src,[box],0,(0,0,255),2)
    cv.circle(src, (np.int32(cx), np.int32(cy)), 2, (255, 0, 0), 2, 8, 0)

# Display and save the result
cv.imshow("contours_analysis", src)
cv.imwrite("../result/contours_analysis.png", src)
cv.waitKey(0)
cv.destroyAllWindows()
def display_image(img_file):
    img = cv2.imread(img_file)
    cv2.imshow(img_file, img)
    cv2.waitKey(100)
Example #59
0
                centroid = (bbox[0] + (bbox[2] / 2), bbox[1] + (bbox[3] / 2))
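                # d2 is the current distance from the tracked centroid to the
                # reference point `relpos` (set earlier in the original script);
                # the speed estimate below is the change in that distance over
                # the time elapsed since the previous measurement.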
                d2 = math.sqrt(((centroid[0] - relpos[1]) *
                                (centroid[0] - relpos[1])) +
                               ((centroid[1] - relpos[0]) *
                                (centroid[1] - relpos[0])))
                speed = (d1 - d2) / (time.time() - t1)
                t1 = time.time()
                d1 = d2
                sp = speed
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 110),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, "Speed: " + str(sp), (100, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)
        loop = loop + 1
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
Example #60
0
    def buildConfigFiles(self):
        # open the classes output file
        f = open(config.CLASSE_FILE, "w")

        # path_file_classes = os.path.dirname(config.CLASSE_FILE)
        # file_classes_yolo = open(path_file_classes + "/obj.names", "w")
        config_yolo.OBJ_NAMES_YOLO = os.path.join(config_yolo.RECORDS_YOLO,
                                                  'obj.names')
        file_classes_yolo = open(config_yolo.OBJ_NAMES_YOLO, "w")

        # loop over the classes

        count_classes = 0
        for (k, v) in config.CLASSES.items():
            # construct the class information and write to file
            item = ("item {\n"
                    "\tid: " + str(v) + "\n"
                    "\tname: '" + k + "'\n"
                    "}\n")
            f.write(item)
            file_classes_yolo.write(k + '\n')
            count_classes = count_classes + 1
        # close the output classes files
        f.close()
        file_classes_yolo.close()

        # initialize a data dictionary used to map each image filename
        # to all bounding boxes associated with the image, then load
        # the contents of the annotations file
        D = {}
        rows = open(config.ANNOT_PATH).read().strip().split("\n")

        for row in rows[1:]:
            # break the row into components
            row = row.split(",")[0].split(";")
            try:
                (imagePath, label, startX, startY, endX, endY) = row
            except:
                (imagePath, label, startX, startY, endX, endY, *_) = row

            (startX, startY) = (float(startX), float(startY))
            (endX, endY) = (float(endX), float(endY))

            # if we are not interested in the label, ignore it
            if label not in config.CLASSES:
                continue

            # build the path to the input image, then grab any other
            # bounding boxes + labels associated with the image
            p = os.path.sep.join([config.BASE_PATH, imagePath])
            b = D.get(p, [])

            # build a tuple consisting of the label and bounding box,
            # then update the list and store it in the dictionary
            b.append((label, (startX, startY, endX, endY)))
            D[p] = b

        # create training and testing splits from our data dictionary
        (trainKeys, testKeys) = train_test_split(
            list(D.keys()),
            test_size=float(self.ids.text_input_TestProportion.text),
            random_state=42)

        # initialize the data split files
        datasets = [("train", trainKeys, config.TRAIN_RECORD),
                    ("test", testKeys, config.TEST_RECORD)]
        # loop over the datasets
        for (dType, keys, outputPath) in datasets:
            # initialize the TensorFlow writer and the total
            # number of examples written to file

            # create train and test txt files for yolo
            config_yolo.TRAIN_TEST_YOLO = os.path.dirname(outputPath)
            f = open(config_yolo.TRAIN_TEST_YOLO + "/train.txt", "w")
            f.writelines("%s\n" % item for item in trainKeys)
            f.close()
            f = open(config_yolo.TRAIN_TEST_YOLO + "/test.txt", "w")
            f.writelines("%s\n" % item for item in testKeys)
            f.close()

            print("[INFO] processing '{}'...".format(dType))
            writer = tf.python_io.TFRecordWriter(outputPath)
            total = 0

            # loop over all the keys in the current set
            for k in keys:
                # load the input image from disk as a TensorFlow object
                # print(k)
                # input("Press Enter to continue...")
                encoded = tf.gfile.GFile(os.path.join(config.BASE_PATH, k),
                                         "rb").read()
                encoded = bytes(encoded)

                # load the image from disk again, this time as a PIL
                # object
                pilImage = Image.open(k)
                (w, h) = pilImage.size[:2]

                # parse the filename and encoding from input path
                filename = k.split(os.path.sep)[-1]
                encoding = filename[filename.rfind(".") + 1:]

                # initialize the annotation object used to store
                # information regarding the bounding box + labels
                tfAnnot = tfannotation.TFAnnotation()
                tfAnnot.image = encoded
                tfAnnot.encoding = encoding
                tfAnnot.filename = filename
                tfAnnot.width = w
                tfAnnot.height = h

                # loop over the bounding boxes + labels associated with
                # the image

                #create .txt for each image
                image_path_yolo = k
                filename_yolo, file_extension_yolo = os.path.splitext(
                    image_path_yolo)
                file_path_yolo = image_path_yolo.replace(
                    file_extension_yolo, ".txt")

                # Get list of classes yolo - it will be used to get the idx
                file_classes_yolo = open(config_yolo.OBJ_NAMES_YOLO, "r")
                labels_yolo = file_classes_yolo.readlines()
                file_classes_yolo.close()

                file = open(file_path_yolo, 'w')
                for (label, (startX, startY, endX, endY)) in D[k]:
                    # TensorFlow assumes all bounding boxes are in the
                    # range [0,1] so we need to scale them

                    xMin = startX / w
                    xMax = endX / w
                    yMin = startY / h
                    yMax = endY / h

                    #YOLO format

                    width = endX - startX
                    height = endY - startY
                    Yolo_x = (startX + (width / 2)) / w
                    Yolo_y = (startY + (height / 2)) / h
                    Yolo_width = abs(width / w)
                    Yolo_height = abs(height / h)
                    # print(k)
                    # print(D[k])
                    # print('{} {:6f} {:6f} {:6f} {:6f}\n'.format(label, Yolo_x, Yolo_y, Yolo_width, Yolo_height))
                    # input("Press Enter to continue...")

                    label_yolo_idx = labels_yolo.index(label + '\n')
                    file.write('{} {:6f} {:6f} {:6f} {:6f}\n'.format(
                        label_yolo_idx, Yolo_x, Yolo_y, Yolo_width,
                        Yolo_height))
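                    # Worked example (illustrative numbers): a box from (200, 150)
                    # to (300, 200) in an 800x600 image is written as
                    # "<class-idx> 0.312500 0.291667 0.125000 0.083333"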

                    if self.countVerifyImages < 5:
                        image = cv2.imread(k)
                        startX = int(xMin * w)
                        startY = int(yMin * h)
                        endX = int(xMax * w)
                        endY = int(yMax * h)

                        cv2.rectangle(image, (startX, startY), (endX, endY),
                                      (0, 255, 0), 2)

                        root = tk.Tk()
                        screen_width = root.winfo_screenwidth()
                        screen_height = root.winfo_screenheight()
                        image = cv2.resize(image,
                                           (screen_width, screen_height),
                                           interpolation=cv2.INTER_AREA)
                        cv2.imshow("Image", image)
                        cv2.moveWindow("Image", 20, 20)
                        cv2.waitKey(0)
                        cv2.destroyAllWindows()
                        self.countVerifyImages += 1

                    # update the bounding boxes + labels lists
                    tfAnnot.xMins.append(xMin)
                    tfAnnot.xMaxs.append(xMax)
                    tfAnnot.yMins.append(yMin)
                    tfAnnot.yMaxs.append(yMax)
                    tfAnnot.textLabels.append(label.encode("utf8"))
                    tfAnnot.classes.append(config.CLASSES[label])
                    tfAnnot.difficult.append(0)

                    total += 1

                # encode the data point attributes using the TensorFlow
                # helper functions
                features = tf.train.Features(feature=tfAnnot.build())
                example = tf.train.Example(features=features)

                # add the example to the writer
                writer.write(example.SerializeToString())

            # close the writer and print diagnostic information to the
            # user
            writer.close()
            print("[INFO] {} examples saved for '{}'".format(total, dType))

        # create obj.data for yolo
        config_yolo.OBJ_DATA_YOLO = os.path.join(config_yolo.RECORDS_YOLO,
                                                 'obj.data')
        f = open(config_yolo.OBJ_DATA_YOLO, "w")
        print(f)
        f.writelines("classes = " + str(count_classes) + "\n")
        f.writelines("train = " + config_yolo.RECORDS_YOLO + "/train.txt\n")
        f.writelines("valid = " + config_yolo.RECORDS_YOLO + "/test.txt\n")
        f.writelines("names = " + config_yolo.RECORDS_YOLO + "/obj.names\n")
        f.close()
        config_yolo.NUM_CLASSES = count_classes
        self.manager.get_screen(
            'FineTuningScreenPipelineName').buildSelectModel()
        self.manager.current = 'FineTuningScreenPipelineName'