Example #1
def ProcessFrame(frame):
  out = obj()
  frameOut = frame.copy()
  frame = norm(frame)
  mean, std = cv2.meanStdDev(frame)
  r = dist(frame, (mean[0], mean[1], mean[2]))
  mean, std = cv2.meanStdDev(r)
  print "m: %d, std %d" % (mean, std)
  #r = frame[:, :, 2]
  r = cv2.GaussianBlur(r, (9, 9), 0)
  debugFrame("ChannelOfInterest", r)
  edges = cv2.Canny(r, std * 3.5,  1.2* std)
  debugFrame("edges", edges)
  

  lines = cv2.HoughLinesP(edges, 4, math.pi/180, 200, minLineLength = 100, maxLineGap = 50)
  if lines is not None:
    print "numLines: %d" % len(lines[0])
    for line in lines[0]:
      p1 = (line[0], line[1])
      p2 = (line[2], line[3])
      dx = p1[0] - p2[0]
      dy = abs(p1[1] - p2[1])
      theta = math.atan2(dy, dx)
      if abs(theta - math.pi/2) <  10 *math.pi/180:
        cv2.line(frameOut, p1, p2, (255, 0, 255), 5)
  
  """
  Parameter info for HoughCircles:
    image      the image the function operates on
    method     cv2.CV_HOUGH_GRADIENT is the only method available
    dp         inverse ratio of the accumulator resolution to the input image;
               dp = 2 means the accumulator has half the dimensions of the input
    minDist    minimum distance between detected circles; too small and multiple
               neighbouring circles are found, too big and neighbouring circles
               are merged into one
    param1     the higher threshold passed to Canny; unsure why it is useful, so not used here
    param2     accumulator vote threshold
    minRadius  minimum radius of circle to look for
    maxRadius  maximum radius of circle to look for
  """
  maxrad = 300
  minrad = 10
  step = 50
  for radius in range(minrad + step, maxrad + 1, step):
    circles =  cv2.HoughCircles(image = r, method = cv2.cv.CV_HOUGH_GRADIENT, dp = .5, minDist =  radius * 2, param2 = int((2 * radius * math.pi)/15), minRadius = radius - step, maxRadius = radius)
    msg = "minRadius: %d, maxRadius %d" % (radius - step, radius)
    if circles is not None:
      print msg + " found: %d" % (len(circles))
      for circ in circles[0,:]:
        out.append(circ)
        out.draw(frameOut)
        
    else:
      print msg + " no circ found"
      
  
  
  frameOut = out.draw(frameOut)
  debugFrame("houghProb", frameOut)
  print "-------------------------------"
  
  return out
Example #2
def discriminatory_power(grayscale):
    box = utilities.get_box(grayscale, utilities.get_middle(grayscale), 100)
    box_mean, box_std_dev = cv2.meanStdDev(box)
    all_mean, all_std_dev = cv2.meanStdDev(grayscale)
    # Return format is a bit weird ...
    box_var = box_std_dev[0][0]**2
    all_var = all_std_dev[0][0]**2
    box_mean = box_mean[0][0]
    all_mean = all_mean[0][0]
    return (box_mean - all_mean)**2 / (box_var + all_var)
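A self-contained sketch of the same criterion on synthetic data (no utilities dependency; the 100-px centre box from the snippet is hard-coded):

import cv2
import numpy as np

img = np.full((400, 400), 60, np.uint8)    # dark background
img[150:250, 150:250] = 200                # bright centre box

box = img[150:250, 150:250]                # stand-in for utilities.get_box(...)
box_mean, box_std = cv2.meanStdDev(box)
all_mean, all_std = cv2.meanStdDev(img)
power = (box_mean[0][0] - all_mean[0][0]) ** 2 / (box_std[0][0] ** 2 + all_std[0][0] ** 2)
print(power)   # large when the centre box stands out from the rest of the frame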
Example #3
def ProcessFrame(frame):
  print("called")
  out = obj()
  frameOut = frame.copy()
  HEIGHT,WIDTH,_  = frame.shape
  contImg = np.zeros((HEIGHT,WIDTH,3), np.uint8)
  frame = norm(frame)
  mean, std = cv2.meanStdDev(frame)

  r = dist(frame, (mean[0], mean[1], mean[2]))
  mean, std = cv2.meanStdDev(r)
  print "m: %d, std %d" % (mean, std)
  #r = frame[:, :, 2]
  r = cv2.GaussianBlur(r, (9, 9), 0)
  debugFrame("red", r)
  if std > 6:
    edges = cv2.Canny(r, std * 1.8 , std * 1.2)
  else:
    edges = cv2.Canny(r, 30 , 20)
  debugFrame("edges", edges)
  lines = cv2.HoughLinesP(edges, 4, math.pi/180, 200, minLineLength = 100, maxLineGap = 50)
  if lines is not None:
    print "numLines: %d" % len(lines[0])
    for line in lines[0]:
      p1 = (line[0], line[1])
      p2 = (line[2], line[3])
      dx = p1[0] - p2[0]
      dy = abs(p1[1] - p2[1])
      theta = math.atan2(dy, dx)
      if abs(theta - math.pi/2) <  10 *math.pi/180:
        cv2.line(frameOut, p1, p2, (255, 0, 255), 5)

  contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  dice = []
  for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    if area < WIDTH * HEIGHT / 2:
      cv2.drawContours(contImg, contours, i, (255, 255, 255), 5)
      x,y,w,h = cv2.boundingRect(contours[i])
      if w * h > 700 and h != 0 and 0.5 < float(w) / h < 2:
        cv2.rectangle(frameOut,(x,y),(x+w,y+h),(0,0,255),2)
        dice.append([x + w/2, y + h/2, (w+h) / 2])

  
  debugFrame("contours", contImg)
  print "Contours: ", len(contours)
  if(len(dice) >= 2):
    for die in dice:
      out.append(die)
  frameOut = out.draw(frameOut)

  debugFrame("houghProb", frameOut)
  print "-------------------------------"
  
  return out
Example #4
def mean_diff(grayscale, box=None):
    if box is None:
        box = utilities.get_box(grayscale, utilities.get_middle(grayscale), 100)
    box_mean, box_std_dev = cv2.meanStdDev(box)
    all_mean, all_std_dev = cv2.meanStdDev(grayscale)
    # print(abs(box_mean - all_mean), 70*box_std_dev**2)
    import math
    alpha = 0.01
    print(abs(box_mean - all_mean)[0][0]*alpha, (1-alpha)*box_std_dev[0][0]**2, 0*all_std_dev[0][0]**2)
    # return abs(box_mean - all_mean) - 70*box_std_dev**2# - 30*all_std_dev**2
    return alpha*abs(box_mean - all_mean)[0][0] - (1-alpha)*box_std_dev[0][0]**2 - 0*all_std_dev[0][0]**2
Example #5
def ProcessFrame(frame):
  out = obj([False, 0, 0])
  frameOut = frame.copy()
  frame = norm(frame)
  mean, std = cv2.meanStdDev(frame)
  print mean
  r =  cv2.cvtColor(frameOut, cv2.COLOR_BGR2GRAY)
  mean, std = cv2.meanStdDev(r)
  print "m: %d, std %d" % (mean, std)
  #r = frame[:, :, 2]
  debugFrame("COI", r)
  r = cv2.GaussianBlur(r, (7, 7), 0)
  edges = cv2.Canny(r, std * 2.0 , 1.3 * std)
  debugFrame("edges", edges)
  

  lines = cv2.HoughLines(edges, 3, math.pi/180, 110)
  poles = []
  if lines is not None:
    print "numLines: %d" % len(lines[0])
    for line in lines[0]:
      r = line[0]
      theta = line[1]
      if ( abs(theta - round((theta/math.pi)) * math.pi) < 3.0 * math.pi / 180.0):
        a = math.cos(theta)
        b = math.sin(theta)
        x0 = a * r
        y0 = b * r
        pt1 = (int(x0 - 100*b), int(y0 + 100 * a))
        pt2 = (int(x0 + 100 * b), int(y0 - 100 * a))
        poles.append(((x0, y0), pt1, pt2))
        cv2.line(frameOut, pt1, pt2, (255, 0, 255), 5)
  
  #filtering out the matching poles
  poles = sorted(poles, key = sortPoles)
  if len(poles) > 0:
    gatePoles = [poles[0]]
    for pole in poles:
      if abs(pole[0][0]  - gatePoles[-1][0][0]) > 80:
        gatePoles.append(pole)
    for pole in gatePoles:
      cv2.line(frameOut, pole[1], pole[2], (0, 0, 255), 10)
    if len(gatePoles) == 2:
      out = obj([True, gatePoles[0][0][0], gatePoles[1][0][0]])
     
    
    print "len Poles: %d, len gatePoles: %d" % (len(poles), len(gatePoles))
  
  
  frameOut = out.draw(frameOut)
  debugFrame("houghReg", frameOut)
  print "-------------------------------"
  
  return out  
Example #6
def discriminatory_power(img, params):
    """
    Taken from "Autonomous Robotic Vehicle Road Following, 1988"
    """
    mean1, std_dev1 = cv2.meanStdDev(extract_bb(img, params.fg_bb), mask=params.fg_mask_bb)
    mean2, std_dev2 = cv2.meanStdDev(img, mask=params.bg_mask)
    # Return format is a bit weird ...
    var1 = std_dev1[0][0]**2
    var2 = std_dev2[0][0]**2
    mean1 = mean1[0][0]
    mean2 = mean2[0][0]
    return (mean1 - mean2)**2 / (var1 + var2)
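In the paper's notation this is the two-class separability criterion

    P = (\mu_1 - \mu_2)^2 / (\sigma_1^2 + \sigma_2^2)

which grows when the foreground and background means differ while both regions stay internally uniform.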
def filterImage(frame):
    frame = cv2.resize(frame, (0,0), fx=SCALE, fy=SCALE)
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    mean, stddev = cv2.meanStdDev(frame)
    frame = cv2.subtract(frame, mean)
    frame = cv2.normalize(frame,None,0,255,cv2.NORM_MINMAX)
    return frame
Example #8
def waitForFish(cap, loc, timeout = 15):
  start_time = time.time()
  dist_thresh = 10
  while cap.isOpened():
    frame = getFrame(cap)
    frame = cv2.bitwise_and(frame, circle_mask)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_circle_mask = cv2.cvtColor(circle_mask, cv2.COLOR_BGR2GRAY)
    img_mean, img_std_dev = cv2.meanStdDev(gray, mask=gray_circle_mask)

    circles = cv2.HoughCircles(gray,cv2.cv.CV_HOUGH_GRADIENT,1, minDist=50, param1=50,param2=30,minRadius=20,maxRadius=40)

    if circles is not None:
      circles = np.uint16(np.around(circles))
      for circle_num, i in enumerate(circles[0,:]):
        center = (i[0], i[1])
        radius = i[2]

        fish_mask = np.zeros(frame.shape, np.uint8)


        cv2.circle(fish_mask, center, radius, (255, 255, 255),-1)
        if dist(loc, center) > dist_thresh:
          return True

    elapsed_time = time.time() - start_time
    if elapsed_time > timeout:
      return False
def LumConDrift(bgImg,fundusMask): 
    m,n = bgImg.shape
    tsize = 50
    indx=0
    indy=0
    i = tsize
   
    ldrift = np.zeros((int(m/tsize),int(n/tsize)),np.float64)
    cdrift = np.zeros((int(m/tsize),int(n/tsize)),np.float64)
    while(i<m):
        j = tsize
        while(j<n):           
            if (i+tsize>=m and j+tsize<n):
                block = bgImg[i-tsize:m, j-tsize:j+tsize]
            elif (i+tsize<m and j+tsize>=n):
                block = bgImg[i-tsize:i+tsize, j-tsize:n]
            elif (i+tsize>=m and j+tsize>=n):
                block = bgImg[i-tsize:m, j-tsize:n]
            else :
                block = bgImg[i-tsize:i+tsize, j-tsize:j+tsize]
            mean,std = cv2.meanStdDev(block)
            ldrift[indx,indy] = mean
            cdrift[indx,indy] = std
            indy = indy+1
            j = j+tsize
        indy = 0
        indx = indx+1
        i = i+tsize
    ldrift = cv2.resize(ldrift,(n,m),interpolation = cv2.INTER_CUBIC)
    cdrift = cv2.resize(cdrift,(n,m),interpolation = cv2.INTER_CUBIC)
    ldrift = cv2.multiply(ldrift,fundusMask.astype(float))
    cdrift = cv2.multiply(cdrift,fundusMask.astype(float))
    return ldrift,cdrift
Example #10
def normalise(image):
   
    dbl_image = image.astype(float)
    norm_im = dbl_image

    # finding mean and standard deviation
    # Note: mean_stddev is a tuple where: mean = mean_stddev[0], std = mean_stddev[1]
    meanStd = cv2.meanStdDev(dbl_image)    
    required_mean = 0
    required_stddev = 1
    
    # mapping coordinates where pixel is more or less than mean
    x0,y0 = np.where(norm_im >= meanStd[0])
    x1,y1 = np.where(norm_im < meanStd[0])
    
    # computing normalization
    norm_im = dbl_image - meanStd[0]
    norm_im = norm_im**2
    norm_im = norm_im/meanStd[1]
    
    # separating foreground from background
    if meanStd[1] > 20:
        norm_im[x0,y0] = required_mean + np.sqrt(required_stddev*norm_im[x0,y0]) 
        norm_im[x1,y1] = required_mean - np.sqrt(required_stddev*norm_im[x1,y1])
            
    else:
        norm_im[x1,y1] = 1 # 1 is the maximum desired value
        norm_im[x0,y0] = 1
   
    return norm_im
def im_mask(im, sigma=1, image_is_grayscale=False):
    if image_is_grayscale:
        im_gray = im.copy()
    else:
        im_gray = rgb2gray(im)
    card_color_mu, card_color_std = cv2.meanStdDev(im_gray)
    return threshold_image(im_gray, card_color_mu, card_color_std, sigma).astype(np.uint8)
Example #12
File: ct.py Project: tsaith/vision
    def update_classifier(self, mu, sigma, feature_values, learn_rate):
        """
        Update the mean and standard deviation of the classifier.

        Parameters
        ----------
        mu : array-like, shape (n_features)
            The mean values.

        sigma : array-like, shape (n_features)
            The standard deviations.

        feature_values : array-like, shape (n_features, n_samples)
            Feature values.

        learn_rate : scalar
            Learning rate.
        """

        one_m_learn_rate = 1.0 - learn_rate

        for i in np.arange(self._n_features):
            mu_tmp, sigma_tmp = cv2.meanStdDev(feature_values[i, :])
            mu[i] = learn_rate*mu[i] + one_m_learn_rate*mu_tmp
            sigma[i] = np.sqrt(
                learn_rate*sigma[i]*sigma[i] +
                one_m_learn_rate*sigma_tmp*sigma_tmp +
                learn_rate*one_m_learn_rate*(sigma[i]-sigma_tmp)*(sigma[i]-sigma_tmp))
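A standalone sketch of one update step, mirroring the rule above for a single feature (the class plumbing and the loop over self._n_features are stripped; the sample values are made up):

import cv2
import numpy as np

learn_rate = 0.85
mu, sigma = 0.0, 1.0                                  # current classifier parameters
samples = (np.random.randn(1, 50) * 2.0 + 3.0).astype(np.float32)

mu_tmp, sigma_tmp = cv2.meanStdDev(samples)
mu_tmp, sigma_tmp = mu_tmp[0][0], sigma_tmp[0][0]

mu = learn_rate * mu + (1.0 - learn_rate) * mu_tmp
sigma = np.sqrt(learn_rate * sigma * sigma +
                (1.0 - learn_rate) * sigma_tmp * sigma_tmp +
                learn_rate * (1.0 - learn_rate) * (sigma - sigma_tmp) ** 2)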
Example #13
def getTileValue(img, tileImgsAndValues):

    # First check std-dev of image to see if tile is blank
    means, stdDevs = cv2.meanStdDev(img)
    maxStdDev = np.amax(stdDevs)
    stdDevThreshold = 7.5
    if maxStdDev < stdDevThreshold:
        print("Blank tile detected using std-dev " + str(maxStdDev))
        return " "

    bestMatchValue = "X"
    bestMatchFactor = 0
    imgInGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for imgAndValue in tileImgsAndValues:
        # Apply template Matching
        method = cv2.TM_CCOEFF_NORMED
        res = cv2.matchTemplate(imgInGray, imgAndValue["img"], method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if bestMatchFactor < max_val:
            bestMatchFactor = max_val
            bestMatchValue = imgAndValue["value"]

    threshold = 0.2
    if bestMatchFactor > threshold:
        print("Found match to value " + bestMatchValue + " matchFactor " + str(bestMatchFactor) + " maxColrStdDev " + str(maxStdDev))
        return bestMatchValue

    return "X"
Example #14
File: WikiNorm.py Project: efoco/Python
def normalise(image):

    dbl_image = image.astype(float)
    
    # computing normalization
    currentMin = np.min(dbl_image)
    currentMax = np.max(dbl_image)
    currentRange = currentMax - currentMin
    newMin = 0
    newMax = 1
    newRange = newMax - newMin
    
    # calculating mean and standard deviation
    meanStd = cv2.meanStdDev(dbl_image)
    
    norm_im = dbl_image
    # Regions with a low standard deviation are assumed NOT to be regions of interest;
    # their values are close to currentMax, so they are mapped to the brightest possible value -> 1.
    if meanStd[1] > 20:
        norm_im = (dbl_image - currentMin)*(newRange / currentRange)
    else:
        norm_im = norm_im / currentMax
    
    return norm_im
 def analyse( self, painting ):
     try:
         image = cv2.imread(painting.filePath)
         mean,std_dev = cv2.meanStdDev(image)
         return numpy.hstack((numpy.float32(mean), numpy.float32(std_dev)))
     except IOError as e:
         print 'Unable to load painting "{0}". {1}'.format(painting.title, e)
Example #16
File: treXton.py Project: Pold87/treXton
def do_dipole_transformations(img):

    # Calculate gradients
    sobelx = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3, scale=4)
    sobely = cv2.Sobel(img, cv2.CV_8U, 0, 1, ksize=3, scale=4)

    # Color invariant

    mean_x, std_x = cv2.meanStdDev(sobelx)
    mean_y, std_y = cv2.meanStdDev(sobely)

    invar_x = sobelx / std_x.T
    invar_y = sobely / std_y.T

    invar_grad = np.sqrt(invar_x ** 2 + invar_y ** 2)
    invar_direction = np.arctan(invar_y / (invar_x + 1e-8))

    return invar_grad, invar_direction
Example #17
def edge_color_average(context):
    ''' Feature that computes the average
    edge color of the region of interest.

    :param context: The context to calculate with
    :returns: The score for this feature
    '''
    edges = context.region_edges
    (mean, stds) = cv2.meanStdDev(edges)
    return np.concatenate([mean, stds]).flatten()
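For a 3-channel region the result is a 6-element feature: the per-channel means followed by the per-channel standard deviations. A quick check with synthetic data:

import cv2
import numpy as np

region = np.random.randint(0, 256, (64, 64, 3), np.uint8)   # stand-in for context.region_edges
mean, stds = cv2.meanStdDev(region)
feat = np.concatenate([mean, stds]).flatten()
print(feat.shape)   # (6,) -> [B_mean, G_mean, R_mean, B_std, G_std, R_std]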
 def variation(faces, intercept, slope):
     errorArray=[]
     for (x, y, w, h) in faces:
         faceX=x+(w/2)
         faceY=y+(h/2)
         lineFaceY= intercept + faceX*slope
         error = lineFaceY - faceY
         errorArray.append(error)
     mean,sd = cv2.meanStdDev(numpy.array(errorArray))
     mean,sd = mean[0][0],sd[0][0]
     return (mean,sd)
Example #19
def region_color_average(context):
    ''' Feature that computes the average
    color of the region of interest.

    :param context: The context to calculate with
    :returns: The score for this feature
    '''
    crop = context.cropped_region
    (mean, stds) = cv2.meanStdDev(crop)
    return np.concatenate([mean, stds]).flatten()
Example #20
def reduceSat(img):
    _,s,_ = cv2.split(img)
    mean, stdDev = cv2.meanStdDev(s)
    mean = int(mean[0])
    stdDev = int(stdDev[0])
    lower_sat = np.array([-1000,50,-1000])
    upper_sat = np.array([1000,200,3000])
    mask = cv2.inRange(img,lower_sat,upper_sat)
    img = cv2.bitwise_and(img,img,mask=mask)
    return img
    def determine_marker_quality(self, frame):
        (bright_regions, dark_regions) = self.generate_template_for_quality_estimator()
        # cv2.imshow("bright_regions", 255*bright_regions)
        # cv2.imshow("dark_regions", 255*dark_regions)

        try:
            frame_img = self.extract_window_around_maker_location(frame)
            (bright_mean, bright_std) = cv2.meanStdDev(frame_img, mask=bright_regions)
            (dark_mean, dark_std) = cv2.meanStdDev(frame_img, mask=dark_regions)

            mean_difference = bright_mean - dark_mean
            normalised_mean_difference = mean_difference / (0.5*bright_std + 0.5*dark_std)
            # Ugly hack for translating the normalised_mean_differences to the range [0, 1]
            temp_value_for_quality = 1 - 1/(1 + math.exp(0.75*(-7+normalised_mean_difference)))
            self.quality = temp_value_for_quality
        except Exception as e:
            print "error"
            print e
            self.quality = 0.0
            return
def process_frame_diff_optical_flow(video_path, output_path=""):
    cap = cv2.VideoCapture(video_path)
    first_frame, _ = grab_and_convert_frame(cap)
    # No more frames left to grab or something went wrong
    if first_frame is None:
        return

    writer = get_video_writer(cap, video_path, output_path)

    # Mixture of Gaussian background subtraction model
    fgbg = cv2.BackgroundSubtractorMOG2()
    fgmask_prev = fgbg.apply(first_frame)

    while True:
        # take only every third frame to speed things up
        frame, orig1 = grab_and_convert_frame(cap)
        frame, orig2 = grab_and_convert_frame(cap)
        frame, orig = grab_and_convert_frame(cap)
        orig3 = orig
        if frame is None:
            print('No frame could be grabbed, exiting video processing')
            break

        fgmask = fgbg.apply(frame)
        cv2.imshow('mask', fgmask)

        # resize original for easy viewing
        orig = imutils.resize(orig, width=600)
        cv2.imshow('orig', orig)
        flow = cv2.calcOpticalFlowFarneback(fgmask_prev, fgmask, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
        max_mag = np.amax(mag)
        mag = mag / max_mag

        mean, stdDev = cv2.meanStdDev(mag)
        variance = stdDev * stdDev

        print (variance)

        if variance > 0.004:
            writer.write(orig1)
            writer.write(orig2)
            writer.write(orig3)

        fgmask_prev = fgmask

        # Apply optical flow to the foreground mask

        k = cv2.waitKey(30) & 0xff
        if k == ord("q"):
            break
    writer.release()
    cap.release()
    cv2.destroyAllWindows()
Example #23
def main():
  cap = cv2.VideoCapture(1)
  pixel_len, robot_pts = calibrateBoard(cap)
  frame = getFrame(cap)
  height, width = frame.shape[:2]
  circle_mask, mask_center, mask_radius = makeCircleMask(frame, robot_pts)
  cv2.destroyAllWindows()

  while cap.isOpened():
    frame = getFrame(cap)
    frame = cv2.bitwise_and(frame, circle_mask)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_circle_mask = cv2.cvtColor(circle_mask, cv2.COLOR_BGR2GRAY)
    img_mean, img_std_dev = cv2.meanStdDev(gray, mask=gray_circle_mask)

    circles = cv2.HoughCircles(gray,cv2.cv.CV_HOUGH_GRADIENT,1, minDist=50, param1=50,param2=30,minRadius=20,maxRadius=40)

    if circles is not None:
      circles = np.uint16(np.around(circles))
      for circle_num, i in enumerate(circles[0,:]):
        center = (i[0], i[1])
        radius = i[2]

        fish_mask = np.zeros(frame.shape, np.uint8)
        #cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
        #cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)

        cv2.circle(fish_mask, center, radius, (255, 255, 255),-1)
        fish_mask = cv2.cvtColor(fish_mask, cv2.COLOR_BGR2GRAY)
        fish_mean, fish_std_dev = cv2.meanStdDev(hsv, mask=fish_mask)
        if (fish_mean[0][0] < 95) or (fish_mean[0][0] > 120):
          cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
          cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)



    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break
Example #24
 def train(self, region):
     x,y,w,h = region
     # Get region
     train = self.frame[y:y+h,x:x+w]
     cv2.imwrite('debug.jpg', train)
     # Compute bounds
     (l,a,b), (stdl,stda,stdb) = cv2.meanStdDev(train)
     ldelta = stdl*1
     adelta = stda*1
     bdelta = stdb*1
     self.minbound = np.array([l-ldelta, a-adelta, b-bdelta])
     self.maxbound = np.array([l+ldelta, a+adelta, b+bdelta])
Example #25
def reduceBrightness(img):
    #lower_brightness = np.array([100,0,200])
    #upper_brightness = np.array([200,200,300])
    _,_,b = cv2.split(img)
    mean,stdDev = cv2.meanStdDev(b)
    mean = int(mean[0])
    stdDev = int(stdDev[0])
    lower_brightness = np.array([-1000,-1000,mean+stdDev])
    upper_brightness = np.array([2000,1000,300])
    mask = cv2.inRange(img,lower_brightness,upper_brightness)
    img = cv2.bitwise_and(img,img,mask=mask)
    return img
Example #26
def normalize(img):
    """ Normalize image using mean and std """

    ret_img = numpy.zeros_like(img).astype(theano.config.floatX)

    for idxC in xrange(img.shape[2]):
        [mu, sigma] = cv2.meanStdDev(img[:, :, idxC])
        if sigma == 0:
            ret_img[:, :, idxC] = (img[:, :, idxC] - mu)
        else:
            ret_img[:, :, idxC] = (img[:, :, idxC] - mu) / sigma

    return ret_img
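A numpy-only equivalent of the per-channel standardisation above, without the theano dependency (the sigma == 0 guard is kept; "sample.jpg" is a hypothetical path):

import cv2
import numpy as np

img = cv2.imread("sample.jpg")                  # hypothetical input image
out = np.zeros(img.shape, np.float32)
for c in range(img.shape[2]):
    mu, sigma = cv2.meanStdDev(img[:, :, c])
    mu, sigma = mu[0][0], sigma[0][0]
    out[:, :, c] = (img[:, :, c] - mu) / (sigma if sigma != 0 else 1.0)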
Example #27
 def isBlank (self, img):
     '''test to see if the frame is uniform; None if error'''
     if (len(img.shape)==3):
         try:
             img = cv2.cvtColor(img, cv.CV_RGB2GRAY)
         except:
             return None
     # now img is gray image        
     mean, std = cv2.meanStdDev(img)
     if (std<self.blankThreshold):
         return True
     else:
         return False
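A quick check of the blank test (assuming blankThreshold is a few grey levels): a uniform frame has near-zero standard deviation.

import cv2
import numpy as np

flat = np.full((120, 160), 77, np.uint8)
_, std = cv2.meanStdDev(flat)
print(std[0][0] < 5)   # True -> the frame is "blank"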
Example #28
def calculate_roi_color(hsv):
	roi=hsv[110:200,10:100]
	#roi[:,:,2]=0
	avg,std=cv2.meanStdDev(roi)
	#avg,std=cv2.meanStdDev(roi)
	std=std*0.8
	maxColor=[round(sum(x)) for x in zip(avg,std)]

	minColor=[round(u-v) for (u,v) in zip(avg,std)]
	maxColor[2]=0
	minColor[2]=0
	print maxColor, "TO ", minColor
	return [maxColor,minColor]
Example #29
def detect_motion(motion):
    mean, std_dev = cv2.meanStdDev(motion)
    std_dev = std_dev[0, 0]
    if std_dev > config.MAX_DEVIATION:
        return 0, None, std_dev

    where = numpy.argwhere(motion == 255)
    number_of_changes = len(where)
    if number_of_changes:
        (min_y, min_x), (max_y, max_x) = where.min(0), where.max(0)
        return number_of_changes, (min_x, min_y, max_x, max_y), std_dev
    else:
        return number_of_changes, None, std_dev
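A usage sketch, assuming motion is a 0/255 difference mask and config.MAX_DEVIATION gates out global camera shake:

import cv2
import numpy as np

motion = np.zeros((240, 320), np.uint8)
motion[100:120, 50:90] = 255                     # synthetic moving blob

mean, std_dev = cv2.meanStdDev(motion)
print(std_dev[0, 0])                             # the deviation the gate checks
where = np.argwhere(motion == 255)
(min_y, min_x), (max_y, max_x) = where.min(0), where.max(0)
print(len(where), (min_x, min_y, max_x, max_y))  # 800 (50, 100, 89, 119)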
Example #30
	def ext(self,img,classnum):
		#Hu-moments Extraction
		gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
		blur = cv2.bilateralFilter(thresh,15,80,80)
		Humom=cv2.HuMoments(cv2.moments(blur)).flatten()
		#RGB Means extraction
		means = cv2.mean(img)
		means = means[:3]
		#Histogram extraction
		hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
		hist = hist.flatten()
		#Contour Hu
		gray2=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		thresh=cv2.adaptiveThreshold(gray2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
		blur = cv2.bilateralFilter(thresh,15,80,80)
		gray_lap = cv2.Laplacian(blur,cv2.CV_16S,ksize = 3,scale = 1,delta = 0)
		dst = cv2.convertScaleAbs(gray_lap)
		Humom2=cv2.HuMoments(cv2.moments(dst)).flatten()
		'''
		#Mode
		most_intensity=mode(img)[0][0]
		'''
		#Stats Extraction
		(means2, stds) = cv2.meanStdDev(img)
		stats = np.concatenate([means2, stds]).flatten()
		#Class appending
		Humom=np.append(Humom,classnum)
		means=np.append(means,classnum)
		hist=np.append(hist,classnum)
		stats=np.append(stats,classnum)
		Humom2=np.append(Humom2,classnum)
		#most_intensity=np.append(most_intensity,classnum)
		
		with open('HuMoments.csv', 'ab')as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(Humom)
		'''
		with open('RGB.csv', 'ab')as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(means)
		'''
		with open('MeanStdDev.csv', 'ab')as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(stats)

		with open('ContourHu.csv', 'ab')as csvfile:
			spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
			spamwriter.writerow(Humom2)
Example #31
    count += 1

# In[189]:

print(count)
print(chain)

#test image- FEATURE VECTOR
#Image feature vector of class 2 image
img.shape
#raw pixel feature vector of class 2 image
raw3 = img.flatten()
raw3.shape
raw3
#color mean descriptor of class 2 image
mean3 = cv2.mean(img)
mean3
#color mean and standard deviation of class 2 image
(mean3, std3) = cv2.meanStdDev(img)
mean3, std3
#combining mean and standard deviation
stat3 = np.concatenate([mean3, std3]).flatten()
stat3

# In[ ]:

#SUMMARY/CONCLUSION

#the image is converted to black and white (gray image) for simple storage and manipulation,
#and the boundary is extracted to get the chain code from the direction matrix.
Example #32
def varianceWeight(img1, img2):
    # cv2.meanStdDev returns standard deviations; square them to get variances.
    mean1, std1 = cv2.meanStdDev(img1)
    mean2, std2 = cv2.meanStdDev(img2)
    var1, var2 = std1 ** 2, std2 ** 2
    weight1 = var1 / (var1 + var2)
    weight2 = var2 / (var1 + var2)
    return weight1, weight2
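Variance-based weights like these are common in image fusion: they sum to 1 and the higher-contrast input dominates. A quick check, assuming the function above is in scope:

import cv2
import numpy as np

a = np.random.randint(0, 256, (64, 64), np.uint8)   # high-contrast input
b = np.full((64, 64), 128, np.uint8)                # flat input
w1, w2 = varianceWeight(a, b)
print(w1[0][0], w2[0][0])   # w1 == 1.0, w2 == 0.0; they always sum to 1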
Example #33
 def get_stats(self):
     means, stddevs  = cv2.meanStdDev(self.im)
     return means, stddevs
model_path = "model"

if not os.path.isdir(cat_feat_path):
    os.makedirs(cat_feat_path)

if not os.path.isdir(non_cat_feat_path):
    os.makedirs(non_cat_feat_path)

for im_path in glob.glob(os.path.join(cat_im_path, "*")):
    #print(im_path)
    cat_image = cv2.imread(im_path, 0)
    #cat_image = cv2.resize(cat_image, (360, 480))
    cat_image = cv2.resize(cat_image, (256, 256))  #Resizing
    cat_image = cv2.GaussianBlur(cat_image, (5, 5), 0)  #Denoising
    cat_image = cv2.equalizeHist(cat_image)
    mean, std = cv2.meanStdDev(cat_image)
    cat_image_mean = mean[0]
    cat_image_std = std[0]
    cat_image_entropy = np.array([shannon_entropy(cat_image)])

    #cat_image_feats = np.array([shannon_entropy(cat_image),0, 0,0]).reshape(1,4)

    #print(cat_image_mean)
    grey_mat = greycomatrix(cat_image, [1], [0],
                            256,
                            symmetric=True,
                            normed=True)
    fd_contrast = greycoprops(grey_mat, 'contrast')[0]
    fd_dissimilarity = greycoprops(grey_mat, 'dissimilarity')[0]
    fd_homogeneity = greycoprops(grey_mat, 'homogeneity')[0]
    fd_ASM = greycoprops(grey_mat, 'ASM')[0]
Example #35
 def scoreFocus(self):
     edges = self.getLaplacian()
     _, std = cv.meanStdDev(edges)
     return abs(std)
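The standard deviation of the Laplacian is a common focus score (sharper images have a stronger edge response). A self-contained sketch, assuming getLaplacian is roughly cv2.Laplacian of a grayscale frame:

import cv2
import numpy as np

img = cv2.imread("frame.jpg", cv2.IMREAD_GRAYSCALE)   # hypothetical path
lap = cv2.Laplacian(img, cv2.CV_64F)
_, std = cv2.meanStdDev(lap)
print(std[0][0])   # higher -> sharper, better focused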
Example #36
cv2.createTrackbar("Beta 2", "Sliders", 6, 10, change_b2)

cap = cv2.VideoCapture(1)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    if state is calibration_state:
        print("Current calibration frame:", calibration_frame_current)
        #split hsv channels
        h, s, v = cv2.split(frame_hsv)

        #calculate mean and stdv for current frames h and s channels
        buffer_Hmean, buffer_Hstdv = cv2.meanStdDev(h)
        buffer_Smean, buffer_Sstdv = cv2.meanStdDev(s)
        
        #accumulate the buffers
        Hmean += buffer_Hmean
        Hstdv += buffer_Hstdv
        Smean += buffer_Smean
        Sstdv += buffer_Sstdv

        calibration_frame_current += 1
        if calibration_frame_current == calibration_frame_max - 1:
            #calibration algorithm
            Hmean = Hmean / calibration_frame_max
            Hstdv = Hstdv / calibration_frame_max
            Smean = Smean / calibration_frame_max
            Sstdv = Sstdv / calibration_frame_max
def mean_and_standard_deviation(image1, image2):
  m1, stddev1 = cv.meanStdDev(image1)
  m2, stddev2 = cv.meanStdDev(image2)
  print(m1, m2) # Mean value for each channel
  print(stddev1, stddev2) # Standard Deviation for each channel
Example #38
 str_date = "18" + file[:10]
 date = datetime.strptime(str_date, "%y%m%d%H%M%S")
 p = int(file[-9]) - 1
 dates[p].append(date)
 delta = date - then
 hours[p].append(int(delta.total_seconds() / 3600))
 # Read images
 img_dir = dir + file
 mask_dir = crop_dir + os.path.splitext(file)[0] + "_1_mask.jpg"
 img = cv.imread(img_dir, -1)
 mask = cv.imread(mask_dir, 0)
 # Calibrate thermal image and find features
 Fdegree_img = -0.00000608 * np.float_power(
     img, 2) + 0.1715806300 * img - 920.665168
 not_mask = cv.bitwise_not(mask)
 mean, std = cv.meanStdDev(Fdegree_img, mask=mask)
 variance = (std[0][0])**2
 back_mean = cv.mean(Fdegree_img, not_mask)
 mean_diff = back_mean[0] - mean[0][0]
 avg[p].append(mean[0][0])
 var[p].append(variance)
 dif[p].append(mean_diff)
 # Output images for illustration of plant ROI
 normalizedImg = np.zeros((60, 80))
 normalizedImg = cv.normalize(img,
                              normalizedImg,
                              255,
                              0,
                              cv.NORM_MINMAX,
                              dtype=cv.CV_8U)
 file_name = odir + os.path.splitext(file)[0] + ".jpg"
Example #39
def others(img):
    m, std = cv.meanStdDev(img)
    print(m, std)
Example #40
    def _extract_bits(cls, gray, corners):
        """
        Extract the bits encoding the ID of the marker given the image and the marker's corners.
        First finds the perspective transformation matrix from the marker's "original" coordinates relative to the
        given image, then uses the transformation matrix to transform the entire image such that the marker's
        perspective is removed. Then performs thresholding on the marker (if appropriate) and counts the pixels in each
        cell (spatial area of one bit) to determine if "1" or "0".
        :param gray: grayscale image with the marker in question; undergoes a perspective transformation such that the
            original marker's perspective is removed, and analysis can occur
        :param corners: corner points of the marker in the grayscale image; must be in correct order (clockwise), such
            that the mapping from the marker's original coordinates to the marker's grayscale image coordinates is
            calculated correctly
        :return: 2-dimensional array of binary values representing the marker; for a 4x4 marker with default detector
            params, bits would be (4 inner bits + 2 border bits)^2 = 36 bits
        """
        # Initialize variables
        markerSize = FiducialMarker.get_marker_size(
        )  # size of inner region of marker (area containing ID information)
        markerBorderBits = cls.params[
            cls.markerBorderBits]  # size of marker border
        cellSize = cls.params[
            cls.
            perspectiveRemovePixelPerCell]  # size of "cell", area consisting of one bit of info.
        cellMarginRate = cls.params[
            cls.perspectiveRemoveIgnoredMarginPerCell]  # cell margin
        minStdDevOtsu = cls.params[
            cls.
            minOtsuStdDev]  # min. std. dev. needed to run Otsu thresholding

        # Run assertions
        assert len(gray.shape) == 2
        assert len(corners) == 4
        assert markerBorderBits > 0 and cellSize > 0 and cellMarginRate >= 0 and cellMarginRate <= 1
        assert minStdDevOtsu >= 0

        # Determine new dimensions of perspective-removed marker
        markerSizeWithBorders = markerSize + 2 * markerBorderBits
        cellMarginPixels = int(cellMarginRate * cellSize)
        resultImgSize = int(markerSizeWithBorders * cellSize)
        # Initialize corner matrix of perspective-removed marker to calculate perspective transformation matrix
        resultImgCorners = np.array(
            [[0, 0], [resultImgSize - 1, 0],
             [resultImgSize - 1, resultImgSize - 1], [0, resultImgSize - 1]],
            dtype=np.float32)

        # Get transformation and apply to original image
        transformation = cv2.getPerspectiveTransform(
            corners, resultImgCorners).astype(np.float32)
        result_img = cv2.warpPerspective(gray,
                                         transformation,
                                         (resultImgSize, resultImgSize),
                                         flags=cv2.INTER_NEAREST)

        # Initialize matrix containing bits output
        bits = np.zeros((markerSizeWithBorders, markerSizeWithBorders),
                        dtype=np.int8)

        # Remove some border to avoid noise from perspective transformation
        # Remember that image matrices are stored row-major-order, [y][x]
        inner_region = result_img[int(cellSize / 2):int(-cellSize / 2),
                                  int(cellSize / 2):int(-cellSize / 2)]

        # Check if standard deviation enough to apply Otsu thresholding
        # If not enough, probably means all bits are same color (black or white)
        mean, stddev = cv2.meanStdDev(inner_region)
        if stddev < minStdDevOtsu:
            if mean > 127:
                bits.fill(1)
            return bits

        # Because standard deviation is high enough, threshold using Otsu
        _, result_img = cv2.threshold(result_img, 125, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        for y in range(markerSizeWithBorders):
            for x in range(markerSizeWithBorders):
                # Get each individual square of each cell, excluding the margin pixels
                yStart = y * cellSize + cellMarginPixels
                yEnd = yStart + cellSize - 2 * cellMarginPixels
                xStart = x * cellSize + cellMarginPixels
                xEnd = xStart + cellSize - 2 * cellMarginPixels
                square = result_img[yStart:yEnd, xStart:xEnd]
                if cv2.countNonZero(square) > (square.size / 2):
                    bits[y][x] = 1
        return bits
import numpy as np

src = cv.imread("./pictures/factory.jpg", cv.IMREAD_GRAYSCALE)
# cv.IMREAD_GRAYSCALE always loads the image as a single-channel grayscale image

cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)

min1, max1, minLoc, maxLoc = cv.minMaxLoc(src)
# minMaxLoc finds the locations of the minimum and maximum values in a matrix
# (a 1-D array is treated as a vector). For multi-channel images it cannot
# report min/max coordinates, since each pixel holds several values.
print("min: %.2f, max: %.2f" % (min1, max1))
print("min loc: ", minLoc)
print("max loc: ", maxLoc)

means, stddev = cv.meanStdDev(src)
# cv.meanStdDev computes the mean and standard deviation of a matrix.
# src: input matrix, which should have 1-4 channels so the results fit in a Scalar_
# mean: output parameter, the computed mean
# stddev: output parameter, the computed standard deviation
print("mean: %.2f, stddev: %.2f" % (means, stddev))
src[np.where(src < means)] = 0
# np.where returns index arrays: a single array for a 1-D input, and two
# arrays (row and column indices) for a 2-D input, marking the matching positions.
src[np.where(src > means)] = 255  # together with the line above, binarize the image around its mean
cv.imshow("binary", src)

cv.waitKey(0)
cv.destroyAllWindows()
Example #42
while (True):
    _, frame3 = cap.read()
    hsv = cv2.cvtColor(frame3, cv2.COLOR_BGR2HSV)
    hsv2 = hsv.copy()
    shape = hsv.shape
    #print ("shape: ", shape)
    cv2.imshow('hsv', hsv)
    rows, cols, _ = np.shape(frame3)
    dist = distMap(frame1, frame3)
    #print ("shape dist: ", dist.shape)
    frame1 = frame2
    frame2 = frame3
    mod = cv2.GaussianBlur(dist, (9, 9), 0)
    _, thresh = cv2.threshold(mod, 100, 255, 0)
    _, stDev = cv2.meanStdDev(mod)

    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            if (mod[x, y] >= 200):
                hsv2[x, y, 0] = hsv2[x, y, 1] = hsv2[x, y, 2] = 0

    cv2.imshow('hsv2', hsv2)
    #dst = cv2.addWeighted(hsv, alpha, dist, beta, 0.0)

    #cv2.imshow('dst', dst)

    #cv2.imshow('dist', dist)
    cv2.putText(frame2, "Standard Deviation - {}".format(round(stDev[0][0],
                                                               0)), (70, 70),
                font, 1, (255, 0, 255), 1, cv2.LINE_AA)
def preprocess_image(img):
    gray = grayscale(img)
    numPixels = gray.shape[0] * gray.shape[1]
    mean, stdev = cv2.meanStdDev(gray)
    adj_stdev = max(stdev, math.sqrt(1.0 / numPixels))
    return (gray.astype(np.float32) - mean) / adj_stdev
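The adj_stdev floor mirrors the usual per-image standardisation trick (cf. TensorFlow's per_image_standardization): clamping the divisor at 1/sqrt(numPixels) keeps near-uniform images from exploding. A quick check on a flat image:

import cv2
import math
import numpy as np

gray = np.full((10, 10), 128, np.uint8)               # perfectly flat image
mean, stdev = cv2.meanStdDev(gray)
adj_stdev = max(stdev[0][0], math.sqrt(1.0 / gray.size))
out = (gray.astype(np.float32) - mean[0][0]) / adj_stdev
print(out.max())   # 0.0 -- no divide-by-zero despite zero variance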
    def detect_pole(self, img):
        roll_correction = None
        beam_thickness = None

        img = cv2.resize(img, (0, 0), fx=0.4, fy=0.4)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        h, s, v = cv2.split(hsv)
        r, g, b = cv2.split(img)

        values = b - r - g  # note: uint8 subtraction wraps modulo 256

        height, width = values.shape
        #values = cv2.blur(values,(width / 100,width / 100))

        mean, std = cv2.meanStdDev(values)

        ret, blobs = cv2.threshold(values, mean + std * 1.5, 255,
                                   cv2.THRESH_BINARY)

        blobs = np.uint8(blobs)

        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))

        blobs = cv2.morphologyEx(blobs, cv2.MORPH_CLOSE, kernel)

        im2, contours, hierarchy = cv2.findContours(blobs, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)

        maxVal = 0
        poleContour = None

        for c in contours:
            x, y, w, h = cv2.boundingRect(c)

            score = h * h / w
            if score > maxVal:
                maxVal = score
                poleContour = c

        if maxVal > 300:
            x, y, w, h = cv2.boundingRect(poleContour)
            area = cv2.contourArea(poleContour)
            thickness = 1.0 * area / h
            packet = []
            packet.append(maxVal)  #0
            packet.append(thickness)  #1
            packet.append(x + w / 2 - thickness / 2)  #2
            packet.append(y)  #3
            packet.append(x + w / 2 + thickness / 2)  #4
            packet.append(y + h)  #5
            packet.append(x + w / 2)  #6
            packet.append(y + h / 2)  #7
            packet.append(width / 2)  #8
            packet.append(height / 2)  #9
            packet.append(img)
            return packet

        return [maxVal, values]

        # Blur the image a bit to reduce detail and get better thresholding results
        blur = cv2.GaussianBlur(img, (5, 5), 3)

        # Binarize the image by thresholding the hls colorspace
        # These values target orange
        hls = hls_select_multiple(blur, 5, 35, 5, 255, 50, 255)

        # Hough Transform parameters used to detect line segments
        # Distance resolution of the accumulator in pixels
        rho = 1

        # Angle resolution of the accumulator in radians
        theta = np.pi / 180

        # Accumulator threshold param, only lines with enough votes get returned
        thresh = 200

        # Minimum line length, line segments shorter than that are rejected
        min_line_length = 200

        # Maximum allowed gap between points on the same line to link them
        max_line_gap = 2

        # Allowed line slope values will be 0 +/- slope_thresh
        slope_thresh = .5

        # (x1, y1): represent the left endpoint of the gate upper beam
        # (x2, y2): the opposite
        x1_avg = None
        y1_avg = None
        x2_avg = None
        y2_avg = None

        # Temporary values used for keep track of max/min values
        x_max = np.float64(-9999)
        x_min = np.float64(9999)
        y_max = np.float64(-9999)
        y_min = np.float64(9999)

        # Flag that keeps track of found line segment matches
        no_hits = True

        # Return line segments that meet the requirements of the paremeters above
        lines = cv2.HoughLinesP(hls,
                                rho=rho,
                                theta=theta,
                                threshold=thresh,
                                minLineLength=min_line_length,
                                maxLineGap=max_line_gap)  # NOQA

        if lines is None:
            return []

        # Average valid line segments to get a slope_avg
        for line in lines[0]:
            x1 = line[0]
            y1 = line[1]
            x2 = line[2]
            y2 = line[3]

            # Hacky solution to avoid divide by zero
            if y1 == y2:
                y1 += 1

            # The slope of the current line segment relative to Y axis (pole is vertical)
            slope = (np.float64(x2) - np.float64(x1)) / (
                np.float64(y2) - np.float64(y1))  # NOQA
            if (slope <= slope_thresh) and (slope >= -slope_thresh):
                if no_hits:
                    x1_avg = np.float64(x1)
                    y1_avg = np.float64(y1)
                    x2_avg = np.float64(x2)
                    y2_avg = np.float64(y2)

                    no_hits = False
                else:
                    x1_avg = (x1 + x1_avg) / 2
                    y1_avg = (y1 + y1_avg) / 2
                    x2_avg = (x2 + x2_avg) / 2
                    y2_avg = (y2 + y2_avg) / 2

                # Update max values found
                if x_max < x2:
                    x_max = x2
                if x_max < x1:
                    x_max = x1
                if x_min > x2:
                    x_min = x2
                if x_min > x1:
                    x_min = x1

                if y_max < y2:
                    y_max = y2
                if y_max < y1:
                    y_max = y1
                if y_min > y2:
                    y_min = y2
                if y_min > y1:
                    y_min = y1

        if no_hits:
            return []

        # If valid line segments found
        if no_hits is False:
            if x2_avg == x1_avg:
                x2_avg += 1

            slope_avg = (y2_avg - y1_avg) / (x2_avg - x1_avg)

            # The point slope equation can be used to extend the averaged line
            # And any point on the average line can be used to find the boundary
            # y - y0 = m (x - x0)
            y_min = slope_avg * (x_min - x1_avg) + y1_avg
            y_max = slope_avg * (x_max - x1_avg) + y1_avg
            y_min = y_min.astype(int)
            y_max = y_max.astype(int)
            x_min = x_min.astype(int)
            x_max = x_max.astype(int)

            x_mid = int((x1_avg + x2_avg) / 2)
            y_mid = int((y1_avg + y2_avg) / 2)

            if slope_avg < 0:
                # 57.2958 is used to convert radians to degrees
                hyp = (math.sqrt(
                    math.pow((x_max - x_min), 2) +
                    math.pow((y_max - y_min), 2)))
                offset_angle = 0
                if hyp > 0:
                    offset_angle = (57.2958 *
                                    (math.acos(abs(x_max - x_min) / hyp))
                                    )  # NOQA
                    offset_angle = abs(offset_angle - 90.0)
                roll_correction = round(offset_angle, 4)
                roll_correction = roll_correction
            else:
                hyp = (math.sqrt(
                    math.pow((x_max - x_min), 2) +
                    math.pow((y_max - y_min), 2)))
                offset_angle = 0
                # 57.2958 is used to convert radians to degrees
                if hyp > 0:
                    offset_angle = (57.2958 *
                                    (math.acos(abs(x_max - x_min) / hyp))
                                    )  # NOQA
                    offset_angle = 90 - offset_angle
                roll_correction = -round(offset_angle, 4)

            sum_rows = np.sum(hls, axis=1)
            beam_thickness = sum_rows[y_mid] / 255

        cam_center_y = int(img.shape[0] / 2)
        cam_center_x = int(img.shape[1] / 2)

        packet = []
        packet.append(roll_correction)  #0
        packet.append(beam_thickness)  #1
        packet.append(x_min)  #2
        packet.append(y_min)  #3
        packet.append(x_max)  #4
        packet.append(y_max)  #5
        packet.append(x_mid)  #6
        packet.append(y_mid)  #7
        packet.append(cam_center_x)  #8
        packet.append(cam_center_y)  #9

        if None in [x_mid, y_mid]:
            return []

        return packet  # NOQA
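The pole segmentation above keys on thresholding at mean + 1.5 * std; a minimal standalone version of that step:

import cv2
import numpy as np

values = np.random.randint(0, 100, (120, 160)).astype(np.uint8)
mean, std = cv2.meanStdDev(values)
thresh = mean[0][0] + 1.5 * std[0][0]
_, blobs = cv2.threshold(values, thresh, 255, cv2.THRESH_BINARY)
print(cv2.countNonZero(blobs))   # pixels more than 1.5 sigma above the mean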
Example #45
    circles = cv2.HoughCircles(cimg,
                               cv2.HOUGH_GRADIENT,
                               1,
                               100,
                               param1=100,
                               param2=40,
                               minRadius=30,
                               maxRadius=500)
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            ixMin = np.uint16(i[0] - (i[2] * 0.707106))
            ixMax = np.uint16(i[0] + (i[2] * 0.707106))
            iyMin = np.uint16(i[1] - (i[2] * 0.707106))
            iyMax = np.uint16(i[1] + (i[2] * 0.707106))
            mean, std = cv2.meanStdDev(frame[ixMin:ixMax, iyMin:iyMax])
            stdMean = np.mean(std)

            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0),
                       1)  # draw the outer circle

            #cv2.putText(frame,"m:" + str(np.uint16(mean)), (i[0],i[1]), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
            #cv2.putText(frame,"s:" + str(std), (i[0],i[1]-20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

            if stdMean > 30 or stdMean < 0:
                continue

            if abs(np.mean(std - [stdMean, stdMean, stdMean])) > 30:
                continue

            print(mean)
Example #46
File: test.py Project: daniaokuye/tcdcn
def whitering(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    m, s = cv2.meanStdDev(img)
    img = (img - m) / (1.e-6 + s)
    img = img.astype('float32')
    return img
Example #47
def digitalrec(image_org):

    image_org = cv2.resize(image_org, (201, 96))
    height = image_org.shape[0]
    width = image_org.shape[1]

    # Convert to grayscale
    # image_gray = cv2.cvtColor(image_org, cv2.COLOR_RGB2GRAY) # OpenCV's built-in grayscale conversion washes out the digits
    image_gray = rChannelGray(image_org)

    # Binarize
    meanvalue = image_gray.mean()
    if meanvalue >= 200:
        hist = cv2.calcHist([image_gray], [0], None, [256], [0, 255])
        min_val, max_val, min_index, max_index = cv2.minMaxLoc(hist)
        ret, image_bin = cv2.threshold(image_gray,
                                       int(max_index[1]) - 7, 255,
                                       cv2.THRESH_BINARY)
    else:
        mean, stddev = cv2.meanStdDev(image_gray)
        ret, image_bin = cv2.threshold(image_gray, meanvalue + 65, 255,
                                       cv2.THRESH_BINARY)

    # Segment the digits and recognize each one
    count = 0
    hasWhite = False
    cooList = []

    for i in range(0, width - 1, 3):
        flag = hasWhite
        y = 1
        for j in range(0, height - 1, 3):
            y += 3
            if image_bin[j][i] == 255:
                hasWhite = True
                break

        if y < height - 3 and (not flag):
            cooList.append(i)

        if y < height - 3:
            # the inner loop broke out: this column contains white pixels
            continue
        elif y >= height - 3 and flag:
            # a fully dark column right after a white run: one digit has ended
            hasWhite = False
            count += 1
            cooList.append(i)

    if len(cooList) % 2 != 0:
        cooList.append(width - 1)

    num = 0
    result = ''
    ims = []

    for i in range(0, len(cooList), 2):
        roi = image_bin[:, cooList[i]:cooList[i + 1]]

        onenumber, image = TubeIdentification(i, roi)
        ims.append(image)

        if (onenumber == -1):
            result += "0"
        else:
            result += str(onenumber)
            if isDot(roi):
                result += "."
        num += 1

    height, width = ims[0].shape

    # Create a blank long image
    longImage = Image.new("RGB", (width * len(ims), height))

    # Stitch the digit images together
    for i, im in enumerate(ims):
        image = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
        longImage.paste(image, box=(i * width, 0))

    longImage = np.array(longImage)

    return result, longImage
Example #48
# Create a VideoCapture instance.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FPS, 30)
fps = cap.get(cv2.CAP_PROP_FPS)
# Initial values for each parameter
me = 0
HB = -1
start = 0
# Unconditional loop
while True:
    # Read one frame from the VideoCapture
    ret, frame = cap.read()
    t = time.time()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  ## convert to grayscale
    mean, stddev = cv2.meanStdDev(gray)  ## compute mean and standard deviation
    # Shrink to 1/4 size to make taking screenshots easier
    frame = cv2.resize(frame,
                       (int(frame.shape[1] / 4), int(frame.shape[0] / 4)))
    # On-screen display
    if HB >= 0:
        cv2.putText(frame, str(HB), (100, 150), cv2.FONT_HERSHEY_PLAIN, 5,
                    (0, 255, 0), 3, cv2.LINE_AA)
        cv2.putText(frame, 'A pulse rate', (0, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                    (0, 255, 0), 3, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'Press Enter', (0, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                    (0, 255, 0), 3, cv2.LINE_AA)
    cv2.imshow('PUSH THE ENTER KEY TO START MEASUREMENT', frame)
    # Key handling
    if cv2.waitKey(1) == 113: break  ## quit on q
Example #49
def get_target_crops_from_img(img, geo_stamps, ppsi):
    '''takes the full image, geostamps, and ppsi and finds the targets,
    crops them, and instantiates a list of TargetCrop objects and returns them'''

    '''notes: ppsi of the image should be used in picking a reasonable max crop size'''
    img_downsized = Scale.get_img_scaled_to_one_bound(img, DOWNSCALE_CONSTRAINT).convert('RGB')

    #img.show()
    #img = Image.fromarray(cv2.GaussianBlur(numpy.array(img), (5,5), 3))
    #img = Image.fromarray(cv2.GaussianBlur(numpy.array(img), (7,7), 2))
    #img = img.resize((img.size[0]//5, img.size[1]//5))
    img_downsized = Image.fromarray(cv2.bilateralFilter(numpy.array(img_downsized), 15, 40, 40))
    #img_downsized.show()
    img_downsized.show()
    #img.show()
    image = numpy.array(img_downsized)


    kernel_size = 3
    kernel_margin = (kernel_size - 1)//2
    variance_map = numpy.zeros(img.size)


    var_img = numpy.zeros((image.shape[0], image.shape[1]))
    for x in range(kernel_margin, var_img.shape[0]-kernel_margin):
        for y in range(kernel_margin, var_img.shape[1]-kernel_margin):
            sub_arr = image[x-kernel_margin:x+kernel_margin+1, y-kernel_margin:y+kernel_margin+1]

            mean, std_dev = (cv2.meanStdDev(sub_arr))
            '''for some reason porting to this hasn't been the same as it functioned before'''
            var_img[x,y] = numpy.linalg.norm(std_dev)**2
    Image.fromarray(255*var_img/numpy.amax(var_img)).show()
    #pil_var_img = Image.fromarray(255*var_img/numpy.amax(var_img))
    #pil_var_img.show()
    #threshold_img = ImageMath.get_binary_bw_img(pil_var_img, pil_var_img.load(), 40)
    #threshold_img.show()
    var_img = var_img.T
    threshold_img = Image.new('L', (var_img.shape[0], var_img.shape[1]))
    threshold_image = threshold_img.load()

    for x in range(0, threshold_img.size[0]):
        for y in range(0, threshold_img.size[1]):
            if var_img[x,y] > VARIANCE_THRESHOLD:
                threshold_image[x,y] = 255
    threshold_img.show()

    threshold_connected_components_map = ImageMath.get_bw_connected_components_map(threshold_img, threshold_img.load())
    threshold_connected_components = ImageMath.convert_connected_component_map_into_clusters(threshold_connected_components_map)
    resize_ratio = float(img.size[0])/float(img_downsized.size[0])
    min_crop_size = (int((1.0/resize_ratio) * MAX_SQUARE_CROP_BOUNDS), int((1.0/resize_ratio) * MAX_SQUARE_CROP_BOUNDS))
    min_cluster_size = 38
    max_cluster_size = 200

    crops = []
    for i in range(0, len(threshold_connected_components)):
        if len(threshold_connected_components[i]) > min_cluster_size:
            crop_img = ImageMath.get_connected_component_mask(threshold_img.size, threshold_connected_components[i])
            crops.append(crop_img)


    color_crops = []
    crop_margin = 5
    for i in range(0, len(crops)):



        crop_img = crops[i]
        #crop_img.show()
        crop_img_mean_pixel = ImageMath.get_bw_img_mean_pixel(crop_img, crop_img.load())
        #crop_img.show()

        bilat_crop_img = img_downsized.crop((crop_img_mean_pixel[0]-(100 * (1.0/resize_ratio)), crop_img_mean_pixel[1]-(100 * (1.0/resize_ratio)), crop_img_mean_pixel[0]+(100 * (1.0/resize_ratio)), crop_img_mean_pixel[1]+ (100 * (1.0/resize_ratio)))).convert('L')
        bilat_crop_canny_img = Image.fromarray(cv2.Canny(numpy.array(bilat_crop_img), 40, 80))

        #bilat_crop_canny_img.show()

        bilat_start_x = crop_img_mean_pixel[0]-(100 * (1.0/resize_ratio))
        bilat_start_y = crop_img_mean_pixel[1]-(100 * (1.0/resize_ratio))
        bilat_bounding_rect = Crop.get_bw_img_bounds(bilat_crop_canny_img, bilat_crop_canny_img.load())
        bounding_rect = Crop.get_bw_img_bounds(crop_img, crop_img.load())
        bounding_rect.set_x(int(bounding_rect.get_x() * resize_ratio) - crop_margin)
        bounding_rect.set_y(int(bounding_rect.get_y() * resize_ratio) - crop_margin)
        bounding_rect.set_width(int(bounding_rect.get_width() * resize_ratio) + 3*crop_margin)
        bounding_rect.set_height(int(bounding_rect.get_height() * resize_ratio) + 3*crop_margin)
        append_img = Crop.get_img_cropped_to_bounds(img, bounding_rect)

        color_crops.append((numpy.asarray(crop_img_mean_pixel), append_img))

    '''kills crops in the same list whose centers are very close to each other (sometimes the same target pops up twice)'''
    min_dist_threshold = 15

    min_area = 1600
    max_area = 48400*3
    i = 0
    while i < len(color_crops):
        sorted_crops = sorted(color_crops, key = lambda crop : numpy.linalg.norm(crop[0]-color_crops[i][0]))
        '''sorted_crops[1][0] so that it excludes a measurement to itself'''
        if numpy.linalg.norm(sorted_crops[1][0] - color_crops[i][0]) < min_dist_threshold:
            if not (color_crops[i][1].size[0]*color_crops[i][1].size[1] > sorted_crops[1][1].size[0]*sorted_crops[1][1].size[1]):
                del color_crops[i]
            else:
                i+=1
        else:
            i+=1

    i = 0
    while i < len(color_crops):
        area = color_crops[i][1].size[0]*color_crops[i][1].size[1]
        if area < min_area or area > max_area:
            del color_crops[i]
        else:
            i += 1

    i = 0
    KMEANS_RUN_TIMES = 10
    MIN_TOTAL_CLUSTER_DISTANCE_SUM = 370
    '''maybe, instead of summing the distance, use the area of the triangle that the three points make and threshold that'''
    while i < len(color_crops):
        #print('color crops[i][1]: ', color_crops[i][1])
        colors = numpy.array(color_crops[i][1].convert('RGB')).reshape((-1, 3))
        colors = numpy.float32(colors)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, KMEANS_RUN_TIMES, 1.0)
        ret, labels, color_clusters = cv2.kmeans(colors, 3, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        #print("color clusters: ", color_clusters)
        dist_sum = 0
        dist_sum += numpy.linalg.norm(color_clusters[1]-color_clusters[0])
        dist_sum += numpy.linalg.norm(color_clusters[2]-color_clusters[1])
        dist_sum += numpy.linalg.norm(color_clusters[0]-color_clusters[2])
        print("dist sum is: ", dist_sum)
        if not dist_sum > MIN_TOTAL_CLUSTER_DISTANCE_SUM:
            del color_crops[i]
        else:
            i += 1



    target_crops = []



    for i in range(0, len(color_crops)):
        target_crops.append(TargetCrop(img, color_crops[i][1], geo_stamps, color_crops[i][0], ppsi))
    return target_crops
Example #50
def single_file_run(source,
                    target_img,
                    sn_method='vahadane',
                    output_dir='patches'):

    basename = os.path.basename(source)
    source_img = cv2.imread(source)
    source_img = cv2.cvtColor(source_img, cv2.COLOR_BGR2RGB)

    # Get the stain matrices of source and target images
    if sn_method == 'vahadane':
        stain_matrix_target = get_stain_matrix(target_img, 'vahadane')
        stain_matrix_source = get_stain_matrix(source_img, 'vahadane')
    elif sn_method == 'mackenko':
        stain_matrix_target = get_stain_matrix(target_img, 'mackenko')
        stain_matrix_source = get_stain_matrix(source_img, 'mackenko')

    if sn_method == 'vahadane' or sn_method == 'mackenko':
        # Get stain concentrations
        target_concentrations = get_concentrations(target_img,
                                                   stain_matrix_target)
        maxC_target = np.percentile(target_concentrations, 99, axis=0).reshape(
            (1, 2))
        stain_matrix_target_RGB = convert_OD_to_RGB(
            stain_matrix_target)  # useful to visualize.

        source_concentrations = get_concentrations(source_img,
                                                   stain_matrix_source)
        maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape(
            (1, 2))
        source_concentrations *= (maxC_target / maxC_source)
        tmp = 255 * np.exp(
            -1 * np.dot(source_concentrations, stain_matrix_target))

        normed_img = tmp.reshape(source_img.shape).astype(np.uint8)
        # Convert back from RGB to BGR so cv2.imwrite saves the colors correctly.
        normed_img = cv2.cvtColor(normed_img, cv2.COLOR_RGB2BGR)

        # Save image
        cv2.imwrite(os.path.join(output_dir, basename), normed_img)

    elif sn_method == 'reinhard':
        I1_t, I2_t, I3_t = lab_split(target_img)
        m1_t, sd1_t = cv2.meanStdDev(I1_t)
        m2_t, sd2_t = cv2.meanStdDev(I2_t)
        m3_t, sd3_t = cv2.meanStdDev(I3_t)
        target_means = m1_t, m2_t, m3_t
        target_stds = sd1_t, sd2_t, sd3_t

        I1_s, I2_s, I3_s = lab_split(source_img)
        m1_s, sd1_s = cv2.meanStdDev(I1_s)
        m2_s, sd2_s = cv2.meanStdDev(I2_s)
        m3_s, sd3_s = cv2.meanStdDev(I3_s)
        source_means = m1_s, m2_s, m3_s
        source_stds = sd1_s, sd2_s, sd3_s

        norm1 = ((I1_s - source_means[0]) *
                 (target_stds[0] / source_stds[0])) + target_means[0]
        norm2 = ((I2_s - source_means[1]) *
                 (target_stds[1] / source_stds[1])) + target_means[1]
        norm3 = ((I3_s - source_means[2]) *
                 (target_stds[2] / source_stds[2])) + target_means[2]

        normed_img = merge_back(norm1, norm2, norm3)
        # merge_back is assumed to return RGB; swap to BGR for cv2.imwrite.
        normed_img = cv2.cvtColor(normed_img, cv2.COLOR_RGB2BGR)

        # Save image
        cv2.imwrite(os.path.join(output_dir, basename), normed_img)
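# A minimal usage sketch, assuming the helpers above (get_stain_matrix,
# get_concentrations, lab_split, merge_back) are importable and that the
# target image is loaded as RGB; the file names here are placeholders.
# target = cv2.cvtColor(cv2.imread('target_patch.png'), cv2.COLOR_BGR2RGB)
# single_file_run('source_patch.png', target, sn_method='reinhard',
#                 output_dir='patches')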
Example #51
def color_stats(image):  # per-channel means then std devs (B, G, R order for cv.imread images)
    (means, stds) = cv.meanStdDev(image)

    stats = np.concatenate([means, stds]).flatten()
    return stats
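# Usage sketch: a 6-dim feature vector (3 channel means + 3 channel std devs),
# e.g. for a simple color classifier; 'sample.jpg' is a placeholder path.
# feature = color_stats(cv.imread('sample.jpg'))  # shape (6,)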
Example #52
    white_key_mask[current_frame_hsv[:,:,1] < max_white_key_sat] = 1
    white_key_mask[previous_frame_hsv[:,:,1] < max_white_key_sat] = 1
    white_key_mask[current_frame_hsv[:,:,2] > min_white_key_val] = 1
    white_key_mask[previous_frame_hsv[:,:,2] > min_white_key_val] = 1

    hand_mask = np.zeros(frame_diff_l.shape)
    # Cast to a signed type first: 2*R - G - B overflows on uint8 frames.
    red = current_frame[:, :, 2].astype(np.int16)
    red_diff_frame = red * 2 - current_frame[:, :, 1] - current_frame[:, :, 0]
    red_diff_frame[current_frame[:, :, 2] < red_threshold] = 0
    red_diff_frame[red_diff_frame > 200] = 0
    hand_mask[red_diff_frame > red_difference] = 1

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))

    mean, std_dev = cv2.meanStdDev(frame_diff_l)

    # Suppress changes within two standard deviations of zero (sensor noise).
    frame_diff_l[frame_diff_l < 2 * std_dev[0][0]] = 0


    hand_mask = cv2.dilate(hand_mask, kernel, iterations=2)

    # Damp changes under the previous hand position, boost the white-key area,
    # and zero out everything else (including the current hand).
    frame_diff_l[previous_hand_mask == 1] = frame_diff_l[previous_hand_mask == 1] / 100
    frame_diff_l[white_key_mask == 1] = frame_diff_l[white_key_mask == 1] * 20
    frame_diff_l[white_key_mask == 0] = 0
    frame_diff_l[hand_mask == 1] = 0

    frame_diff_l = cv2.blur(frame_diff_l, (13,13))
    frame_diff_l = cv2.normalize(frame_diff_l, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    cv2.imshow('frame diff saturation',frame_diff_l)
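    # Note: in a capture loop, cv2.imshow only refreshes the window once
    # cv2.waitKey is called, e.g.:
    # cv2.waitKey(1)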
Example #53
print
print "Image dtype:"
print img.dtype
print

# Color mean as per PyImageSearch
print
print "Mean BGR:"

means = cv2.mean(img)
print means

# Color mean and standard deviation of each channel
print
print "Color mean and std dev (means, stds):"

(means, stds) = cv2.meanStdDev(img)

stats = np.concatenate([means, stds]).flatten()

print stats
Example #54
def image_sharp(image_object):
    # Sharpness score in the variance-of-Laplacian family: both the mean and
    # the spread of the absolute Laplacian response grow with edge content.
    gray_lap = cv2.Laplacian(image_object, cv2.CV_16S, ksize=3)
    dst = cv2.convertScaleAbs(gray_lap)
    im_mean, im_std = cv2.meanStdDev(dst)
    return float(im_mean[0][0] * im_std[0][0])
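# Usage sketch: higher scores mean sharper images; the path is a placeholder.
# score = image_sharp(cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE))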
Example #55
def getVarianceWeight(apple, orange):
    # Note: meanStdDev returns 1x1 arrays, and the second value is a standard
    # deviation rather than a variance; the ratio-based weighting is unchanged.
    appleMean, appleStd = cv2.meanStdDev(apple)
    orangeMean, orangeStd = cv2.meanStdDev(orange)
    total = appleStd[0][0] + orangeStd[0][0]
    appleWeight = float(appleStd[0][0]) / total
    orangeWeight = float(orangeStd[0][0]) / total
    return appleWeight, orangeWeight
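# Usage sketch (not from the original snippet): blend two grayscale images
# with these contrast-proportional weights.
# wa, wo = getVarianceWeight(apple_gray, orange_gray)
# blended = cv2.addWeighted(apple_gray, wa, orange_gray, wo, 0)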
Example #56
import cv2 as cv
import numpy as np

src = cv.imread("lena.bmp")
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)
cv.imshow("gray", gray)

minValue, maxValue, minLoc, maxLoc = cv.minMaxLoc(gray)
print("min: %.2f, max: %.2f" % (minValue, maxValue))
print("min loc:", minLoc)
print("max loc:", maxLoc)

means, stddev = cv.meanStdDev(gray)
mean_value = means[0][0]
print("mean: %.2f, stddev: %.2f" % (mean_value, stddev[0][0]))
# Binarize around the mean: darker pixels to 0, the rest to 255.
gray[gray < mean_value] = 0
gray[gray >= mean_value] = 255
cv.imshow("binary", gray)

cv.waitKey(0)
cv.destroyAllWindows()
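# The two mask assignments above binarize around the mean by hand; an
# equivalent one-liner (up to rounding of the threshold) would be:
# _, binary = cv.threshold(gray, mean_value, 255, cv.THRESH_BINARY)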
Example #57
def filterRect(img, rect, threshold=.3):
    # Assumes img is single-channel (meanStdDev's mask must be 8-bit,
    # 1-channel) with values scaled to [0, 1], given the default threshold.
    (x, y, w, h) = rect
    mask = np.zeros(img.shape[:2], dtype='uint8')
    cv.rectangle(mask, (x, y), (x + w, y + h), 255, cv.FILLED)
    mean, stddev = cv.meanStdDev(img, mask=mask)
    return mean[0][0] < threshold
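# Usage sketch (hypothetical 'contours' from cv.findContours): keep only the
# detections whose masked mean clears the threshold.
# rects = [cv.boundingRect(c) for c in contours]
# kept = [r for r in rects if not filterRect(img, r)]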
Example #58
def others(m1, m2):
    # Print the mean and std dev of two images side by side for comparison.
    M1, dev1 = cv.meanStdDev(m1)
    M2, dev2 = cv.meanStdDev(m2)
    print(M1, dev1)
    print(M2, dev2)
Example #59
fctr = 1 / n
while True:
    isDisponible, fotograma = captura.read()

    if isDisponible:
        print(fotograma.shape)
        alto, ancho, chanels = fotograma.shape

        hsv_frame = cv2.cvtColor(fotograma, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv_frame)

        if isCalibrating:
            # Build running averages of hue/saturation statistics over n
            # calibration frames (each frame contributes a factor of 1/n).
            tmp_h_avg, tmp_h_stddev = cv2.meanStdDev(h)
            h_avg = h_avg + tmp_h_avg[0][0] * fctr
            h_stddev = h_stddev + tmp_h_stddev[0][0] * fctr
            print(h_stddev)

            tmp_s_avg, tmp_s_stddev = cv2.meanStdDev(s)
            s_avg = s_avg + tmp_s_avg[0][0] * fctr
            s_stddev = s_stddev + tmp_s_stddev[0][0] * fctr

            if i < n:
                i += 1
            else:
                h_dist = c * h_stddev
                h_min = h_avg - h_dist
                h_max = h_avg + h_dist
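                # Once calibration finishes, the bounds can drive a color mask
                # (a sketch; s_min/s_max would be derived from s_avg/s_stddev
                # the same way):
                # mask = cv2.inRange(hsv_frame, (h_min, s_min, 0), (h_max, s_max, 255))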
        refined_box = [l, t, r, b]
        list_bboxes.append(refined_box)
        list_confidence.append(confidence)

    # Landmark detection
    LM_caffe_param = 60
    list_CLM = []  # caffe landmark list
    for bbox in list_bboxes:
        l,t,r,b = bbox
        roi = bgr_img[t:b+1, l:r+1]
        gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        res = cv2.resize(gray_roi, (LM_caffe_param, LM_caffe_param)).astype(np.float32)
        
        # Normalize the ROI to zero mean / unit variance (the small epsilon
        # guards against division by zero on flat patches).
        mean, std_dev = cv2.meanStdDev(res)
        normalized_roi = (res - mean[0][0]) / (0.000001 + std_dev[0][0])

        blob = cv2.dnn.blobFromImage(normalized_roi, 1.0,
                                     (LM_caffe_param, LM_caffe_param), None)
        landmarknet.setInput(blob)
        caffe_landmark = landmarknet.forward()
        
        for landmark in caffe_landmark:
            LM = []
            # The net outputs (x, y) pairs normalized to [0, 1]; map them back
            # into image coordinates within the bounding box.
            for i in range(len(landmark)//2):
                x = landmark[2*i] * (r-l) + l
                y = landmark[2*i+1] * (b-t) + t
                LM.append((int(x),int(y)))
            list_CLM.append(LM)
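    # Visualization sketch (not in the original): draw the predicted landmarks
    # on the source frame.
    # for LM in list_CLM:
    #     for (x, y) in LM:
    #         cv2.circle(bgr_img, (x, y), 2, (0, 255, 0), -1)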