Example 1
 def test(trainY, testY, traingnd, testgnd, radius=2):
   # make sure traingnd and testgnd are flattened
   testgnd = testgnd.ravel()
   traingnd = traingnd.ravel()
   ntest = testY.shape[0]
   
   # Here's how to use the Hamming distance built into scipy, but it is twice as slow as yours
   #dim = trainY.shape[1]
   #hamdis = scipy.spatial.distance.cdist(trainY, testY, 'hamming')    
   #hamdis = dim * hamdis
   #hamdis = (hamdis+.1).astype(int) # convert to int but make sure no rounding issues by adding .1 first
   
   hamdis = utils.pdist2(trainY, testY, 'hamming')
   
   precision = np.zeros(ntest)
   for j in range(ntest):
     ham = hamdis[:,j]
     lst = np.flatnonzero(ham <= radius)
     ln = len(lst)
     if ln == 0:
       precision[j] = 0
     else:
       precision[j] = len(np.flatnonzero(traingnd[lst] == testgnd[j])) / float(ln)
       
   return np.mean(precision)
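As the commented-out lines in this example note, an equivalent integer Hamming-distance matrix can be built with scipy.spatial.distance.cdist instead of utils.pdist2. A minimal self-contained sketch with made-up binary codes (the real helper and data live in the surrounding project):

    import numpy as np
    import scipy.spatial.distance

    # toy binary codes: 100 database items and 20 queries, 8 bits each
    rng = np.random.RandomState(0)
    trainY = rng.randint(0, 2, size=(100, 8))
    testY = rng.randint(0, 2, size=(20, 8))

    # cdist returns the fraction of differing bits, so scale by the code length;
    # adding .1 before the int cast guards against rounding, as the comment above suggests
    dim = trainY.shape[1]
    hamdis = (dim * scipy.spatial.distance.cdist(trainY, testY, 'hamming') + .1).astype(int)
    print(hamdis.shape)  # (100, 20): one column of Hamming distances per query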
Example 2
    def test(trainY, testY, traingnd, testgnd, radius=2):
        # make sure traingnd and testgnd are flattened
        testgnd = testgnd.ravel()
        traingnd = traingnd.ravel()
        ntest = testY.shape[0]

        # Here's how to use the Hamming distance built into scipy, but it is twice as slow as yours
        #dim = trainY.shape[1]
        #hamdis = scipy.spatial.distance.cdist(trainY, testY, 'hamming')
        #hamdis = dim * hamdis
        #hamdis = (hamdis+.1).astype(int) # convert to int but make sure no rounding issues by adding .1 first

        hamdis = utils.pdist2(trainY, testY, 'hamming')

        precision = np.zeros(ntest)
        for j in range(ntest):
            ham = hamdis[:, j]
            lst = np.flatnonzero(ham <= radius)
            ln = len(lst)
            if ln == 0:
                precision[j] = 0
            else:
                precision[j] = len(
                    np.flatnonzero(traingnd[lst] == testgnd[j])) / float(ln)

        return np.mean(precision)
Example 3
    def _Z(data, anchors, nnanchors, sigma):
        n = data.shape[0]
        m = anchors.shape[0]

        # tried using for loops. too slow.
        #sqdist = scipy.spatial.distance.cdist(data, anchors,'sqeuclidean') # too slow
        sqdist = utils.pdist2(data, anchors, 'sqeuclidean')
        val = np.zeros((n, nnanchors))
        pos = np.zeros((n, nnanchors), dtype=int)
        for i in range(nnanchors):
            pos[:, i] = np.argmin(sqdist, 1)
            val[:, i] = sqdist[np.arange(len(sqdist)), pos[:, i]]
            sqdist[np.arange(n), pos[:, i]] = float('inf')

        # would be cleaner to calculate sigma in its own separate method, but this is more efficient
        if sigma is None:
            dist = np.sqrt(val[:, nnanchors - 1])
            sigma = np.mean(dist) / np.sqrt(2)

        # Next, calculate formula (2) from the paper.
        # This calculation differs from the MATLAB code: there, the RBF kernel's
        # exponent has only sigma^2 in the denominator, whereas here it is
        # 2 * sigma^2. This is accounted for when auto-calculating sigma above
        # by dividing by sqrt(2).

        # Here is how you first calculated formula (2), which is similar in approach to the matlab code (not with
        # respect to the difference mentioned above, though). However, you encountered floating point issues.
        #val = np.exp(-val / (2 * np.power(sigma,2)))
        #s = val.sum(1)[np.newaxis].T # had to do np.newaxis and transpose to make it a column vector
        #                             # just calling ".T" without that wasn't working. reshape would
        #                             # also work. I'm not sure which is preferred.
        #repmat = np.tile(s, (1,nnanchors))
        #val = val / repmat

        # So work in log space and then exponentiate, to avoid the floating point issues.
        # for the denominator, the following code avoids even more precision issues, by relying
        # on the fact that the log of the sum of exponentials, equals some constant plus the log of sum
        # of exponentials of numbers subtracted by the constant:
        #  log(sum_i(exp(x_i))) = m + log(sum_i(exp(x_i-m)))

        c = 2 * np.power(sigma, 2)  # bandwidth parameter
        exponent = -val / c  # exponent of RBF kernel
        # no longer using the np.newaxis approach, now that the keepdims option is available
        #shift = np.amin(exponent, 1)[np.newaxis].T # np.axis to make column vector
        shift = np.amin(exponent, 1, keepdims=True)
        # no longer using the np.tile approach, since numpy broadcasting handles it.
        # Originally this was exponent - shiftrep, but exponent - shift works the same.
        #shiftrep = np.tile(shift, (1,nnanchors))
        denom = np.log(np.sum(np.exp(exponent - shift), 1,
                              keepdims=True)) + shift
        val = np.exp(exponent - denom)

        Z = scipy.sparse.lil_matrix((n, m))
        for i in range(nnanchors):
            Z[np.arange(n), pos[:, i]] = val[:, i]
        Z = scipy.sparse.csr_matrix(Z)

        return (Z, sigma)
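The denominator above uses the standard log-sum-exp identity quoted in the comment, log(sum_i(exp(x_i))) = m + log(sum_i(exp(x_i - m))), which holds for any constant m (this code uses the row minimum). A small standalone sketch of the stabilized row normalization, with made-up exponents:

    import numpy as np

    # toy RBF exponents, one row per data point; the first row would underflow
    exponent = np.array([[-1000.0, -1001.0, -1002.0],
                         [-3.0, -2.0, -1.0]])

    # naive normalization: exp(-1000) underflows to 0, so the first row becomes 0/0
    naive = np.exp(exponent) / np.exp(exponent).sum(1, keepdims=True)

    # stabilized version: subtract a per-row constant inside exp, then add its log back
    shift = np.amin(exponent, 1, keepdims=True)
    denom = np.log(np.sum(np.exp(exponent - shift), 1, keepdims=True)) + shift
    stable = np.exp(exponent - denom)

    print(naive[0])   # [nan nan nan]
    print(stable[0])  # roughly [0.665 0.245 0.090], and it sums to 1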
Example 4
  def _Z(data, anchors, nnanchors, sigma):
    n = data.shape[0]
    m = anchors.shape[0]
    
    # tried using for loops. too slow.
    #sqdist = scipy.spatial.distance.cdist(data, anchors,'sqeuclidean') # too slow
    sqdist = utils.pdist2(data, anchors, 'sqeuclidean')
    val = np.zeros((n, nnanchors))
    pos = np.zeros((n, nnanchors), dtype=int)
    for i in range(nnanchors):
      pos[:,i] = np.argmin(sqdist, 1)
      val[:,i] = sqdist[np.arange(len(sqdist)), pos[:,i]]
      sqdist[np.arange(n), pos[:,i]] = float('inf')
    
    # would be cleaner to calculate sigma in its own separate method, but this is more efficient
    if sigma is None:
      dist = np.sqrt(val[:,nnanchors-1])
      sigma = np.mean(dist) / np.sqrt(2)
    
    # Next, calculate formula (2) from the paper.
    # This calculation differs from the MATLAB code: there, the RBF kernel's
    # exponent has only sigma^2 in the denominator, whereas here it is
    # 2 * sigma^2. This is accounted for when auto-calculating sigma above
    # by dividing by sqrt(2).
    
    # Here is how you first calculated formula (2), which is similar in approach to the matlab code (not with
    # respect to the difference mentioned above, though). However, you encountered floating point issues.
    #val = np.exp(-val / (2 * np.power(sigma,2)))
    #s = val.sum(1)[np.newaxis].T # had to do np.newaxis and transpose to make it a column vector
    #                             # just calling ".T" without that wasn't working. reshape would
    #                             # also work. I'm not sure which is preferred.
    #repmat = np.tile(s, (1,nnanchors))
    #val = val / repmat

    # So work in log space and then exponentiate, to avoid the floating point issues.
    # for the denominator, the following code avoids even more precision issues, by relying
    # on the fact that the log of the sum of exponentials, equals some constant plus the log of sum
    # of exponentials of numbers subtracted by the constant:
    #  log(sum_i(exp(x_i))) = m + log(sum_i(exp(x_i-m)))
    
    c = 2 * np.power(sigma,2) # bandwidth parameter
    exponent = -val / c       # exponent of RBF kernel
    # no longer using the np.newaxis approach, now that the keepdims option is available
    #shift = np.amin(exponent, 1)[np.newaxis].T # np.axis to make column vector
    shift = np.amin(exponent, 1, keepdims=True)
    # no longer using the np.tile approach, since numpy broadcasting handles it.
    # Originally this was exponent - shiftrep, but exponent - shift works the same.
    #shiftrep = np.tile(shift, (1,nnanchors))
    denom = np.log(np.sum(np.exp(exponent - shift), 1, keepdims=True)) + shift
    val = np.exp(exponent - denom)
    
    Z = scipy.sparse.lil_matrix((n,m))
    for i in range(nnanchors):
      Z[np.arange(n), pos[:,i]] = val[:,i]
    Z = scipy.sparse.csr_matrix(Z)
    
    return (Z, sigma)
Example 5
    def _Z(data, anchors, nnanchors, sigma):
        n = data.shape[0]
        m = anchors.shape[0]

        # tried using for loops. too slow.
        sqdist = utils.pdist2(data, anchors, 'sqeuclidean')
        val = np.zeros((n, nnanchors))
        pos = np.zeros((n, nnanchors), dtype=int)
        for i in range(nnanchors):
            pos[:, i] = np.argmin(sqdist, 1)
            val[:, i] = sqdist[np.arange(len(sqdist)), pos[:, i]]
            sqdist[np.arange(n), pos[:, i]] = float('inf')

        # would be cleaner to calculate sigma in its own separate method,
        # but this is more efficient
        if sigma is None:
            dist = np.sqrt(val[:, nnanchors - 1])
            sigma = np.mean(dist) / np.sqrt(2)

        # Next, calculate formula (2) from the paper.
        # This calculation differs from the MATLAB code: there, the RBF kernel's
        # exponent has only sigma^2 in the denominator, whereas here it is
        # 2 * sigma^2. This is accounted for when auto-calculating sigma above
        # by dividing by sqrt(2).

        # Work in log space and then exponentiate, to avoid the floating point
        # issues. for the denominator, the following code avoids even more
        # precision issues, by relying on the fact that the log of the sum of
        # exponentials, equals some constant plus the log of sum of exponentials
        # of numbers subtracted by the constant:
        #  log(sum_i(exp(x_i))) = m + log(sum_i(exp(x_i-m)))

        c = 2 * np.power(sigma, 2)  # bandwidth parameter
        exponent = -val / c  # exponent of RBF kernel
        shift = np.amin(exponent, 1, keepdims=True)
        denom = np.log(np.sum(np.exp(exponent - shift), 1,
                              keepdims=True)) + shift
        val = np.exp(exponent - denom)

        Z = scipy.sparse.lil_matrix((n, m))
        for i in range(nnanchors):
            Z[np.arange(n), pos[:, i]] = val[:, i]
        Z = scipy.sparse.csr_matrix(Z)

        return (Z, sigma)
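The final loop in this variant scatters the nnanchors weights of each point into an n-by-m sparse matrix, one nearest-anchor column at a time. A tiny standalone illustration of just that scatter step, with made-up pos/val arrays:

    import numpy as np
    import scipy.sparse

    n, m, nnanchors = 4, 5, 2
    pos = np.array([[0, 1], [2, 3], [4, 0], [1, 2]])                  # nearest-anchor indices per point
    val = np.array([[0.7, 0.3], [0.6, 0.4], [0.8, 0.2], [0.5, 0.5]])  # their normalized weights

    Z = scipy.sparse.lil_matrix((n, m))
    for i in range(nnanchors):
        Z[np.arange(n), pos[:, i]] = val[:, i]    # place the i-th nearest anchor's weight for every point
    Z = scipy.sparse.csr_matrix(Z)
    print(Z.toarray())   # each row has nnanchors nonzeros that sum to 1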
Example 6
    def test(trainY, testY, traingnd, testgnd, radius=2):
        # make sure traingnd and testgnd are flattened
        testgnd = testgnd.ravel()
        traingnd = traingnd.ravel()
        ntest = testY.shape[0]

        hamdis = utils.pdist2(trainY, testY, 'hamming')

        precision = np.zeros(ntest)
        for j in range(ntest):
            ham = hamdis[:, j]
            lst = np.flatnonzero(ham <= radius)
            ln = len(lst)
            if ln == 0:
                precision[j] = 0
            else:
                numerator = len(np.flatnonzero(traingnd[lst] == testgnd[j]))
                precision[j] = numerator / float(ln)

        return np.mean(precision)
Example 7
	def __performSaliency__(self,region_desc,useDistribution=False):
		start_time = time.time();
		(num_regions,regions,region_props,data) = region_desc
		frame_shape = regions.shape;
		
		_norm = np.sqrt(frame_shape[0]*frame_shape[0] + frame_shape[1]*frame_shape[1]);
		allp_exp_dist=np.exp(-squareform(pdist(region_props[0]))/(_norm*self.dist_weight));
		norm_dist=1/np.sum(allp_exp_dist,0)
		allp_col_dist =  squareform(pdist(data))
		allp_exp_dist = allp_exp_dist*norm_dist[:,None]
		uniqueness =normalize(np.sum(allp_col_dist*allp_exp_dist,0));
		
		if useDistribution:		
			allp_exp_col_dist = np.exp(allp_col_dist/(np.max(allp_col_dist)*self.color_weight));
			norm_col_dist=1/np.sum(allp_exp_col_dist,0)
			allp_exp_col_dist = allp_exp_col_dist*norm_col_dist[:,None];
			weighted_mean = np.dot(allp_exp_col_dist,region_props[0])
			allp_mean_var = pdist2(region_props[0],weighted_mean)
			distribution = normalize(np.sum(allp_mean_var*allp_exp_col_dist,0))
			saliency = normalize(uniqueness*np.exp(-1*distribution))
		else:
			saliency = uniqueness
		
		saliency  = sum([np.where(regions==region,saliency[region],0)
								for region in range(num_regions)],0)
		if self.props.doProfile:
			u_frame = sum([np.where(regions==region,255*uniqueness[region],0) 
							for region in range(num_regions)],0)
			cv2.imwrite(self.PROFILE_PATH+self.method+'_u.png',u_frame);
			if useDistribution:
				d_frame = sum([np.where(regions==region,255*distribution[region],0)
						for region in range(num_regions)],0)
				cv2.imwrite(self.PROFILE_PATH+self.method+'_d.png',d_frame);
			cv2.imwrite(self.PROFILE_PATH+self.method+'_p.png',np.uint8(saliency*255));
			print "Region contrast : ",time.time()-start_time
	
		return saliency;
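For reference, a stripped-down sketch of just the uniqueness term above: each region's pairwise color distance is weighted by an exponential function of spatial distance and summed, so a region whose spatially nearby regions have different colors scores high. The region descriptors, the dist_weight value, and the min-max rescaling (standing in for the project's normalize helper) are all illustrative assumptions:

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    # made-up region descriptors: centroid positions and mean colors
    positions = np.array([[0.1, 0.1], [0.2, 0.1], [0.9, 0.9]])
    colors = np.array([[10.0], [12.0], [200.0]])
    dist_weight = 0.25                                               # assumed spatial bandwidth

    spatial = np.exp(-squareform(pdist(positions)) / dist_weight)    # proximity weights
    spatial = spatial * (1.0 / spatial.sum(0))[:, None]              # normalize, as in the code above
    color = squareform(pdist(colors))                                # pairwise color distances

    uniqueness = (color * spatial).sum(0)                            # proximity-weighted color contrast
    uniqueness = (uniqueness - uniqueness.min()) / np.ptp(uniqueness)  # min-max rescale (stand-in for normalize)
    print(uniqueness)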
Example 8
    def track_object(self, frame, mask):
        frameWindow = self.__detect_object__(mask)
        shape = frame.shape[:2]
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        if self.activeWindowFeats is None:
            self.activeWindowFeats = [
                self.__computeFeats__(hsv_frame, window, mask, shape)
                for window in frameWindow
            ]
            self.windowMarkers = list(range(len(self.activeWindowFeats)))
            self.windowCounters = [1] * len(self.activeWindowFeats)
            self.nextIdx = len(self.activeWindowFeats)

        mark_map = None
        num_activeWindows = len(self.activeWindowFeats)
        curWindowFeats = [
            self.__computeFeats__(hsv_frame, window, mask, shape)
            for window in frameWindow
        ]
        num_windows = len(curWindowFeats)
        marker = -1 * np.ones(num_windows, dtype=np.int32)
        if num_windows > 0 and num_activeWindows > 0:
            hist_dist = (pdist2(
                np.array(self.activeWindowFeats)[:, :self.n_bins],
                np.array(curWindowFeats)[:, :self.n_bins]) /
                         np.sqrt(self.n_bins))
            rect_dist = (pdist2(
                np.array(self.activeWindowFeats)[:,
                                                 self.n_bins:self.n_bins + 4],
                np.array(curWindowFeats)[:, self.n_bins:self.n_bins + 4]) /
                         np.sqrt(self.n_bins))
            centre_dist = (pdist2(
                np.array(self.activeWindowFeats)[:, self.n_bins + 4:],
                np.array(curWindowFeats)[:, self.n_bins + 4:]) /
                           np.sqrt(self.n_bins))
            dist = hist_dist * rect_dist * centre_dist
            (x, y) = np.meshgrid(range(num_activeWindows), range(num_windows))
            x = x.flatten()
            y = y.flatten()
            order = np.argsort(dist.flatten())
            mark_map = -1 * np.ones(num_activeWindows, dtype=np.int32)
            rev_mark_map = -1 * np.ones(num_windows, dtype=np.int32)
            for (_prev, _cur) in zip(x[order], y[order]):
                if mark_map[_prev] == -1 and rev_mark_map[_cur] == -1:
                    mark_map[_prev] = _cur
                    rev_mark_map[_cur] = _prev
        #Updating counters
        for _id in range(num_activeWindows):
            if not (mark_map is None) and (mark_map[_id] != -1):
                _map = mark_map[_id]
                self.activeWindowFeats[_id] = curWindowFeats[_map]
                marker[_map] = self.windowMarkers[_id]
                self.windowCounters[_id] += 1
                #if self.activeWindowVolume[_id] is None:
                #	self.activeWindowVolume[_id] =

            else:
                self.windowCounters[_id] -= 1
        #print frameIdx,	self.windowMarkers,self.windowCounters
        #Eliminating windows
        self.activeWindowFeats = [
            self.activeWindowFeats[idx] for idx in range(num_activeWindows)
            if self.windowCounters[idx] > -1
        ]
        self.windowMarkers = [
            self.windowMarkers[idx] for idx in range(num_activeWindows)
            if self.windowCounters[idx] > -1
        ]
        self.windowCounters = [
            self.windowCounters[idx] for idx in range(num_activeWindows)
            if self.windowCounters[idx] > -1
        ]
        # Adding windows
        for idx in range(num_windows):
            if marker[idx] == -1:
                self.activeWindowFeats.extend([curWindowFeats[idx]])
                self.windowMarkers.extend([self.nextIdx])
                marker[idx] = self.nextIdx
                self.windowCounters.extend([1])
                self.nextIdx += 1
        return (frameWindow, marker)
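The matching block in this example is a greedy one-to-one assignment: all (previous window, current detection) pairs are visited from smallest to largest combined distance, and a pair is accepted only if neither side is already matched. A minimal sketch of that step alone, using np.unravel_index in place of the meshgrid/flatten bookkeeping and a made-up distance matrix:

    import numpy as np

    # toy distance matrix: rows = previously tracked windows, cols = current detections
    dist = np.array([[0.2, 0.9, 0.4],
                     [0.8, 0.1, 0.7]])
    num_prev, num_cur = dist.shape

    # visit pairs from closest to farthest
    order = np.argsort(dist, axis=None)
    prev_idx, cur_idx = np.unravel_index(order, dist.shape)

    mark_map = -1 * np.ones(num_prev, dtype=np.int32)      # prev window -> matched detection
    rev_mark_map = -1 * np.ones(num_cur, dtype=np.int32)   # detection -> matched prev window
    for (_prev, _cur) in zip(prev_idx, cur_idx):
        if mark_map[_prev] == -1 and rev_mark_map[_cur] == -1:
            mark_map[_prev] = _cur
            rev_mark_map[_cur] = _prev

    print(mark_map)   # [0 1]: window 0 -> detection 0, window 1 -> detection 1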