def checkoverlap(x1, y1, r1, x2, y2, r2):
    """Return True if the circles (x1, y1, r1) and (x2, y2, r2) overlap.

    Two circles overlap (or touch) when the distance between their
    centres does not exceed the sum of their radii.
    """
    # BUG FIX: the original compared the centre distance dxy (a length,
    # via sqrt) against np.power(r2 + r1, 2) (a squared length), which
    # wrongly reports overlap whenever dxy lies between r1 + r2 and
    # (r1 + r2)**2.  Compare squared quantities consistently instead
    # (this also avoids the sqrt).
    sq_dist = np.power(x2 - x1, 2) + np.power(y2 - y1, 2)
    sq_radii_sum = np.power(r1 + r2, 2)
    # bool(...) keeps the original literal-True/False return type even
    # for numpy scalar inputs.
    return bool(sq_dist <= sq_radii_sum)
def calculatesubjectdistance(self, subject, threshold):
    # Future change
    """Check whether *subject* lies within *threshold* of our subject.

    Computes the integer Euclidean distance between the two circle
    centres and returns True when it does not exceed *threshold*.
    """
    (own_x, own_y), _own_radius = self.subject.circle
    (other_x, other_y), _other_radius = subject.circle
    dx = own_x - other_x
    dy = own_y - other_y
    centre_dist = int(np.sqrt(np.power(dx, 2) + np.power(dy, 2)))
    return centre_dist <= threshold
def calculatesubjectdistance(self, subject, threshold):
    # Future change
    """Return True when *subject*'s circle centre is within *threshold*
    of this tracker's subject circle centre.

    Also prints the computed distance for debugging.
    """
    (xt, yt), radius = self.subject.circle
    (xs, ys), radius = subject.circle
    loss = int(np.sqrt(np.power(xt - xs, 2) + np.power(yt - ys, 2)))
    # FIX: use the function form of print; the original Python-2-only
    # `print x` statements are a SyntaxError under Python 3, while the
    # single-argument call form prints identically on both versions.
    print('Deassociate loss')
    print(loss)
    # Idiom: return the comparison directly instead of an if/else that
    # returns literal True/False.
    return loss <= threshold
def lossfunction__old1(tr, sub): #@Borja(orginal)1
    """Legacy association loss between a tracker *tr* and a detection *sub*.

    Returns a ``(loss, distance)`` tuple:
      * ``distance`` -- integer Euclidean distance between the tracker's
        particle-filter circle centre and the subject's circle centre.
      * ``loss`` -- sum of negative-log normal pdfs of the subject's
        rotated-box centre (x, y) and of its offset from the best
        particle ``p_star``, using the per-column mean/std of the
        particle array ``tr.pf.p``.

    NOTE(review): assumes particle columns 0, 1 hold x, y and columns
    4, 5 hold the velocity-like components -- confirm against the
    particle-filter definition.
    """
    # Where to create the loss using data from the subject class
    # First simple loss function---------------------------------------
    # Based in simple distance to subject base
    (xt, yt), radius = tr.pf.circle
    (xs, ys), radius = sub.circle
    distance = int(np.sqrt(np.power(xt - xs, 2) + np.power(yt - ys, 2)))
    # Second loss function---------------------------------------------
    # Based in normal probability density function of particles for
    # position and detection size
    (x, y), (h, w), a = sub.rot_box
    p = tr.pf.p
    p_star = tr.pf.p_star
    p_mean = tr.pf.p_mean
    # optimization --> do not recompute mean and std on every call;
    # compute them once before entering here
    # loss = -x -y -vx -vy
    loss = - np.log(normpdf(x, np.mean(p[:, 0]), np.std(p[:, 0]))) \
           - np.log(normpdf(y, np.mean(p[:, 1]), np.std(p[:, 1]))) \
           - np.log(normpdf(x - p_star[0], np.mean(p[:, 4]), np.std(p[:, 4]))) \
           - np.log(normpdf(y - p_star[1], np.mean(p[:, 5]), np.std(p[:, 5])))
    #- np.log(normpdf(sub.h, np.mean(p[:, 4]), np.std(p[:, 4])))
    # Dead debug scaffold: the triple-quoted strings below are no-op
    # expression statements, kept verbatim from the original.
    debug_flag=0
    if debug_flag:
        """
        print '###'
        print normpdf(p_star[1] - y, np.mean(p[:, 5]), np.std(p[:, 5]))
        print x
        print y
        """
        """
        print '----'
        print 'x, y, h: %s, %s, %s' % (x, y, sub.h)
        #print 'x: %s' % - np.log(normpdf(x, np.mean(p[:, 0]), np.std(p[:, 0])))
        print 'vx: %s' % - np.log(normpdf(p_star[0] - x, np.mean(p[:, 4]), np.std(p[:, 4])))
        print 'vy: %s' % - np.log(normpdf(p_star[1] - y, np.mean(p[:, 5]), np.std(p[:, 5])))
        print '----'
        """
    return loss, distance
def lossfunction__old1(tr, sub): #@Borja(orginal)1
    """Legacy association loss between tracker *tr* and detection *sub*.

    Returns ``(loss, distance)``: ``distance`` is the integer Euclidean
    distance between the particle-filter circle centre and the subject
    circle centre; ``loss`` is the sum of negative-log normal pdfs of
    the rotated-box centre (x, y) and of its offset from the best
    particle, parameterised by the per-column mean/std of the particle
    array.
    """
    # --- simple geometric term: distance between the two circle centres
    (cx_tr, cy_tr), _ = tr.pf.circle
    (cx_sub, cy_sub), _ = sub.circle
    distance = int(np.sqrt((cx_tr - cx_sub) ** 2 + (cy_tr - cy_sub) ** 2))

    # --- probabilistic term over position and velocity-like offsets
    (bx, by), (bh, bw), box_angle = sub.rot_box
    particles = tr.pf.p
    best = tr.pf.p_star
    mean_particle = tr.pf.p_mean  # fetched as in the original (unused here)

    # Negative log-likelihood of `value` under the normal fitted to one
    # particle column.  (Optimisation note from the original: mean/std
    # could be precomputed before entering this function.)
    def _neglog(value, column):
        col = particles[:, column]
        return - np.log(normpdf(value, np.mean(col), np.std(col)))

    # loss = -x -y -vx -vy
    loss = (_neglog(bx, 0)
            + _neglog(by, 1)
            + _neglog(bx - best[0], 4)
            + _neglog(by - best[1], 5))
    # (debug scaffolding from the original was a no-op: a pair of bare
    # triple-quoted strings behind `if 0`.)
    return loss, distance
def plikelihood(self):
    """Update particle weights from the current detection.

    Builds a target vector [x, y, h, w] from ``self.det``, scores each
    particle by its squared distance to that target, converts distances
    to normalised probabilities with an exponential kernel, stores them
    in ``self.prob`` and re-sorts the particles via ``self.sortprob()``.
    """
    (x, y), (h, w), a = self.det
    det = np.floor(np.array([[x, y, h, w]]))
    yrep = np.ones((self.num_p, 4)) * det
    R2 = np.sum(np.power(self.p - yrep, 2), 1)
    spread = np.sqrt(R2)
    # kernel width: twice the spread of the particle distances
    width = 2 * (np.amax(spread) - np.amin(spread))
    # FIX: guard the degenerate cases that produced NaN weights in the
    # original:
    #  * width == 0 (all particles equidistant from the detection) made
    #    -R2 / width divide by zero;
    #  * a zero probability mass made the normalisation divide by zero.
    # The 6-column plikelihood variants already guard the zero-mass case.
    if width != 0:
        prob = np.exp(-R2 / width)
    else:
        prob = np.ones(self.num_p)  # equidistant -> uniform weights
    total = np.sum(prob)
    if total != 0.0:
        prob = prob / total
    self.prob = prob
    self.sortprob()
def plikelihood(self):
    """Update particle weights from the current rotated-box detection.

    Extended 6-column variant: the target vector is
    [x, y, h, w, x - p_star_x, y - p_star_y], i.e. position, size and a
    velocity-like offset from the current best particle.  Weights are
    stored in ``self.prob``; afterwards particles are re-sorted and the
    best particle is recomputed.  Does nothing unless ``self.update``
    is truthy.

    NOTE(review): when the probability mass sums to zero, the weights
    are stored unnormalised (only the division is guarded) -- confirm
    that is intended.  A zero kernel width (all particles equidistant)
    still divides by zero and yields NaNs.
    """
    if self.update:
        (x, y), (h, w), a = self.rb
        det = np.floor(np.array([[x, y, h, w, x - self.p_star[0], y - self.p_star[1]]]))
        yrep = np.ones((self.num_p, 6)) * det
        # squared distance of each particle's first 6 state columns to the target
        R2 = np.sum(np.power(self.p[:, 0:6] - yrep, 2), 1)
        # kernel width: twice the spread of the particle distances
        width = 2 * (np.amax(np.sqrt(R2)) - np.amin(np.sqrt(R2)))
        prob = np.exp(- R2 / width)
        a = np.sum(prob)  # note: reuses/overwrites the rotation angle local `a`
        if a != 0.0:
            prob = prob / np.sum(prob)
        self.prob = prob
        self.sortprob()
        self.calculatepstar()
def plikelihood(self):
    """Recompute particle weights against the rotated-box detection.

    No-op when ``self.update`` is falsy.  Otherwise scores the first
    six state columns of every particle against the target vector
    [x, y, h, w, x - p_star_x, y - p_star_y], normalises the resulting
    exponential-kernel weights (when their sum is non-zero), stores
    them in ``self.prob``, re-sorts the particles and refreshes the
    best-particle estimate.
    """
    if not self.update:
        return
    (cx, cy), (bh, bw), _angle = self.rb
    target = np.floor(np.array(
        [[cx, cy, bh, bw, cx - self.p_star[0], cy - self.p_star[1]]]))
    tiled = np.ones((self.num_p, 6)) * target
    sq_dist = np.sum(np.power(self.p[:, 0:6] - tiled, 2), 1)
    radii = np.sqrt(sq_dist)
    kernel_width = 2 * (np.amax(radii) - np.amin(radii))
    weights = np.exp(-sq_dist / kernel_width)
    mass = np.sum(weights)
    if mass != 0.0:
        weights = weights / mass
    self.prob = weights
    self.sortprob()
    self.calculatepstar()
def plikelihood_new(self):
    """Experimental particle-weight update (6-column variant).

    Same exponential kernel as ``plikelihood``: scores the first six
    state columns of each particle against the extended detection
    vector [x, y, h, w, x - p_star_x, y - p_star_y], normalises when
    the mass is non-zero, then re-sorts and recomputes the best
    particle.  Does nothing unless ``self.update`` is truthy.
    """
    if self.update:
        (x, y), (h, w), a = self.rb
        detectionExtended = np.floor(np.array(
            [[x, y, h, w, x - self.p_star[0], y - self.p_star[1]]]))
        yrep = np.ones((self.num_p, 6)) * detectionExtended
        # FIX: removed the dead call
        #     prob = gaussMixModelPDF(oneParticle, detectionExtended, p_star)
        # It referenced the undefined names `oneParticle` and `p_star`
        # (raising NameError at runtime) and its result was immediately
        # overwritten by the kernel computation below.
        # TODO(review): wire gaussMixModelPDF in properly (per-particle,
        # with self.p_star) once it is meant to replace the kernel.
        R2 = np.sum(np.power(self.p[:, 0:6] - yrep, 2), 1)
        width = 2 * (np.amax(np.sqrt(R2)) - np.amin(np.sqrt(R2)))
        prob = np.exp(- R2 / width)
        total = np.sum(prob)
        if total != 0.0:
            prob = prob / total
        self.prob = prob
        self.sortprob()
        self.calculatepstar()
def plikelihood_new(self):
    """Experimental particle-weight update (6-column variant).

    Scores the first six state columns of each particle against the
    extended detection vector [x, y, h, w, x - p_star_x, y - p_star_y]
    with an exponential kernel, normalises when the probability mass is
    non-zero, stores the weights in ``self.prob``, then re-sorts and
    recomputes the best particle.  No-op unless ``self.update``.
    """
    if self.update:
        (x, y), (h, w), a = self.rb
        detectionExtended = np.floor(
            np.array([[x, y, h, w, x - self.p_star[0],
                       y - self.p_star[1]]]))
        yrep = np.ones((self.num_p, 6)) * detectionExtended
        # FIX: the original called
        #     prob = gaussMixModelPDF(oneParticle, detectionExtended, p_star)
        # here, using the undefined names `oneParticle` and `p_star`
        # (NameError at runtime); the result was discarded by the
        # computation below anyway, so the call is removed.
        R2 = np.sum(np.power(self.p[:, 0:6] - yrep, 2), 1)
        width = 2 * (np.amax(np.sqrt(R2)) - np.amin(np.sqrt(R2)))
        prob = np.exp(-R2 / width)
        total = np.sum(prob)
        if total != 0.0:
            prob = prob / total
        self.prob = prob
        self.sortprob()
        self.calculatepstar()
def lossfunction(tr, sub): #@CIA june-25-2015
    """Association loss between tracker *tr* and detection *sub*.

    Returns ``(loss, distance)``:
      * ``distance`` -- Euclidean distance from the detection's
        rotated-box centre to the mean particle position.
      * ``loss`` -- negative log of a two-component Gaussian mixture
        (best particle ``p_star`` weighted 0.20, mean particle
        ``p_mean`` weighted 0.80) over x and y, plus the log of the
        Bhattacharyya distance between the RGB colour-cube histograms.
        Infinite losses are clamped to 100.

    NOTE(review): ``norm`` is presumably scipy.stats.norm and
    ``cv2.cv.CV_COMP_BHATTACHARYYA`` only exists in OpenCV 2.x -- in
    cv2 3+/4+ this must be ``cv2.HISTCMP_BHATTACHARYYA``; confirm the
    pinned OpenCV version.  The Inf check does not catch NaN losses.
    """
    loss=0
    distance=0
    #----prepare variables
    (x, y), (h, b), a = sub.rot_box
    p = tr.pf.p
    p_star = tr.pf.p_star
    p_mean = tr.pf.p_mean
    #----obtain loss(tr,sub)
    #--distance(tr,sub)
    sqDistance = np.power(x - p_mean[0], 2) + np.power(y - p_mean[1], 2)
    distance = np.sqrt(sqDistance)
    #--mix of gaussians
    w_star = 0.20
    w_mean = 0.80
    #sigma_mean = 1
    #sigma_star = 1
    """
    TODO
    ----
    cov --> anchura de las particulas en vez de 1
    """
    """
    COLOR_CUBES
    ---
    1 vector de 8 cubos por canal de color BGR
    ---
    SUBJECT (Deteccion)
    b, g, r = sub.rgb_cubes
    TRACKER
    b, g, r = tr.rgb_cubes
    """
    #.for X
    # NOTE(review): columns 3 (for X) and 2 (for Y) of p_star/p_mean are
    # used as sigmas -- looks like the box width/height columns; confirm.
    mu_starX = p_star[0] # x_star
    mu_meanX = p_mean[0] # x_mean
    sigma_starX = p_star[3]#/2
    sigma_meanX = p_mean[3]#/2
    lossX = w_star*norm.pdf(x,mu_starX,sigma_starX) + w_mean*norm.pdf(x,mu_meanX,sigma_meanX)
    #.for Y
    mu_starY = p_star[1] # x_star
    mu_meanY = p_mean[1] # x_mean
    sigma_starY = p_star[2]#/2
    sigma_meanY = p_mean[2]#/2
    lossY = w_star*norm.pdf(y,mu_starY,sigma_starY) + w_mean*norm.pdf(y,mu_meanY,sigma_meanY)
    #.neglog of joint(X,Y) assuming independence
    # For appareance
    lossApp = cv2.compareHist(tr.rgb_cubes.ravel().astype('float32'),
                              sub.rgb_cubes.ravel().astype('float32'),
                              cv2.cv.CV_COMP_BHATTACHARYYA)
    #print cv2.normalize(sub.rgb_cubes.ravel().astype('float32'))
    #print cv2.normalize(tr.rgb_cubes.ravel().astype('float32'))
    loss = - np.log(lossX) - np.log(lossY) + np.log(lossApp)
    if loss == float('Inf') or loss == -float('Inf'):
        loss = 100
    return loss, distance
def normpdf(x, m, v):
    """Normal probability density at *x* with mean *m*.

    Despite the name, *v* is used as the standard deviation (it divides
    ``x - m`` and scales the coefficient linearly), not the variance.
    """
    z = (x - m) / v
    coeff = 1 / (v * np.sqrt(2 * np.pi))
    return coeff * np.exp(-0.5 * np.power(z, 2))
def lossfunction(tr, sub): #@CIA june-25-2015
    """Association loss between tracker *tr* and detection *sub*.

    Returns ``(loss, distance)``: ``distance`` is the Euclidean
    distance from the detection's rotated-box centre to the mean
    particle position; ``loss`` is the negative log of a two-component
    Gaussian mixture (best particle weighted 0.20, mean particle
    weighted 0.80) over x and y plus the log of the Bhattacharyya
    distance between the RGB colour-cube histograms.  Infinite losses
    are clamped to 100.
    """
    (cx, cy), (box_h, box_b), box_angle = sub.rot_box
    best = tr.pf.p_star
    mean = tr.pf.p_mean

    # Geometric term: distance to the mean particle.
    distance = np.sqrt(np.power(cx - mean[0], 2) + np.power(cy - mean[1], 2))

    # Mixture weights for best-particle vs mean-particle components.
    w_star = 0.20
    w_mean = 0.80
    # TODO (from original): use the particle spread as sigma instead of
    # the raw state columns (3 for X, 2 for Y).
    mix_x = w_star * norm.pdf(cx, best[0], best[3]) \
        + w_mean * norm.pdf(cx, mean[0], mean[3])
    mix_y = w_star * norm.pdf(cy, best[1], best[2]) \
        + w_mean * norm.pdf(cy, mean[1], mean[2])

    # Appearance term: Bhattacharyya distance between the flattened
    # BGR colour-cube histograms (8 cubes per channel).
    appearance = cv2.compareHist(tr.rgb_cubes.ravel().astype('float32'),
                                 sub.rgb_cubes.ravel().astype('float32'),
                                 cv2.cv.CV_COMP_BHATTACHARYYA)

    # Negative log of the joint (X, Y) assuming independence, plus the
    # log-appearance penalty.
    loss = -np.log(mix_x) - np.log(mix_y) + np.log(appearance)
    if loss == float('Inf') or loss == -float('Inf'):
        loss = 100
    return loss, distance
def normpdf(x, m, v):
    """Probability density of a normal distribution at ``x``.

    ``m`` is the mean.  NOTE(review): ``v`` is used as the standard
    deviation (it divides ``x - m`` and scales the coefficient
    linearly), not the variance its name suggests -- confirm callers
    pass a standard deviation.
    """
    return (1 / (np.sqrt(2 * np.pi) * v)) * np.exp(-(1. / 2) * np.power(
        (x - m) / v, 2))