def MassCenter(data_sel, periods):
    data_bv = data_sel.copy()
    p = np.array(periods) - periods[0]+1
    #Get week's usages
    for i in periods:
        if i!=1:
            data_bv[i] = data_sel[i] - data_sel[i-1]
    
    mass_center = []
    mass_center2 = []
    mass_moment = []
    r_moment = []
            
    for i in range(0, data_bv.shape[0]):
        row = data_bv[periods].iloc[i].values  # .irow() was removed from pandas; use .iloc
        center = (row * p).sum() / row.sum()
        center2 = (row * np.square(p)).sum()
        moment = (row * np.square(p - center)).sum()
        r_m = moment / row.sum()
        mass_center.append(center)
        mass_center2.append(center2)
        mass_moment.append(moment)
        r_moment.append(r_m)
    
    print(data_bv.shape)
    print(data_sel.shape)
        
    return np.array(mass_center), np.array(mass_moment), np.array(r_moment), np.array(mass_center2)
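A minimal usage sketch for MassCenter, assuming a tiny pandas DataFrame with hypothetical cumulative weekly-usage columns keyed by the period numbers 1..3:
# Minimal usage sketch (hypothetical data): cumulative usage columns keyed by period number.
import numpy as np
import pandas as pd

demo = pd.DataFrame({1: [2.0, 5.0], 2: [4.0, 6.0], 3: [9.0, 6.5]})
centers, moments, r_moments, centers2 = MassCenter(demo, [1, 2, 3])
print(centers)  # per-row center of mass of the week-over-week increments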
def dice_(seg, gt):
    intersection = 2. * np.sum(seg * gt)
    denominator = (np.sum(np.square(seg)) + np.sum(np.square(gt)))
    if denominator == 0:
        return 1.
    similarity = intersection / denominator
    return similarity
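A quick sanity check of dice_ on hypothetical binary masks: identical masks score 1.0, and two empty masks fall back to 1.0 through the zero-denominator guard.
# Sanity check with hypothetical binary masks.
import numpy as np

seg = np.array([[1, 1, 0], [0, 1, 0]])
gt = np.array([[1, 0, 0], [0, 1, 1]])
print(dice_(seg, gt))                    # 2*2 / (3+3) ≈ 0.667
print(dice_(seg, seg))                   # identical masks -> 1.0
print(dice_(np.zeros(4), np.zeros(4)))   # empty masks -> 1.0 via the zero-denominator guard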
    def __regionQuery(self, i):
        """
        Searches for points neighboring a given data point i, i.e. within a distance <= epsilon

        :param i: index of the data point whose epsilon region should be searched

        :return: a numpy array containing the indexes of all found neighboring points
        """

        # prepare a buffer object for the indexes of eventually found neighboring points
        regionIndexes = deque()

        # get a numpy iterator with C indexing-style
        npiter = np.nditer([self.data[0], self.data[1]], flags=["c_index"])

        # iterate through the numpy iterator
        for x, y in npiter:
            # if the currently processed point of the iterator is in a circle
            # with the radius of self.epsilon it is a neighboring point
            if np.sqrt(np.square(x - self.data[0][i]) + np.square(y - self.data[1][i])) < self.epsilon:
                regionIndexes.append(npiter.index)  # append the points index to the found neighbors

        points = np.array(regionIndexes)

        # this returns our array (tuple is not necessary)
        return points
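The same epsilon-region query can also be written without an explicit Python loop; a standalone sketch under assumed inputs (plain 1-D coordinate arrays and a made-up epsilon, not the class's actual attributes):
# Standalone, vectorised sketch of the same neighbourhood query (assumed 1-D coordinate arrays).
import numpy as np

def region_query(xs, ys, i, epsilon):
    dists = np.sqrt(np.square(xs - xs[i]) + np.square(ys - ys[i]))
    return np.where(dists < epsilon)[0]

xs = np.array([0.0, 0.1, 2.0])
ys = np.array([0.0, 0.0, 2.0])
print(region_query(xs, ys, 0, 0.5))   # -> [0 1]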
Example #4
 def intercept(self, y, u):
     if self.aspherics is not None:
         return Interface.intercept(self, y, u) # expensive iterative
     # replace the newton-raphson with the analytic solution
     c, k = self.curvature, self.conic
     if c == 0:
         return -y[:, 2]/u[:, 2] # flat
     if not k:
         uy = (u*y).sum(1)
         uu = 1.
         yy = np.square(y).sum(1)
     else:
         k = np.array([(1, 1, 1 + k)])
         uy = (u*y*k).sum(1)
         uu = (np.square(u)*k).sum(1)
         yy = (np.square(y)*k).sum(1)
     d = c*uy - u[:, 2]
     e = c*uu
     f = c*yy - 2*y[:, 2]
     g = np.sqrt(np.square(d) - e*f)
     if self.alternate_intersection:
         g *= -1
     #g *= np.sign(u[:, 2])
     s = -(d + g)/e
     return s
    def compute_distances_no_loops(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using no explicit loops.

        Input / Output: Same as compute_distances_two_loops
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        #########################################################################
        # TODO:                                                                 #
        # Compute the l2 distance between all test points and all training      #
        # points without using any explicit loops, and store the result in      #
        # dists.                                                                #
        #                                                                       #
        # You should implement this function using only basic array operations; #
        # in particular you should not use functions from scipy.                #
        #                                                                       #
        # HINT: Try to formulate the l2 distance using matrix multiplication    #
        #       and two broadcast sums.                                         #
        #########################################################################
        X_train_square = np.square(self.X_train).sum(axis=1)
        test_square = np.square(X).sum(axis=1)
        M = -2 * np.dot(X, self.X_train.T)
        dists = np.sqrt(M + X_train_square + test_square[:, np.newaxis])
        #########################################################################
        #                         END OF YOUR CODE                              #
        #########################################################################
        return dists
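The broadcast trick above expands ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y; a small standalone check against a naive loop, using plain broadcasting instead of np.matrix (the array shapes here are made up):
# Standalone check of the broadcasted L2-distance identity against a naive double loop.
import numpy as np

X_test = np.random.rand(4, 3)
X_train = np.random.rand(5, 3)

test_sq = np.square(X_test).sum(axis=1)[:, np.newaxis]   # shape (4, 1)
train_sq = np.square(X_train).sum(axis=1)                # shape (5,)
dists = np.sqrt(np.maximum(test_sq + train_sq - 2 * X_test @ X_train.T, 0))

naive = np.array([[np.sqrt(np.sum(np.square(a - b))) for b in X_train] for a in X_test])
assert np.allclose(dists, naive)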
def plot_robots_ratio_time_micmac(deploy_robots_mic, deploy_robots_mac, deploy_robots_desired, delta_t):
    plot_option = 0 # 0: ratio, 1: cost
    num_iter = deploy_robots_mic.shape[1]
    total_num_robots = np.sum(deploy_robots_mic[:,0,:])
    
    diffmic_sqs = np.zeros(num_iter)
    diffmac_sqs = np.zeros(num_iter)
    diffmic_rat = np.zeros(num_iter)
    diffmac_rat = np.zeros(num_iter)
    for t in range(num_iter):
        diffmic = np.abs(deploy_robots_mic[:,t,:] - deploy_robots_desired)    
        diffmac = np.abs(deploy_robots_mac[:,t,:] - deploy_robots_desired) 
        diffmic_rat[t] = np.sum(diffmic) / total_num_robots       
        diffmic_sqs[t] = np.sum(np.square(diffmic))
        diffmac_rat[t] = np.sum(diffmac) / total_num_robots 
        diffmac_sqs[t] = np.sum(np.square(diffmac))
        
    x = np.arange(0, num_iter) * delta_t
    if plot_option == 0:
        l1, = plt.plot(x, diffmic_rat)
        l2, = plt.plot(x, diffmac_rat)
    if plot_option == 1:
        l1, = plt.plot(x, diffmic_sqs)
        l2, = plt.plot(x, diffmac_sqs)

    plt.xlabel('time [s]')
    plt.ylabel('ratio of misplaced robots')
    plt.legend((l1, l2), ('Micro', 'Macro'))
    plt.show()
def predictions(weather_turnstile):


    features_df = pandas.DataFrame({'Hour': weather_turnstile['Hour'], 
                                    'rain': weather_turnstile['rain'],
                                    'meantempi': weather_turnstile['meantempi'],
                                    'meanwindspdi': weather_turnstile['meanwindspdi'],
                                    'precipi': weather_turnstile['precipi'],
                                    'HourSquared': np.square(weather_turnstile['Hour']),
                                    'meantempiSquared': np.square(weather_turnstile['meantempi']),
                                    'precipiSquared': np.square(weather_turnstile['precipi'])})
    label = weather_turnstile['ENTRIESn_hourly']

    # Adds y-intercept to model
    features_df = sm.add_constant(features_df)

    # add dummy variables of turnstile units to features
    dummy_units = pandas.get_dummies(weather_turnstile['UNIT'], prefix='unit')
    features_df = features_df.join(dummy_units)
    model = sm.OLS(label,features_df)

    results = model.fit()

    prediction = results.predict(features_df)
    return prediction
Example #8
 def render_indicators(self, W, H):
     gl.glBegin(gl.GL_QUADS)
     s = W/40.0
     h = H/40.0
     gl.glColor4f(0,0,0,1)
     gl.glVertex3f(W, 0, 0)
     gl.glVertex3f(W, 5*h, 0)
     gl.glVertex3f(0, 5*h, 0)
     gl.glVertex3f(0, 0, 0)
     def vertical_ind(place, val, color):
         gl.glColor4f(color[0], color[1], color[2], 1)
         gl.glVertex3f((place+0)*s, h + h*val, 0)
         gl.glVertex3f((place+1)*s, h + h*val, 0)
         gl.glVertex3f((place+1)*s, h, 0)
         gl.glVertex3f((place+0)*s, h, 0)
     def horiz_ind(place, val, color):
         gl.glColor4f(color[0], color[1], color[2], 1)
         gl.glVertex3f((place+0)*s, 4*h , 0)
         gl.glVertex3f((place+val)*s, 4*h, 0)
         gl.glVertex3f((place+val)*s, 2*h, 0)
         gl.glVertex3f((place+0)*s, 2*h, 0)
     true_speed = np.sqrt(np.square(self.car.hull.linearVelocity[0]) + np.square(self.car.hull.linearVelocity[1]))
     vertical_ind(5, 0.02*true_speed, (1,1,1))
     vertical_ind(7, 0.01*self.car.wheels[0].omega, (0.0,0,1)) # ABS sensors
     vertical_ind(8, 0.01*self.car.wheels[1].omega, (0.0,0,1))
     vertical_ind(9, 0.01*self.car.wheels[2].omega, (0.2,0,1))
     vertical_ind(10,0.01*self.car.wheels[3].omega, (0.2,0,1))
     horiz_ind(20, -10.0*self.car.wheels[0].joint.angle, (0,1,0))
     horiz_ind(30, -0.8*self.car.hull.angularVelocity, (1,0,0))
     gl.glEnd()
     self.score_label.text = "%04i" % self.reward
     self.score_label.draw()
Example #9
    def time_std(self):
        if hasattr(self, '_time_std'):
            return self._time_std
        if self.savedir is not None:
            try:
                with open(join(self.savedir, 'time_std.pkl'),
                          'rb') as f:
                    time_std = pickle.load(f)
            except IOError:
                pass
            else:
                # Same protocol as the averages. Make sure the
                # std is a single 4D (zyxc) array and if not just
                # re-calculate the time std.
                if isinstance(time_std, np.ndarray):
                    self._time_std = time_std
                    return self._time_std

        sums = np.zeros(self.frame_shape)
        sums_squares = np.zeros(self.frame_shape)
        counts = np.zeros(self.frame_shape)
        for frame in it.chain.from_iterable(self):
            sums += np.nan_to_num(frame)
            sums_squares += np.square(np.nan_to_num(frame))
            counts[np.isfinite(frame)] += 1
        means = old_div(sums, counts)
        mean_of_squares = old_div(sums_squares, counts)
        std = np.sqrt(mean_of_squares-np.square(means))
        if self.savedir is not None and not self._read_only:
            with open(join(self.savedir, 'time_std.pkl'), 'wb') as f:
                pickle.dump(std, f, pickle.HIGHEST_PROTOCOL)
        self._time_std = std
        return self._time_std
Example #10
def ratio_err(top,bottom,top_low,top_high,bottom_low,bottom_high):
    #uses simple propagation of errors (partial derivatives)
    #note it returns errorbars, not interval

    #-make sure input is numpy arrays-
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)

    #-calculate errorbars-
    top_errlow = np.subtract(top,top_low)
    top_errhigh = np.subtract(top_high,top)
    bottom_errlow = np.subtract(bottom,bottom_low)
    bottom_errhigh = np.subtract(bottom_high,bottom)

    #-calculate ratio_low-
    ratio_low  = np.sqrt( np.square(np.divide(top_errlow,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errlow)) )
    #-calculate ratio_high-
    ratio_high = np.sqrt( np.square(np.divide(top_errhigh,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errhigh)) )
#    ratio_high = ((top_errhigh/bottom)**2.0 + (top/(bottom**2.0))*bottom_errhigh)**2.0)**0.5

    # return two vectors, err_low and err_high
    return ratio_low,ratio_high
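A minimal usage sketch for ratio_err with hypothetical measurements and asymmetric confidence bounds:
# Hypothetical usage: error bars on the ratio of two measured quantities.
import numpy as np

top = [10.0, 20.0]
top_low, top_high = [9.0, 18.0], [11.5, 21.0]
bottom = [5.0, 4.0]
bottom_low, bottom_high = [4.5, 3.8], [5.5, 4.3]

err_low, err_high = ratio_err(top, bottom, top_low, top_high, bottom_low, bottom_high)
print(np.divide(top, bottom))   # central ratios
print(err_low, err_high)        # propagated lower/upper error bars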
    def test_meanSquaredDisplacement(self):
        from getMeanSquareDisplacement import getMeanSquareDisplacement
        numTradingDays = 4*getNumTradingDaysPerYear()
        growthRate     = 0.5
        logVolatility  = 2.0**-8
        numCols = 15000
        
        p = makeFakeDailyPrices(growthRate,logVolatility,numTradingDays,numCols)
        
        
        Ex21 = np.mean(getMeanSquareDisplacement(np.log(p),10),1)
        t = np.arange(10)
        volTerm = t*np.square(logVolatility)
        driftTerm = np.square(t*np.log(1.0+growthRate)/getNumTradingDaysPerYear())
        Ex20 = volTerm + driftTerm

        error = np.sqrt(np.mean(np.square((Ex21[1:] - Ex20[1:])/Ex20[1:])))

        self.assertLess(error,0.002)

        if 0:    
            import matplotlib.pyplot as plt
            print('MSE =', np.round(100 * error, 3), '%')
            plt.plot(t,Ex21,'ro ')
            plt.plot(t,Ex20,'g:')
            plt.show()
Example #12
    def update(self):
        rho = self.opt_config.rho
        epsilon = self.opt_config.epsilon
        lr = self.opt_config.lr
        clip = self.opt_config.clip

        all_norm = 0.0
        for param_name in self.apollo_net.active_param_names():
            param = self.apollo_net.params[param_name]
            grad = param.diff
            all_norm += np.sum(np.square(grad))
        all_norm = np.sqrt(all_norm)

        for param_name in self.apollo_net.active_param_names():
            param = self.apollo_net.params[param_name]
            grad = param.diff

            if all_norm > clip:
                grad = clip * grad / all_norm

            if param_name in self.sq_grads:
                self.sq_grads[param_name] = (1 - rho) * np.square(grad) + rho * self.sq_grads[param_name]
                rms_update = np.sqrt(self.sq_updates[param_name] + epsilon)
                rms_grad = np.sqrt(self.sq_grads[param_name] + epsilon)
                update = -rms_update / rms_grad * grad

                self.sq_updates[param_name] = (1 - rho) * np.square(update) + rho * self.sq_updates[param_name]
            else:
                self.sq_grads[param_name] = (1 - rho) * np.square(grad)
                update = np.sqrt(epsilon) / np.sqrt(epsilon + self.sq_grads[param_name]) * grad
                self.sq_updates[param_name] = (1 - rho) * np.square(update)

            param.data[...] += lr * update
            param.diff[...] = 0
Example #13
def outlierCleaner(predictions, ages, net_worths):
    """
        clean away the 10% of points that have the largest
        residual errors (difference between the prediction
        and the actual net worth)

        return a list of tuples named cleaned_data where 
        each tuple is of the form (age, net_worth, error)
    """
    
    cleaned_data = []
    
    # calculate threshold of 90%    
    residual_errors = net_worths - predictions 
    residual_errors_square = np.square(residual_errors)
    residual_errors_square.sort(axis = 0)
#    print residual_errors_square
    
    percentile_90_index = int(len(residual_errors_square) * .9)
    percentile_90_threshold = residual_errors_square[percentile_90_index - 1][0]
#    print "threshold", percentile_90_threshold
    
    cleaned_data_all = zip(ages[:, 0].tolist(), net_worths[:, 0].tolist(), residual_errors[:, 0].tolist())
    
#    count = 0
    
    for e in cleaned_data_all: 
        (age, net_worth, error) = e
        if np.square(error) <= percentile_90_threshold: 
#            print error, percentile_90_threshold
            cleaned_data.append(e)
#            count += 1
#    print count
    return cleaned_data
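A small usage sketch for outlierCleaner with hypothetical data; the function indexes column 0, so the inputs are assumed to be (n, 1) column vectors:
# Hypothetical usage: (n, 1) column vectors with one planted outlier.
import numpy as np

ages = np.arange(10, dtype=float).reshape(-1, 1)
net_worths = 6.25 * ages + np.random.normal(0, 1, ages.shape)
net_worths[3] += 50.0                    # plant one obvious outlier
predictions = 6.25 * ages                # stand-in for a fitted regression line
cleaned = outlierCleaner(predictions, ages, net_worths)
print(len(cleaned))                      # 9 of the 10 points survive the 90% cut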
Example #14
def genEmpCov_kernel(sigma, width, sample_set, knownMean = True):
    timesteps = len(sample_set)
#    print timesteps
    mean_tile = 0
    K_sum = 0
    if knownMean != True:
        for j in range(int(max(0,timesteps-width)),timesteps):            
            K =  np.exp(-np.square(timesteps-j-1)/sigma)
            samplesPerStep = sample_set[j].shape[1]
            mean_tile = mean_tile + K* sample_set[j]
            K_sum = K_sum + K
            
        mean_tile =  np.sum(mean_tile, axis = 1)/samplesPerStep
        mean_tile = np.tile(mean_tile, (samplesPerStep,1)).T
    K_sum = 0
    S = 0
#    print 'timesteps and width is %d, %d, %d'%(timesteps,width, max(0,timesteps- width))
   
    for j in range(int(max(0,timesteps-width)),timesteps):
        K = np.exp(-np.square(timesteps-j-1)/sigma)
#        print 'j = ',j, 'K = ', K
        samplesPerStep = sample_set[j].shape[1]
        S = S + K*np.dot(sample_set[j]- mean_tile, (sample_set[j] -  mean_tile).T)/samplesPerStep
        K_sum = K_sum + K
    S = S/K_sum
    return S
Example #15
    def setup(self):
        self.add_parameter(FloatParameter('audio-brightness', 1.0))
        self.add_parameter(FloatParameter('audio-stripe-width', 100.0))
        self.add_parameter(FloatParameter('audio-speed', 0.0))
        self.add_parameter(FloatParameter('speed', 0.01))
        self.add_parameter(FloatParameter('angle-speed', 0.1))
        self.add_parameter(FloatParameter('stripe-width', 20))
        self.add_parameter(FloatParameter('center-orbit-distance', 200))
        self.add_parameter(FloatParameter('center-orbit-speed', 0.1))
        self.add_parameter(FloatParameter('hue-step', 0.1))
        self.add_parameter(IntParameter('posterization', 8))
        self.add_parameter(StringParameter('color-gradient', "[(0,0,1), (0,0,1), (0,1,1), (0,1,1), (0,0,1)]"))
        self.add_parameter(FloatParameter('stripe-x-center', 0.5))
        self.add_parameter(FloatParameter('stripe-y-center', 0.5))
        self.hue_inner = random.random() + 100
        self._center_rotation = random.random()
        self.stripe_angle = random.random()

        cx, cy = self.scene().center_point()
        self.locations = self.scene().get_all_pixel_locations()
        x,y = self.locations.T
        x -= cx
        y -= cy
        self.pixel_distances = np.sqrt(np.square(x) + np.square(y))
        self.pixel_angles = (math.pi + np.arctan2(y, x)) / (2 * math.pi)
        self.pixel_distances /= max(self.pixel_distances)

        super(StripeGradient, self).setup()
Example #16
 def AGD_optimization(self, seed=None):
     if seed is None:
         self.U = np.sqrt(1/float(self.num_factors))*np.random.normal(size=(self.num_drugs, self.num_factors))
         self.V = np.sqrt(1/float(self.num_factors))*np.random.normal(size=(self.num_targets, self.num_factors))
     else:
         prng = np.random.RandomState(seed)
         self.U = np.sqrt(1/float(self.num_factors))*prng.normal(size=(self.num_drugs, self.num_factors))
         self.V = np.sqrt(1/float(self.num_factors))*prng.normal(size=(self.num_targets, self.num_factors))
     dg_sum = np.zeros((self.num_drugs, self.U.shape[1]))
     tg_sum = np.zeros((self.num_targets, self.V.shape[1]))
     last_log = self.log_likelihood()
     for t in range(self.max_iter):
         dg = self.deriv(True)
         dg_sum += np.square(dg)
         vec_step_size = self.theta / np.sqrt(dg_sum)
         self.U += vec_step_size * dg
         tg = self.deriv(False)
         tg_sum += np.square(tg)
         vec_step_size = self.theta / np.sqrt(tg_sum)
         self.V += vec_step_size * tg
         curr_log = self.log_likelihood()
         delta_log = (curr_log-last_log)/abs(last_log)
         if abs(delta_log) < 1e-5:
             break
         last_log = curr_log
Example #17
	def test_quarticspike(self):
		rr = np.square(self.X) + np.square(self.Y)
		r = np.sqrt(rr)
		res = blowup.quartic_spike(r)
		npt.assert_allclose(res[0,0],0.)
		npt.assert_allclose(res[0,self.N//2], 0.)
		npt.assert_allclose(res[self.N//2, self.N//2],1.)
Example #18
    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
        y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
        
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        f = inCoeffs[4]
        g = inCoeffs[5]
        h = inCoeffs[6]
        i = inCoeffs[7]
        j = inCoeffs[8]
        k = inCoeffs[9]
        m = inCoeffs[10]

        try:
            temp = a
            temp += b * numpy.exp(i * x_in + j)
            temp += c * numpy.exp(k * y_in + m)
            temp += d * numpy.square(numpy.exp(i * x_in + j))
            temp += f * numpy.square(numpy.exp(k * y_in + m))
            temp += g * numpy.power(numpy.exp(i * x_in + j), 3.0)
            temp += h * numpy.power(numpy.exp(k * y_in + m), 3.0)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def glrm_set_loss_by_col():
    print("Importing USArrests.csv data...")
    arrestsH2O = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
    arrestsPy = np.array(h2o.as_list(arrestsH2O))
    arrestsH2O.describe()
    
    print("H2O GLRM with loss by column = Absolute, Quadratic, Quadratic, Huber")
    glrm_h2o = h2o.glrm(x=arrestsH2O, k=3, loss="Quadratic", loss_by_col=["Absolute","Huber"], loss_by_col_idx=[0,3], regularization_x="None", regularization_y="None")
    glrm_h2o.show()
    
    fit_y = glrm_h2o._model_json['output']['archetypes'].cell_values
    fit_y_np = [[float(s) for s in list(row)[1:]] for row in fit_y]
    fit_y_np = np.array(fit_y_np)
    fit_x = h2o.get_frame(glrm_h2o._model_json['output']['representation_name'])
    fit_x_np = np.array(h2o.as_list(fit_x))
    
    print("Check final objective function value")
    fit_xy = np.dot(fit_x_np, fit_y_np)
    fit_diff = arrestsPy.__sub__(fit_xy)
    obj_val = np.absolute(fit_diff[:,0]) + np.square(fit_diff[:,1]) + np.square(fit_diff[:,2])
    def huber(a):
        return a*a/2 if abs(a) <= 1 else abs(a)-0.5
    huber = np.vectorize(huber)
    obj_val = obj_val + huber(fit_diff[:,3])
    obj_val = np.sum(obj_val)
    glrm_obj = glrm_h2o._model_json['output']['objective']
    assert abs(glrm_obj - obj_val) < 1e-6, "Final objective was " + str(glrm_obj) + " but should equal " + str(obj_val)
Example #20
def FindOptimalScaleAndTranslationBetweenPointsAndReference(points,pointsRef):
    '''Find the (non-rotational) transformation that best overlaps points and pointsRef
       aka, minimize the distance between:
       (xref[i],yref[i],...)
       and
       (a*x[i]+x0,a*y[i]+y0,...)
       using linear least squares
       
       return the transformation parameters: a,(x0,y0,...)'''
    # Force to array of floats:
    points = np.asarray(points, dtype=float)
    pointsRef = np.asarray(pointsRef, dtype=float)

    # Compute some means:
    pm     = points.mean(axis=0)
    pm2    = np.square(pm)
    prefm  = pointsRef.mean(axis=0)
    p2m    = np.square(points).mean(axis=0)
    pTpref = (points * pointsRef).mean(axis=0)
    
    a = ((   (pm*prefm).sum() - pTpref.sum()   ) /
         #   -------------------------------     # fake fraction bar...
         (        pm2.sum() - p2m.sum()        ))
    p0 = prefm - a*pm
    return a,p0
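A quick check under assumed 2-D points related by a known scale and offset, which the closed-form least-squares fit should recover:
# Quick check: recover a known scale/offset from synthetic 2-D points.
import numpy as np

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [2.0, 3.0]])
points_ref = 2.5 * points + np.array([1.0, -0.5])   # a = 2.5, (x0, y0) = (1.0, -0.5)
a, p0 = FindOptimalScaleAndTranslationBetweenPointsAndReference(points, points_ref)
print(a, p0)   # ≈ 2.5 [ 1.  -0.5]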
def welch_ttest (X, y):

    classes = np.unique(y)
    n_class = len(classes)
    n_feats = X.shape[1]

    b = np.zeros(n_feats)
    for i in np.arange(n_class):
        for j in np.arange(i+1, n_class):
            if j > i:
                xi = X[y == i, :]
                xj = X[y == j, :]
                yi = y[y == i]
                yj = y[y == j]

                mi = np.mean (xi, axis=0)
                mj = np.mean (xj, axis=0)

                vi = np.var  (xi, axis=0)
                vj = np.var  (xj, axis=0)

                n_subjsi = len(yi)
                n_subjsj = len(yj)

                t = (mi - mj) / np.sqrt((vi / n_subjsi) + (vj / n_subjsj))  # Welch t uses variances, not squared variances
                t[np.isnan(t)] = 0
                t[np.isinf(t)] = 0

                b = np.maximum(b, t)

    return b
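A minimal usage sketch for welch_ttest on hypothetical two-class data, where only the first feature separates the classes; b keeps the largest pairwise t-statistic per feature:
# Hypothetical two-class data: feature 0 separates the classes, feature 1 does not.
import numpy as np

rng = np.random.default_rng(0)
X = np.vstack([rng.normal([3.0, 0.0], 1.0, (50, 2)),   # class 0, shifted in feature 0
               rng.normal([0.0, 0.0], 1.0, (50, 2))])  # class 1
y = np.repeat([0, 1], 50)
b = welch_ttest(X, y)
print(b)   # large t-statistic for feature 0, near zero for feature 1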
Example #22
    def __iter__(self):
        
        MAX_X,MAX_Y = self.dimensions
        MIN_V, MAX_V = self.velocity
        
        wt_min = 0.
        
        if self.init_stationary:

            x, y, x_waypoint, y_waypoint, velocity, wt = \
                init_random_waypoint(self.nr_nodes, MAX_X, MAX_Y, MIN_V, MAX_V, wt_min, 
                             (self.wt_max if self.wt_max is not None else 0.))

        else:

            NODES = np.arange(self.nr_nodes)
            print(NODES)
            x = U(0, MAX_X, NODES)
            y = U(0, MAX_Y, NODES)
            x_waypoint = U(0, MAX_X, NODES)
            y_waypoint = U(0, MAX_Y, NODES)
            wt = np.zeros(self.nr_nodes)
            velocity = U(MIN_V, MAX_V, NODES)

        theta = np.arctan2(y_waypoint - y, x_waypoint - x)
        costheta = np.cos(theta)
        sintheta = np.sin(theta)
        
        while True:
            # update node position
            x += velocity * costheta
            y += velocity * sintheta
            # calculate distance to waypoint
            d = np.sqrt(np.square(y_waypoint-y) + np.square(x_waypoint-x))
            # update info for arrived nodes
            arrived = np.where(np.logical_and(d<=velocity, wt<=0.))[0]
            
            # step back for nodes that surpassed waypoint
            x[arrived] = x_waypoint[arrived]
            y[arrived] = y_waypoint[arrived]
            
            if self.wt_max:
                velocity[arrived] = 0.
                wt[arrived] = U(0, self.wt_max, arrived)
                # update info for paused nodes
                wt[np.where(velocity==0.)[0]] -= 1.
                # update info for moving nodes
                arrived = np.where(np.logical_and(velocity==0., wt<0.))[0]
            
            if arrived.size > 0:
                x_waypoint[arrived] = U(0, MAX_X, arrived)
                y_waypoint[arrived] = U(0, MAX_Y, arrived)
                velocity[arrived] = U(MIN_V, MAX_V, arrived)
                theta[arrived] = np.arctan2(y_waypoint[arrived] - y[arrived], x_waypoint[arrived] - x[arrived])
                costheta[arrived] = np.cos(theta[arrived])
                sintheta[arrived] = np.sin(theta[arrived])
            
            self.velocity = velocity
            self.wt = wt
            yield np.dstack((x,y))[0]
Example #23
  def test_infer(self):
    kmeans = self.kmeans
    kmeans.fit(input_fn=self.input_fn(), relative_tolerance=1e-4)
    clusters = kmeans.clusters()

    # Make a small test set
    num_points = 10
    points, true_assignments, true_offsets = make_random_points(clusters,
                                                                num_points)
    # Test predict
    assignments = kmeans.predict(input_fn=self.input_fn(
        batch_size=num_points, points=points))
    self.assertAllEqual(assignments, true_assignments)

    # Test score
    score = kmeans.score(
        input_fn=lambda: (constant_op.constant(points), None), steps=1)
    self.assertNear(score, np.sum(true_offsets), 0.01 * score)

    # Test transform
    transform = kmeans.transform(
        input_fn=lambda: (constant_op.constant(points), None))
    true_transform = np.maximum(
        0,
        np.sum(np.square(points), axis=1, keepdims=True) - 2 * np.dot(
            points, np.transpose(clusters)) +
        np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
    self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
Example #24
    def fitness(self, recordings):
        """
        Calculates the sum squared difference between each spike in the
        signal and the closest spike in the reference spike train, plus the
        vice-versa case

        `analysis` -- The analysis object containing all recordings and
                      analysis of them [analysis.AnalysedRecordings]
        """
        spikes = recordings.get_analysed_signal().spikes()
        inner = spikes[numpy.where(
            (spikes >= (self.time_start + self.time_buffer)) &
            (spikes <= (self.time_stop - self.time_buffer)))]
        # If no spikes were generated create a dummy spike that is guaranteed
        # to be further away from a reference spike than any within the time
        # window
        if len(spikes) == 0:
            spike_t = self.time_stop + self.time_start
            spikes = neo.SpikeTrain([spike_t], spike_t, units=spike_t.units)
        fitness = 0.0
        for spike in inner:
            fitness += float(numpy.square(self.ref_spikes - spike).min())
        for ref_spike in self.ref_inner:
            fitness += float(numpy.square(spikes - ref_spike).min())
        return fitness
Example #25
 def K(self, X, X2=None,alpha=None,variance=None):
     """
     Computes the covariance matrix cov(X[i,:],X2[j,:]).
     
     Args:
         X: Matrix where each row is a point.
         X2: Matrix where each row is a point.
         alpha: It's the scaled alpha.
         Variance: Sigma hyperparameter.
         
     """
     if alpha is None:
         alpha=self.alpha
     if variance is None:
         variance=self.variance
     
     if X2 is None:
         X=X*alpha/self.scaleAlpha
         Xsq=np.sum(np.square(X), 1)
         r=-2.*np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])
         r = np.clip(r, 0, np.inf)
         return variance*np.exp(-0.5*r)
     else:
         X=X*alpha/self.scaleAlpha
         X2=X2*alpha/self.scaleAlpha
         r=-2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
         r = np.clip(r, 0, np.inf)
         return variance*np.exp(-0.5*r)
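The same squared-exponential covariance can be computed standalone; a hedged sketch with made-up lengthscale and variance values rather than the class's fitted hyperparameters:
# Standalone sketch of a squared-exponential (RBF) covariance matrix.
import numpy as np

def rbf_cov(X, X2, lengthscale=1.0, variance=1.0):
    X = X / lengthscale
    X2 = X2 / lengthscale
    r = -2. * np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
    r = np.clip(r, 0, np.inf)
    return variance * np.exp(-0.5 * r)

X = np.array([[0.0], [1.0], [2.0]])
K = rbf_cov(X, X)
print(np.allclose(K, K.T), np.allclose(np.diag(K), 1.0))   # symmetric with unit diagonal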
Example #26
   def analyse(self, a):
      global motion_detected, motion_timestamp, motion_array, motion_array_mask
      # calculate length of motion vectors of mpeg macro blocks
      a = np.sqrt(
          np.square(a['x'].astype(float)) +
          np.square(a['y'].astype(float))
          ).clip(0, 255).astype(np.uint8)
      a = a * motion_array_mask
      # If there're more than 'sensitivity' vectors with a magnitude greater
      # than 'threshold', then say we've detected motion
      th = ((a > motion_threshold).sum() > motion_sensitivity)
      now = time.time()
      # motion logic, trigger on motion and stop after 2 seconds of inactivity
      if th:
         motion_timestamp = now

      if motion_detected:
          if (now - motion_timestamp) >= video_postseconds:
               motion_detected = False
      else:
        if th:
             motion_detected = True
        if debug:
                idx = a > motion_threshold
                a[idx] = 255
                motion_array = a
Example #27
 def hit(self, ray):
     # assume sphere at origin, so translate ray:
     raypoint = ray.point - self.point
     p0 = raypoint[0]
     p1 = raypoint[1]
     p2 = raypoint[2]
     v0 = ray.vector[0]
     v1 = ray.vector[1]
     v2 = ray.vector[2]
     a = ((N.square(v0))/(N.square(self.A))) + ((N.square(v1))/(N.square(self.B))) + ((N.square(v2))/(N.square(self.C)))
     b = ((2*p0*v0)/(N.square(self.A))) + ((2*p1*v1)/(N.square(self.B))) + ((2*p2*v2)/(N.square(self.C)))
     c = ((N.square(p0))/(N.square(self.A))) + ((N.square(p1))/(N.square(self.B))) + ((N.square(p2))/(N.square(self.C))) - 1
     disc = b*b - 4*a*c
     if disc > 0.0:
         t = (-b-N.sqrt(disc))/(2*a)
         if t > EPSILON:
             p = ray.pointAt(t)
             n = normalize(self.normalAt(p))
             return (t, p, n, self)
         t = (-b+N.sqrt(disc))/(2*a)
         if t > EPSILON:
             p = ray.pointAt(t)
             n = normalize(self.normalAt(p))
             return (t, p, n, self)
     return (None, None, None, None)
Example #28
def sphDist(ra1, dec1, ra2, dec2):
    """Calculate distance on the surface of a unit sphere.

    Input and Output are in radians.

    Notes
    -----
    Uses the Haversine formula to preserve accuracy at small angles.

    Law of cosines approach doesn't work well for the typically very small
    differences that we're looking at here.
    """
    # Haversine
    dra = ra1-ra2
    ddec = dec1-dec2
    a = np.square(np.sin(ddec/2)) + \
        np.cos(dec1)*np.cos(dec2)*np.square(np.sin(dra/2))
    dist = 2 * np.arcsin(np.sqrt(a))

    # This is what the law of cosines would look like
#    dist = np.arccos(np.sin(dec1)*np.sin(dec2) + np.cos(dec1)*np.cos(dec2)*np.cos(ra1 - ra2))

    # Could use afwCoord.angularSeparation()
    #  but (a) that hasn't been made accessible through the Python interface
    #  and (b) I'm not sure that it would be faster than the numpy interface.
    #    dist = afwCoord.angularSeparation(ra1-ra2, dec1-dec2, np.cos(dec1), np.cos(dec2))

    return dist
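A quick check of sphDist on hypothetical coordinates: two points one degree apart in declination should be about 0.01745 rad apart.
# Quick check: a pure 1-degree declination offset gives the same 1-degree separation.
import numpy as np

d = sphDist(np.radians(10.0), np.radians(20.0), np.radians(10.0), np.radians(21.0))
print(d, np.radians(1.0))   # both ≈ 0.017453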
def energy(x, y, z):
    ex = np.sqrt(np.sum(np.square(np.subtract(x, np.mean(x)))))
    ey = np.sqrt(np.sum(np.square(np.subtract(y, np.mean(y)))))
    ez = np.sqrt(np.sum(np.square(np.subtract(z, np.mean(z)))))

    e = (1. / (3 * len(x))) * (ex + ey + ez)
    return e
Example #30
def pick_triplets_impl(q_in, q_out):
  more = True
  while more:
      deq = q_in.get()
      if deq is None:
        more = False
      else:
        embeddings, emb_start_idx, nrof_images, alpha = deq
        print('running', emb_start_idx, nrof_images, os.getpid())
        for j in range(1, nrof_images):
            a_idx = emb_start_idx + j - 1
            neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            for pair in range(j, nrof_images): # For every possible positive pair.
                p_idx = emb_start_idx + pair
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
                neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.nan
                all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0]  # FaceNet selection
                #all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selecction
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs>0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    #triplets.append( (a_idx, p_idx, n_idx) )
                    q_out.put( (a_idx, p_idx, n_idx) )
        #emb_start_idx += nrof_images
  print('exit',os.getpid())
Example #31
def _squared_error(x, y):
    return np.square(x - y)
Example #32
def balldrop_analysis():

    # Load truth
    fdir = r'C:\Users\Steve\Documents\research\lp_norm\test\balldrop'
    fname = 'balldrop_inputs_and_truth.pkl'
    inputs_file = os.path.join(fdir, fname)
    pklFile = open(inputs_file, 'rb')
    data = pickle.load(pklFile)
    truth_dict = data[2]
    pklFile.close()

    # Load filter results
    pnorm = 1.5
    n_outliers = 10.
    fname = 'ubatch_balldrop_output_' + str(pnorm) + 'norm_' + \
        str(int(n_outliers)).zfill(2) + 'p.pkl'
    out_file = os.path.join(fdir, fname)
    pklFile = open(out_file, 'rb')
    data = pickle.load(pklFile)
    filter_output = data[0]
    pklFile.close()

    # Compute errors
    ti_list = sorted(filter_output.keys())
    pos_err = np.zeros(len(ti_list))
    vel_err = np.zeros(len(ti_list))
    pos_sig = np.zeros(len(ti_list))
    vel_sig = np.zeros(len(ti_list))
    resid_array = np.zeros(len(ti_list))
    ii = 0
    for ti in ti_list:
        X_est = filter_output[ti]['X']
        P_est = filter_output[ti]['P']
        resids = filter_output[ti]['resids']

        X_true = truth_dict[ti]

        pos_err[ii] = float(X_est[0] - X_true[0])
        vel_err[ii] = float(X_est[1] - X_true[1])
        pos_sig[ii] = np.sqrt(P_est[0, 0])
        vel_sig[ii] = np.sqrt(P_est[1, 1])
        resid_array[ii] = float(resids[0])

        ii += 1

    # Compute RMS errors
    RMS_pos = np.sqrt(np.mean(np.square(pos_err)))
    RMS_vel = np.sqrt(np.mean(np.square(vel_err)))
    RMS_resid1 = np.sqrt(np.mean(np.square(resid_array)))

    print('RMS Results')
    print('RMS pos', RMS_pos)
    print('RMS vel', RMS_vel)
    print('RMS resid 1 (pos)', RMS_resid1)

    # Plot Position and Velocity Errors
    plt.figure()
    plt.subplot(3, 1, 1)
    plt.plot(ti_list, pos_err, 'k.')
    plt.plot(ti_list, 3. * pos_sig, 'k--')
    plt.plot(ti_list, -3. * pos_sig, 'k--')
    plt.ylabel('Position Error [m]')

    plt.subplot(3, 1, 2)
    plt.plot(ti_list, vel_err, 'k.')
    plt.plot(ti_list, 3. * vel_sig, 'k--')
    plt.plot(ti_list, -3. * vel_sig, 'k--')
    plt.ylabel('Velocity Error [m/s]')

    plt.subplot(3, 1, 3)
    plt.plot(ti_list, resid_array, 'k.')
    plt.ylabel('Post-Fit Resids [m]')
    plt.xlabel('Time [sec]')

    plt.show()

    return
 def is_collision_obstacle(self, agent, obstacle):
     delta_pos = agent.state.p_pos - obstacle.state.p_pos
     dist = np.sqrt(np.sum(np.square(delta_pos)))
     dist_min = agent.size + obstacle.size
     return dist < dist_min + 0.02
 def is_collision_agent(self, agent1, agent2):
     delta_pos = agent1.state.p_pos - agent2.state.p_pos
     dist = np.sqrt(np.sum(np.square(delta_pos)))
     dist_min = agent1.size + agent2.size
     return dist < dist_min + 0.01
Example #35
    def visualize(self):
        mag = np.square(np.abs(self.psi))
        mag = np.power(mag, 0.5)

        plt.imshow(mag.T)
        plt.savefig(f'{self.__class__.__name__}.png')
Example #36
    weights_after = model.state_dict()
    outerstepsize = outerstepsize0 * (1 - iteration / niterations) # linear schedule
#     outerstepsize=outerstepsize0
    model.load_state_dict({name : 
        weights_before[name] + (weights_after[name] - weights_before[name]) * outerstepsize  # decreased update with iter increase
        for name in weights_before})

# Periodically plot the results on a particular task and minibatch
# if plot and iteration==0 or (iteration+1) % 1000 == 0:
plt.cla()
f = f_plot
weights_before = deepcopy(model.state_dict()) # save snapshot before evaluation
plt.plot(x_all, predict(x_all), label="pred after 0", color=(0,0,1))
for inneriter in range(32):
    train_on_batch(xtrain_plot, f(xtrain_plot))
    if (inneriter+1) % 8 == 0:
        frac = (inneriter+1) / 32
        plt.plot(x_all, predict(x_all), label="pred after %i"%(inneriter+1), color=(frac, 0, 1-frac))
plt.plot(x_all, f(x_all), label="true", color=(0,1,0))
lossval = np.square(predict(x_all) - f(x_all)).mean()
plt.plot(xtrain_plot, f(xtrain_plot), "x", label="train", color="k")
plt.ylim(-4,4)
plt.legend(loc="lower right")
# plt.pause(0.01)
plt.savefig('{}_2.png'.format(niterations))
plt.show()
model.load_state_dict(weights_before) # restore from snapshot
print('-----------------------------')
print('iteration {}'.format (iteration+1))
print('loss on plotted curve  {}'.format(lossval)) # would be better to average loss over a set of examples, but this is optimized for brevity
Example #37
def _mse(p, y): 
    return np.mean(np.square(p-y)) 
loss_history = []
train_acc_history = []
val_acc_history = []
seed = 0
rng = np.random.default_rng(seed=seed)
for t in range(iterations):
    time.sleep(t1 / 1000)
    count += 1

    indices = np.arange(Ntr)
    rng.shuffle(indices)
    x = x_train[indices]
    y = y_train[indices]
    h = 1.0 / (1.0 + np.exp(-(x.dot(w1) + b1)))
    y_pred = h.dot(w2) + b2
    loss = 1. / batch_size * np.square(y_pred - y).sum() + reg * (np.sum(w2 * w2) + np.sum(w1 * w1))
    loss_history.append(loss)
    if t % 10 == 0:
        print('iteration %d / %d: loss %f' % (t, iterations, loss))
        print('Learning rate -', 60 * count / (time.time() - start), 'epochs per minute')
    dy_pred = 1. / batch_size * 2.0 * (y_pred - y)
    dw2 = h.T.dot(dy_pred) + reg * w2
    db2 = dy_pred.sum(axis=0)
    dh = dy_pred.dot(w2.T)
    dw1 = x.T.dot(dh * h * (1 - h)) + reg * w1
    db1 = (dh * h * (1 - h)).sum(axis=0)
    w1 -= lr * dw1
    w2 -= lr * dw2
    b1 -= lr * db1
    b2 -= lr * db2
    lr *= lr_decay
Example #39
def MSE_loss(y, y_hat):
    """ 这里写你的代码 """
    return np.mean(np.square(y - y_hat))
Example #40
 def cal_euc_dis(self, A, B):
     return np.sqrt(np.sum(np.square(A - B)))
def test_quantization_error():
    image = np.random.uniform(0, 255, (500, 500))
    quantized = quantize(image)

    variance = np.sqrt(np.mean(np.square(image - quantized)))
    assert variance == pytest.approx(1 / (np.sqrt(12)), rel=0.01)
Example #42
def getRMS(vals):
    if np.size(vals) != 0:
        return np.sqrt(np.mean(np.square(vals)))
    else:
        return 0
Example #43
                            n1_node_attrs = np.column_stack(
                                [node_attrs, tmp_goal])
                            n_s = s
                            n_s1 = s1
                            n_s[0][:, 20] = n_g
                            n_s1[0][:, 20] = n_g
                        else:
                            n_s = s
                            n_s1 = s1
                            n_s[0][:, -1] = n_g
                            n_s1[0][:, -1] = n_g
                        # print('afterHER##########################################################################non_const_features', n_s)
                        final = (num_infl >= n_g)
                        if shaped_reward:
                            n_r = num_infl / g_n if final else -np.sum(
                                np.square(np.array(s1) == np.array(g_n)))
                        else:
                            n_r = num_infl / g_n if final else -1
                        # td = acmodel.td_compute(s, actual_action_embed, r1, s1,
                        #                         s_embs[get_action(s1, s_embs, env.possible_actions)[0]])
                        # td = acmodel.td_compute(n_s, actual_action_embed, n_r, n_s1,
                        #                         s_embs[get_action(n_s1, s_embs, env.possible_actions)[0]])
                        replay.add(n_s,
                                   actual_action_embed,
                                   n_r,
                                   n_s1,
                                   s_embs,
                                   actual_action,
                                   td=np.abs(td))

                # if (ep == 0 and stps < 2) or replay.size > batch_size:
import matplotlib.pyplot as plt
import scipy.signal as signal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import warnings
warnings.filterwarnings('ignore')
get_ipython().magic(u'matplotlib inline')


# load acceleration datasets and initialize variables
trainingSetAcc = np.array(["ACC_UNA.csv", "ACC_JES.csv", "ACC_DDP.csv", "ACC_ENN.csv", "ACC_TJR.csv"])
trainingSetSignatures = []
acc, magACC, tACC = [], [], []

for i in range(len(trainingSetAcc)):
    acc.append(pd.read_csv(trainingSetAcc[i], header=0, sep=',').values)
    magACC.append(np.sqrt(np.square(acc[i][1:,:]).sum(axis=1)))
    tACC.append(np.linspace(1,60, len(acc[i][1::, 0].astype(float))))


# extract three samples from Participant 1
UA_example_0 = magACC[0][400:650]
UA_example_1 = magACC[0][850:1100]
UA_example_2 = magACC[0][1150:1450]

# compile samples, normalizing their length to the length of the shortest sample
UA_examples = np.array([UA_example_0[0:250], UA_example_1[0:250], UA_example_2[0:250]])
t_acc = range(0, 250)

# define mean gait
UA_signature = np.mean(UA_examples, axis=0)
Example #45
def rms(Z):
    rms = __np__.sqrt(__np__.mean(__np__.square(Z)))
    return rms
Example #46
def mse(y, y_pred):
    y = y.ravel()
    y_pred = y_pred.ravel()
    return np.square(y - y_pred).mean()
Example #47
for i in range(99):
    mod = lr()
    print("-")
    print(sample_train_pca.shape,label_train.shape)
    print("-")
    mod.fit(np.dot(sample_train, testk), label_train)

    # Plot Figure 1 based on w1 and w2
    #print(w_train[0].shape)
    #wt1 = w_train[0]#.reshape(-1,1)
    #wt1 = np.transpose(wt1)
    #wt2 = w_train[1]#.reshape(-1,1)
    #print(wt1.shape, wt2.shape)
    sample_pca_1 = mod.predict(np.dot(sample_test,testk))  # this is a n-by-1 vector; each row is one instance and the value is its projection on w1 (1st pca feature)
    #sample_pca_2 = mod.predict(np.dot(sample_test,wt2)) # same, but projection on w2
    error = np.square(sample_pca_1-label_test).mean()
    test.append(error)
    testk=testk[:,0:-1]

# now, plot data distribution based on these two features
test.reverse()
mpl.plot(test,label="MSE Error")
#mpl.plot(sample_pca_2,label="w2")
#mpl.title("W1 and W2")
mpl.ylabel("MSE")
mpl.xlabel("Number of instances used")
mpl.legend()
mpl.show()
mpl.clf()

Example #48
def sphere_normalize(x_data):
    array = np.zeros(x_data.shape)
    for i in range(len(x_data)):
        array[i] = x_data[i] / np.sqrt(np.sum(np.square(x_data[i] * 1.0)))
    return array
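A quick check that sphere_normalize projects each row of some hypothetical data onto the unit sphere:
# Quick check: every normalised row has unit Euclidean norm.
import numpy as np

x_data = np.random.rand(5, 3) + 0.1
normed = sphere_normalize(x_data)
print(np.sqrt(np.sum(np.square(normed), axis=1)))   # ≈ [1. 1. 1. 1. 1.]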
Example #49
XF = Xtrain
#vars
yf = ytrain.to_numpy()  # .as_matrix() was removed from pandas; use .to_numpy()
XF = XF.to_numpy()
XFT = np.transpose(XF)
#calculo de coeficiente
XTXi = np.matrix(np.dot(XFT, XF)).getI()  #(XTX)-1
coef = np.dot(XTXi, XFT)  #(XTX)-1XT
coef = np.dot(coef, yf)  #B = (XTX)-1XTy
coef = np.transpose(coef)

yhat = np.dot(XF, coef)
yhat = np.transpose(yhat)
#yf
var = yf - yhat
#print XF.shape
N = XF.shape[0]
p = XF.shape[1]
std = np.sqrt((1. / (N - p - 1)) * np.sum(np.square(var)))

print(std)

#print np.matrix(np.diag(XTXi))
zscore = coef / np.transpose(np.matrix(np.sqrt(np.diag(XTXi))))
zscore = zscore / std
#print zscore
#print zscore.shape

print(coef)
print(zscore)
Example #50
from context import modest as md
import matplotlib.pyplot as plt
import numpy as np
#np.random.seed(1)
plt.close('all')

orbitPeriod = 10000 / (2 * np.pi)
orbitAmplitude = 0

constantVelocity = -20
constantAcceleration = -0.001

tFinal = 3600 * 24 * 1
speedOfLight = 299792
velocityStdDev = 30 / speedOfLight
vVar = np.square(velocityStdDev)
nTaps = 9
aStdDev = 1e-2 / speedOfLight
myProfile = './pulsarData/profiles/J0534+2200_profile.txt'
myPARFile = '/home/joel/Documents/pythonDev/research/pulsarJPDAF/Data/2019_03_15_22h02m05s_chandraPhaseFrequency/ephem_B1509-58_chandra5515.par'

# myPARFile = '/home/joel/Documents/pythonDev/research/pulsarJPDAF/Data/2019_03_25_10h08m33s_chandraPhaseFrequency/ephem_B0540-6919_chandra1735.par'
# myProfile = './pulsarData/profiles/sinProfile.txt'
myPARFile = '/home/joel/Documents/pythonDev/research/pulsarJPDAF/pulsarData/PAR_files/ephem_B0540-6919_chandra1735.par'

detectorArea = 400  # cm^2
electronVoltPerPhoton = 6e3  # Electron-Volt x 10^3
electronVoltPerErg = 6.242e11
ergsPerElectronVolt = 1 / electronVoltPerErg

myFlux = 9.93e-9  # erg/cm^2/s
Example #51
def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


# generate fake data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.5, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise  # add the noise generated above

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# add the hidden layer and the output layer
# hidden layer
layer1Result = add_layer(xs, 1, 10, activation_function=tf.nn.relu)

# output layer
tPrediction = add_layer(layer1Result, 10, 1, activation_function=None)

# loss function
loss = tf.reduce_mean(
    tf.reduce_sum(tf.square(ys - tPrediction), reduction_indices=[1]))
tf.summary.scalar('loss', loss)
Example #52
def calcRMS(log, directory, subdir):
    file = open("/home/superjax/Documents/inertial_sense/config.yaml", 'r')
    config = yaml.safe_load(file)
    directory = config["directory"]
    serials = config['serials']

    numDev = len(log.devices)
    debug = True
    np.set_printoptions(linewidth=200)
    averageRMS = []
    compassing = False
    navMode = (log.devices[0].data['ins2']['iStatus'] & 0x1000)[-1]
    if numDev > 1:

        print("\nComputing RMS Accuracies: (%d devices)" % (numDev))

        # Build a 3D array of the data.  idx 0 = Device,    idx 1 = t,     idx 2 = [t, lla, uvw, log(q)]
        data = [np.hstack((log.devices[i].data['ins2']['tow'][:,None],
                           log.devices[i].data['ins2']['lla'],
                           log.devices[i].data['ins2']['uvw'],
                           log.devices[i].data['ins2']['q'])) for i in range(numDev)]

        # Make sure that the time stamps are realistic
        for dev in range(numDev):
            if (np.diff(data[dev][:,0]) > 10.0).any():
                print("large gaps in data for dev", dev, "chopping off data before gap".format(dev))
                idx = np.argmax(np.diff(data[dev][:,0])) + 1
                data[dev] = data[dev][idx:,:]

        min_time = max([np.min(data[i][:,0]) for i in range(numDev)])
        max_time = min([np.max(data[i][:,0]) for i in range(numDev)])


        # If we are in compassing mode, then only calculate RMS after all devices have fix
        if log.devices[0].data['flashConfig']['RTKCfgBits'][-1] == 8:
            compassing = True
            time_of_fix_ms = [dev.data['gps1RtkCmpRel']['timeOfWeekMs'][np.argmax(dev.data['gps1RtkCmpRel']['arRatio'] > 3.0)] / 1000.0 for dev in log.devices]
            # print time_of_fix_ms
            min_time = max(time_of_fix_ms)


        # only take the second half of the data
        min_time = max_time - (max_time - min_time)/2.0

        # Resample at a steady 100 Hz
        dt = 0.01
        t = np.arange(1.0, max_time - min_time - 1.0, dt)
        for i in range(numDev):
            # Chop off extra data at beginning and end
            data[i] = data[i][data[i][:, 0] > min_time]
            data[i] = data[i][data[i][:, 0] < max_time]

            # Chop off the min time so everything is wrt to start
            data[i][:,0] -= min_time

            # Interpolate data so that it has all the same timestamps
            fi = interp1d(data[i][:,0], data[i][:,1:].T, kind='cubic', fill_value='extrapolate', bounds_error=False)
            data[i] = np.hstack((t[:,None], fi(t).T))

            # Normalize Quaternions
            data[i][:,7:] /= norm(data[i][:,7:], axis=1)[:,None]

        # Make a big 3D numpy array we can work with [dev, sample, data]
        data = np.array(data)


        # Convert lla to ned using first device lla at center of data as reference
        refLla = data[0, int(round(len(t) / 2.0)), 1:4].copy()
        for i in range(numDev):
            data[i, :, 1:4] = lla2ned(refLla, data[i, :, 1:4])

        # Find Mean Data
        means = np.empty((len(data[0]), 10))
        means[:,:6] = np.mean(data[:,:,1:7], axis=0) # calculate mean position and velocity across devices
        means[:,6:] = meanOfQuatArray(data[:,:,7:].transpose((1,0,2))) # Calculate mean attitude of all devices at each timestep

        # calculate the attitude error for each device
        att_error = np.array([qboxminus(data[dev,:, 7:], means[:, 6:]) for dev in range(numDev)])
        # Calculate the Mounting Bias for all devices (assume the mounting bias is the mean of the attitude error)
        mount_bias = np.mean(att_error, axis=1)
        if compassing:
            # When in compassing, assume all units are sharing the same GPS antennas and should therefore have
            # no mounting bias in heading
            mount_bias[:,2] = 0

        # Adjust all attitude errors to the mean by the mounting bias
        # TODO: Talk to Walt about the mount bias - because this probably includes more biases than just the mount bias
        att_error -= mount_bias[:,None,:]

        if debug:
            colors = ['r', 'g', 'b', 'm']
            plt.figure()
            plt.subplot(3,1,1) # Position
            plt.title("position error")
            for m in range(3):
                for n in range(numDev):
                    plt.plot(data[n,:,0], data[n, :, m+1], color = colors[m])
                plt.plot(data[0,:,0], means[:, m], linewidth=2, color = colors[m])
            plt.subplot(3,1,2)
            plt.title("velocity error")
            for m in range(3):
                for n in range(numDev):
                    plt.plot(data[n,:,0], data[n, :, m+4], color = colors[m] )
                plt.plot(data[0,:,0], means[:, m+3], linewidth=2, color = colors[m])
            plt.subplot(3,1,3)
            plt.title("attitude")
            for m in range(4):
                for n in range(numDev):
                    plt.plot(data[n,:,0], data[n, :, m+7], color = colors[m])
                plt.plot(data[0,:,0], means[:, m+6], linewidth=2, color = colors[m])

            plt.figure()
            for m in range(3):
                plt.subplot(3, 1, m +1)
                for n in range(numDev):
                    plt.plot(att_error[n, :, m])
            plt.show()

        # RMS = sqrt ( 1/N sum(e^2) )
        RMS = np.empty((numDev, 9))
        # Calculate RMS for position and velocity
        RMS[:,:6] = np.sqrt(np.mean(np.square(data[:, :, 1:7] - means[:,0:6]), axis=1))
        # Calculate RMS for attitude
        RMS[:,6:] = np.sqrt(np.mean(np.square(att_error[:, :, :]), axis=1))

        # Average RMS across devices
        averageRMS = np.mean(RMS, axis=0)

        print("average RMS = ", averageRMS)

        # Convert Attitude Error To Euler Angles
        RMS_euler = RMS[:,6:] # quat2eulerArray(qexp(RMS[:,6:]))
        averageRMS_euler = averageRMS[6:] #quat2eulerArray(qexp(averageRMS[None,6:]))[0]
        mount_bias_euler = mount_bias #quat2eulerArray(qexp(mount_bias))


        # Below is creating the RMS report
        thresholds = np.array([0.2, 0.2, 0.2, # LLA
                               0.2, 0.2, 0.2, # UVW
                               0.1, 0.1, 2.0]) # ATT (rpy) - (deg)
        if navMode or compassing:
            thresholds[8] = 0.3 # Higher heading accuracy
        else:
            thresholds[:6] = np.inf

        thresholds[6:] *= DEG2RAD # convert degrees threshold to radians



        specRatio = averageRMS / thresholds

        filename = os.path.join(directory, 'RMS_report_new.txt')
        f = open(filename, 'w')
        f.write('*****   Performance Analysis Report - %s   *****\n' % (subdir))
        f.write('\n')
        f.write('Directory: %s\n' % (directory))
        mode = "AHRS"
        if navMode: mode = "NAV"
        if compassing: mode = "DUAL GNSS"
        f.write("\n")

        # Print Table of RMS accuracies
        line = 'Device       '
        if navMode:
            f.write(
                '--------------------------------------------------- RMS Accuracy -------------------------------------------\n')
            line = line + 'UVW[  (m/s)   (m/s)   (m/s) ],  NED[    (m)     (m)     (m) ],'
        else:  # AHRS mode
            f.write('-------------- RMS Accuracy --------------\n')
        line = line + ' Att [  (deg)   (deg)   (deg) ]\n'
        f.write(line)

        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            line = '%2d SN%d      ' % (n, devInfo.v['serialNumber'][-1])
            if navMode:
                line = line + '[ %6.4f  %6.4f  %6.4f ],     ' % ( RMS[n, 3], RMS[n, 4], RMS[n, 5])
                line = line + '[ %6.4f  %6.4f  %6.4f ],     ' % ( RMS[n, 0], RMS[n, 1], RMS[n, 2])
            line = line + '[ %6.4f  %6.4f  %6.4f ]\n' % (RMS_euler[n, 0] * RAD2DEG, RMS_euler[n, 1] * RAD2DEG, RMS_euler[n, 2] * RAD2DEG)
            f.write(line)

        line = 'AVERAGE:        '
        if navMode:
            f.write('------------------------------------------------------------------------------------------------------------\n')
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (averageRMS[3], averageRMS[4], averageRMS[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (averageRMS[0], averageRMS[1], averageRMS[2])
        else:  # AHRS mode
            f.write('------------------------------------------\n')
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (averageRMS_euler[0] * RAD2DEG, averageRMS_euler[1] * RAD2DEG, averageRMS_euler[2] * RAD2DEG)
        f.write(line)

        line = 'THRESHOLD:      '
        if navMode:
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (thresholds[3], thresholds[4], thresholds[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (thresholds[0], thresholds[1], thresholds[2])
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (thresholds[6] * RAD2DEG, thresholds[7] * RAD2DEG, thresholds[8] * RAD2DEG)
        f.write(line)

        line = 'RATIO:          '
        if navMode:
            f.write('------------------------------------------------------------------------------------------------------------\n')
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (specRatio[3], specRatio[4], specRatio[5])
            line = line + '[%7.4f %7.4f %7.4f ],     ' % (specRatio[0], specRatio[1], specRatio[2])
        else:  # AHRS mode
            f.write('------------------------------------------\n')
        line = line + '[%7.4f %7.4f %7.4f ]\n' % (specRatio[6], specRatio[7], specRatio[8])
        f.write(line)

        def pass_fail(ratio): return 'FAIL' if ratio > 1.0 else 'PASS'

        line = 'PASS/FAIL:      '
        if navMode:
            line        = line + '[   %s    %s    %s ],     ' % (pass_fail(specRatio[3]),pass_fail(specRatio[4]),pass_fail(specRatio[5])) # LLA
            line        = line + '[   %s    %s    %s ],     ' % (pass_fail(specRatio[0]),pass_fail(specRatio[1]),pass_fail(specRatio[2])) # UVW
        line            = line + '[   %s    %s    %s ]\n' % (pass_fail(specRatio[6]),pass_fail(specRatio[7]),pass_fail(specRatio[8]))     # ATT
        f.write(line)

        if navMode:
            f.write('                                                                                         ')
        else:  # AHRS mode
            f.write('                  ')
        f.write('('+mode +' mode)\n\n')

        # Print Mounting Biases
        f.write('--------------- Angular Mounting Biases ----------------\n')
        f.write('Device       Euler Biases[   (deg)     (deg)     (deg) ]\n')
        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            f.write('%2d SN%d               [ %7.4f   %7.4f   %7.4f ]\n' % (
                n, devInfo.v['serialNumber'][-1], mount_bias_euler[n, 0] * RAD2DEG, mount_bias_euler[n, 1] * RAD2DEG, mount_bias_euler[n, 2] * RAD2DEG))
        f.write('\n')

        # Print Device Version Information
        f.write(
            '------------------------------------------- Device Info -------------------------------------------------\n')
        for n in range(0, numDev):
            devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])
            hver = devInfo.v['hardwareVer'][-1]
            cver = devInfo.v['commVer'][-1]
            fver = devInfo.v['firmwareVer'][-1]
            buld = devInfo.v['build'][-1]
            repo = devInfo.v['repoRevision'][-1]
            date = devInfo.v['buildDate'][-1]
            time = devInfo.v['buildTime'][-1]
            addi = devInfo.v['addInfo'][-1]
            f.write(
                '%2d SN%d  HW: %d.%d.%d.%d   FW: %d.%d.%d.%d build %d repo %d   Proto: %d.%d.%d.%d  Date: %04d-%02d-%02d %02d:%02d:%02d  %s\n' % (
                    n, devInfo.v['serialNumber'][-1],
                    hver[3], hver[2], hver[1], hver[0],
                    fver[3], fver[2], fver[1], fver[0], buld, repo,
                    cver[3], cver[2], cver[1], cver[0],
                    2000 + date[2], date[1], date[0],
                    time[3], time[2], time[1],
                    addi))
        f.write('\n')

        f.close()

        # Automatically open report in Windows
        if 'win' in sys.platform:
            subprocess.Popen(["notepad.exe", filename])  # non-blocking call
        if 'linux' in sys.platform:
            subprocess.Popen(['gedit', filename])

    print("Done.")

    # TODO: Pass out the union of the test errors
    return averageRMS
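A minimal sketch of how the PASS/FAIL columns above are derived, assuming (not shown in the code above) that specRatio is the element-wise ratio of averageRMS to its thresholds, so any ratio above 1.0 is flagged FAIL by pass_fail():

import numpy as np

def spec_ratio(average_rms, thresholds):
    # hypothetical helper: element-wise ratio of measured RMS to its spec threshold
    return np.asarray(average_rms) / np.asarray(thresholds)

ratios = spec_ratio([0.02, 0.03, 0.05], [0.05, 0.05, 0.04])
print(['FAIL' if r > 1.0 else 'PASS' for r in ratios])   # ['PASS', 'PASS', 'FAIL']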
    def sampleEr(self, actualout):
        # sum of squared errors of the network output against the desired output,
        # normalised by the number of output units (self.Top[2])
        error = np.subtract(self.out, actualout)
        sqerror = np.sum(np.square(error)) / self.Top[2]
        return sqerror
def return_euclidean_distance(feature_1, feature_2):
    # Euclidean (L2) distance between two feature vectors
    feature_1 = np.array(feature_1)
    feature_2 = np.array(feature_2)
    dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
    return dist
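A quick usage check for return_euclidean_distance (illustrative only, assuming numpy is imported as np): the result should equal np.linalg.norm of the difference vector.

a = [1.0, 2.0, 3.0]
b = [4.0, 6.0, 3.0]
print(return_euclidean_distance(a, b))            # 5.0
print(np.linalg.norm(np.array(a) - np.array(b)))  # 5.0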
Example #55
0
def _estimate_weights_window(sindx, vals, nmedian, nstddev, type, outQueue):
    """
    Set weights using a median-filter method

    Parameters
    ----------
    sindx: int
        Index of station
    vals: array
        Array of values
    nmedian: odd int
        Size of median time window
    nstddev: odd int
        Size of stddev time window
    typ: str
        Type of values (e.g., 'phase')

    """
    import numpy as np
    from scipy.ndimage import generic_filter

    pad_width = [(0, 0)] * len(vals.shape)
    pad_width[-1] = ((nmedian-1)//2, (nmedian-1)//2)  # integer division: np.pad requires int widths
    if type == 'phase':
        # Median smooth and subtract to de-trend
        if nmedian > 0:
            # Convert to real/imag
            real = np.cos(vals)
            pad_real = np.pad(real, pad_width, 'constant', constant_values=(np.nan,))
            med_real = np.nanmedian(_rolling_window_lastaxis(pad_real, nmedian), axis=-1)
            real -= med_real
            real[real < -1.0] = -1.0
            real[real > 1.0] = 1.0

            imag = np.sin(vals)
            pad_imag = np.pad(imag, pad_width, 'constant', constant_values=(np.nan,))
            med_imag = np.nanmedian(_rolling_window_lastaxis(pad_imag, nmedian), axis=-1)
            imag -= med_imag
            imag[imag < -1.0] = -1.0
            imag[imag > 1.0] = 1.0

            # Calculate standard deviations
            pad_width[-1] = ((nstddev-1)//2, (nstddev-1)//2)
            pad_real = np.pad(real, pad_width, 'constant', constant_values=(np.nan,))
            stddev1 = _nancircstd(_rolling_window_lastaxis(pad_real, nstddev), axis=-1, is_phase=False)
            pad_imag = np.pad(imag, pad_width, 'constant', constant_values=(np.nan,))
            stddev2 = _nancircstd(_rolling_window_lastaxis(pad_imag, nstddev), axis=-1, is_phase=False)
            stddev = stddev1 + stddev2
        else:
            phase = normalize_phase(vals)

            # Calculate standard deviation
            pad_width[-1] = ((nstddev-1)//2, (nstddev-1)//2)
            pad_phase = np.pad(phase, pad_width, 'constant', constant_values=(np.nan,))
            stddev = _nancircstd(_rolling_window_lastaxis(pad_phase, nstddev), axis=-1)
    else:
        # Median smooth and subtract to de-trend
        if nmedian > 0:
            pad_vals = np.pad(vals, pad_width, 'constant', constant_values=(np.nan,))
            med = np.nanmedian(_rolling_window_lastaxis(pad_vals, nmedian), axis=-1)
            vals -= med

        # Calculate standard deviation in larger window
        pad_width[-1] = ((nstddev-1)//2, (nstddev-1)//2)
        pad_vals = np.pad(vals, pad_width, 'constant', constant_values=(np.nan,))
        stddev = np.nanstd(_rolling_window_lastaxis(pad_vals, nstddev), axis=-1)

    # Check for periods where standard deviation is zero or NaN and replace
    # with min value to prevent inf in the weights. Also limit weights to
    # float16
    zero_scatter_ind = np.where(np.logical_or(np.isnan(stddev), stddev == 0.0))
    if len(zero_scatter_ind[0]) > 0:
        good_ind = np.where(~np.logical_or(np.isnan(stddev), stddev == 0.0))
        stddev[zero_scatter_ind] = np.min(stddev[good_ind])
    if nmedian > 0:
        fudge_factor = 2.0 # factor to compensate for smoothing
    else:
        fudge_factor = 1.0
    w = 1.0 / np.square(stddev*fudge_factor)

    # Rescale to fit in float16
    float16max = 65504.0
    if np.max(w) > float16max:
        w *= float16max / np.max(w)

    outQueue.put([sindx, w])
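An illustrative note on the weighting step above: the weights are inverse-variance weights, w = 1 / (fudge_factor * stddev)^2, then rescaled so the largest weight still fits in float16. A minimal sketch of that final step:

import numpy as np

stddev = np.array([0.01, 0.1, 1.0])
w = 1.0 / np.square(stddev * 2.0)   # fudge_factor = 2.0 when median smoothing was applied
float16max = 65504.0
if np.max(w) > float16max:
    w *= float16max / np.max(w)
print(w.astype(np.float16))         # weights: 2500, 25, 0.25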
Example #56
0
	def infer(
		self,
		test_x,
		test_x_len,
		test_x_base_names,
		test_epoch,
		model_path='model',
		out_type='y',
		gain='mmse-lsa',
		out_path='out',
		n_filters=40,
		saved_data_path=None,
		):
		"""
		Deep Xi inference. The specified 'out_type' is saved.

		Argument/s:
			test_x - noisy-speech test batch.
			test_x_len - noisy-speech test batch lengths.
			test_x_base_names - noisy-speech base names.
			test_epoch - epoch to test.
			model_path - path to model directory.
			out_type - output type (see deepxi/args.py).
			gain - gain function (see deepxi/args.py).
			out_path - path to save output files.
			n_filters - number of mel-scale filters (used for the subband_ibm_hat output).
			saved_data_path - path to saved data necessary for enhancement.
		"""
		out_path_base = out_path
		if not isinstance(test_epoch, list): test_epoch = [test_epoch]
		if not isinstance(gain, list): gain = [gain]

		# The mel-scale filter bank is to compute an ideal binary mask (IBM)
		# estimate for log-spectral subband energies (LSSE).
		if out_type == 'subband_ibm_hat':
			mel_filter_bank = self.mel_filter_bank(n_filters)

		for e in test_epoch:
			if e < 1: raise ValueError("test_epoch must be greater than 0.")
			for g in gain:

				out_path = out_path_base + '/' + self.ver + '/' + 'e' + str(e) # output path.
				if out_type == 'xi_hat': out_path = out_path + '/xi_hat'
				elif out_type == 'gamma_hat': out_path = out_path + '/gamma_hat'
				elif out_type == 's_STPS_hat': out_path = out_path + '/s_STPS_hat'
				elif out_type == 'y':
					if self.inp_tgt_type == 'MagIRM': out_path = out_path + '/y'
					else: out_path = out_path + '/y/' + g
				elif out_type == 'deepmmse': out_path = out_path + '/deepmmse'
				elif out_type == 'ibm_hat': out_path = out_path + '/ibm_hat'
				elif out_type == 'subband_ibm_hat': out_path = out_path + '/subband_ibm_hat'
				elif out_type == 'cd_hat': out_path = out_path + '/cd_hat'
				else: raise ValueError('Invalid output type.')
				if not os.path.exists(out_path): os.makedirs(out_path)


				self.model.load_weights(model_path + '/epoch-' + str(e-1) +
					'/variables/variables' )

				print("Processing observations...")
				inp_batch, supplementary_batch, n_frames = self.observation_batch(test_x, test_x_len)

				print("Performing inference...")
				tgt_hat_batch = self.model.predict(inp_batch, batch_size=1, verbose=1)

				print("Saving outputs...")
				batch_size = len(test_x_len)
				for i in tqdm(range(batch_size)):
					base_name = test_x_base_names[i]
					inp = inp_batch[i,:n_frames[i],:]
					tgt_hat = tgt_hat_batch[i,:n_frames[i],:]

					# if tf.is_tensor(supplementary_batch):
					supplementary = supplementary_batch[i,:n_frames[i],:]

					if saved_data_path is not None:
						saved_data = read_mat(saved_data_path + '/' + base_name + '.mat')
						supplementary = (supplementary, saved_data)

					if out_type == 'xi_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', xi_hat, 'xi_hat')
					elif out_type == 'gamma_hat':
						gamma_hat = self.inp_tgt.gamma_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', gamma_hat, 'gamma_hat')
					elif out_type == 's_STPS_hat':
						s_STPS_hat = self.inp_tgt.s_stps_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', s_STPS_hat, 's_STPS_hat')
					elif out_type == 'y':
						y = self.inp_tgt.enhanced_speech(inp, supplementary, tgt_hat, g).numpy()
						save_wav(out_path + '/' + base_name + '.wav', y, self.inp_tgt.f_s)
					elif out_type == 'deepmmse':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						d_PSD_hat = np.multiply(np.square(inp), gfunc(xi_hat, xi_hat+1.0,
							gtype='deepmmse'))
						save_mat(out_path + '/' + base_name + '.mat', d_PSD_hat, 'd_psd_hat')
					elif out_type == 'ibm_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						ibm_hat = np.greater(xi_hat, 1.0).astype(bool)
						save_mat(out_path + '/' + base_name + '.mat', ibm_hat, 'ibm_hat')
					elif out_type == 'subband_ibm_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						xi_hat_subband = np.matmul(xi_hat, mel_filter_bank.transpose())
						subband_ibm_hat = np.greater(xi_hat_subband, 1.0).astype(bool)
						save_mat(out_path + '/' + base_name + '.mat', subband_ibm_hat,
							'subband_ibm_hat')
					elif out_type == 'cd_hat':
						cd_hat = self.inp_tgt.cd_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', cd_hat, 'cd_hat')
					else: raise ValueError('Invalid output type.')
Example #57
0
def l2_difference(A, B):
    return np.sum(np.square(np.abs(A[:, None, :] - B)), 2)
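An illustrative check (for real-valued inputs): the broadcasting in l2_difference yields the full matrix of pairwise squared Euclidean distances, so it should agree with scipy's cdist using the 'sqeuclidean' metric.

import numpy as np
from scipy.spatial.distance import cdist

A = np.random.rand(3, 5)
B = np.random.rand(4, 5)
assert np.allclose(l2_difference(A, B), cdist(A, B, 'sqeuclidean'))   # result has shape (3, 4)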
Example #58
0
def draw_boxes(im, boxes, labels=None, colors=None, font_scale=0.6,
               font_thick=1, box_thick=1, bottom_text=False, offsets=None):
  if boxes is None or len(boxes) == 0:
    return im

  boxes = np.asarray(boxes, dtype="int")

  FONT = cv2.FONT_HERSHEY_SIMPLEX
  FONT_SCALE = font_scale


  if labels is not None:
    assert len(labels) == len(boxes), "{} != {}".format(len(labels), len(boxes))
  if colors is not None:
    assert len(colors) == len(boxes), "{} != {}".format(len(colors), len(boxes))
  areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
  sorted_inds = np.argsort(-areas)  # draw large ones first
  assert areas.min() > 0, areas.min()

  im = im.copy()
  COLOR_DIFF_WEIGHT = np.asarray((3, 4, 2), dtype='int32')
  COLOR_CANDIDATES = PALETTE_RGB[:, ::-1]
  if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
    im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
  for i in sorted_inds:
    box = boxes[i, :]
    # for cropped visualization
    if box[0] < 0 or box[1] < 0 or box[2] < 0 or box[3] < 0:
      continue

    color = (218, 218, 218)
    if colors is not None:
      color = colors[i]
    best_color = color

    lineh = 2  # default box-enlarging offset; replaced with the text height if a label is drawn
    if labels is not None:
      label = labels[i]

      # find the best placement for the text
      ((linew, lineh), _) = cv2.getTextSize(label, FONT, FONT_SCALE, font_thick)
      bottom_left = [box[0] + 1, box[1] - 0.3 * lineh]
      top_left = [box[0] + 1, box[1] - 1.3 * lineh]
      if top_left[1] < 0:   # out of image
        top_left[1] = box[3] - 1.3 * lineh
        bottom_left[1] = box[3] - 0.3 * lineh

      textbox = IntBox(int(top_left[0]), int(top_left[1]),
                       int(top_left[0] + linew), int(top_left[1] + lineh))
      textbox.clip_by_shape(im.shape[:2])

      offset = 0
      if offsets is not None:
        offset = lineh * offsets[i]

      if color is None:
        # find the best color
        mean_color = textbox.roi(im).mean(axis=(0, 1))
        best_color_ind = (np.square(COLOR_CANDIDATES - mean_color) *
                          COLOR_DIFF_WEIGHT).sum(axis=1).argmax()
        best_color = COLOR_CANDIDATES[best_color_ind].tolist()

      if bottom_text:
        cv2.putText(im, label, (box[0] + 2, box[3] - 4 + offset),
                    FONT, FONT_SCALE, color=best_color)
      else:
        cv2.putText(im, label, (textbox.x1, textbox.y2 - offset),
                    FONT, FONT_SCALE, color=best_color) #, lineType=cv2.LINE_AA)
    # expand the box on y axis for overlapping results
    offset = 0
    if offsets is not None:
      offset = lineh * offsets[i]
      box[0] -= box_thick * offsets[i] + 1
      box[2] += box_thick * offsets[i] + 1
      if bottom_text:
        box[1] -= box_thick * offsets[i] + 1
        box[3] += offset
      else:
        box[3] += box_thick * offsets[i] + 1
        box[1] -= offset

    cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]),
                  color=best_color, thickness=box_thick)
  return im
Example #59
0
def mse(X, B, C):
    q_x = reconstruct(C, B)
    return np.sum(np.square(X - q_x), 1)
Example #60
0
def quantization_error(X, B, C):
    q_x = reconstruct(C, B)
    return np.mean(np.sum(np.square(X - q_x), 1))
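Note that quantization_error is just the mean of the per-point mse values. reconstruct(C, B) is not shown here; the sketch below uses a hypothetical single-codebook lookup as a stand-in (assuming all three functions live in the same namespace) so the relation can be checked:

import numpy as np

def reconstruct(C, B):
    # hypothetical stand-in: one centroid index per point, plain codebook lookup
    return C[B]

X = np.random.rand(6, 4)        # original points
C = np.random.rand(3, 4)        # 3 centroids
B = np.random.randint(0, 3, 6)  # code index for each point

assert np.isclose(quantization_error(X, B, C), np.mean(mse(X, B, C)))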