Example #1
    def get_closest_obstacle_distance_matrix(self, x_array, y_array):
        """ The math below is same as the function 'get_closest_obstacle_distance' except
            that it accepts arrays for x and y to speed up the calculation"""
        x_coord_array = np.divide(
            np.subtract(x_array, self.map.info.origin.position.x),
            self.map.info.resolution)
        y_coord_array = np.divide(
            np.subtract(y_array, self.map.info.origin.position.y),
            self.map.info.resolution)
        x_coord_array = x_coord_array.astype(int)
        y_coord_array = y_coord_array.astype(int)

        # Coordinates equal to the width/height are already outside the grid, hence >=.
        x_nan_indexes = np.where(
            np.logical_or(x_coord_array >= self.map.info.width,
                          x_coord_array < 0))
        y_nan_indexes = np.where(
            np.logical_or(y_coord_array >= self.map.info.height,
                          y_coord_array < 0))
        out_of_bounds_indexes = np.unique(
            np.append(x_nan_indexes, y_nan_indexes))

        # np.delete returns a new array, so the result must be reassigned.
        x_coord_array = np.delete(x_coord_array, out_of_bounds_indexes)
        y_coord_array = np.delete(y_coord_array, out_of_bounds_indexes)

        ind_array = np.add(x_coord_array,
                           np.multiply(y_coord_array, self.map.info.width))

        # Sum the half-normal likelihoods of the in-bounds points and add a fixed
        # penalty for every out-of-bounds point.
        return np.sum(
            halfnorm.pdf(self.closest_occ[ind_array],
                         scale=self.error_distribution_scale)) + (
                             len(out_of_bounds_indexes) *
                             self.MAX_DISTANCE_OUT_OF_BOUNDS)
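A minimal standalone sketch of the same scoring idea, outside the class. The names score_distances, scale and out_of_bounds_penalty are illustrative and not part of the original code: in-bounds distances are scored with a half-normal density and each out-of-bounds sample adds a fixed penalty.

import numpy as np
from scipy.stats import halfnorm

def score_distances(closest_obstacle_dists, n_out_of_bounds,
                    scale=0.5, out_of_bounds_penalty=10.0):
    # Sum the half-normal likelihood of every in-bounds distance, then add a
    # constant penalty per out-of-bounds sample.
    in_bounds = np.sum(halfnorm.pdf(closest_obstacle_dists, scale=scale))
    return in_bounds + n_out_of_bounds * out_of_bounds_penalty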
Example #2
def half_normal_curve(ax, mu=0., sigma=1., half_mu=0.):
    x = np.linspace(0, mu + 3 * sigma, 100)
    # x = np.linspace(halfnorm.ppf(0.01),
    #                 halfnorm.ppf(0.99), 100)
    ax.plot(x, halfnorm.pdf(x, mu, sigma))
    # Draw on the axes that were passed in, not whichever axes happens to be current.
    ax.axvline(x=mu, c="r", linewidth=1)
    ax.axvline(x=half_mu, c="g", linewidth=1)
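An illustrative call with assumed setup; the figure creation and parameter values below are not from the original source.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import halfnorm

fig, ax = plt.subplots()
half_normal_curve(ax, mu=1.0, sigma=2.0, half_mu=0.5)
plt.show()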
Example #3
def plot_distrib_angle(list_values, name, dt, xmax, ymax, color):

    #print(list_values)

    fig = matplotlib.pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim([0, xmax])
    # matplotlib has removed the 'normed' kwarg; density=True is its replacement.
    n, bins, patches = matplotlib.pyplot.hist(list_values,
                                              50,
                                              density=True,
                                              facecolor=color,
                                              alpha=0.5)

    param = halfnorm.fit(sorted(list_values))
    pdf_fitted = halfnorm.pdf(sorted(list_values),
                              loc=param[0],
                              scale=param[1])

    mean_halfnorm = halfnorm.mean(loc=param[0], scale=param[1])
    std_halfnorm = halfnorm.std(loc=param[0], scale=param[1])

    print('mean = ' + str(mean_halfnorm))
    print('std  = ' + str(std_halfnorm))

    #chi_value = chisquare(sorted(list_values), f_exp=pdf_fitted)
    #print('chi_square = '+str(chi_value[0]))
    #print('p_value = '+str(chi_value[1]))

    matplotlib.pyplot.plot(sorted(list_values), pdf_fitted, 'g-')

    matplotlib.pyplot.savefig(str(name) + '_' + str(dt) + '.svg')
    #matplotlib.pyplot.show()
    matplotlib.pyplot.close()
    return std_halfnorm
Example #4
def probability_zphot(z_phot,zl,zu,z_min,z_max,plot_results=False):
    xx = np.linspace(0,10,1000000)
    Yplus = xx+z_phot
    Yminus = -xx+z_phot

    positive = halfnorm.pdf(xx,loc=0,scale=zu)
    negative = halfnorm.pdf(xx,loc=0,scale=zl)

    negative *= np.amax(positive)/np.amax(negative)
    negative[Yminus<0]=0

    Tot = simps(negative[::-1],(-xx+z_phot)[::-1])+simps(positive,xx+z_phot)
    sel = (Yplus<z_max)*(Yplus>max(z_phot,z_min))
    selneg = (Yminus>z_min)*(Yminus<min(z_max,z_phot))

    if plot_results:
        fig,ax=mpl.subplots()
        ax.plot(Yplus,positive,'r-')
        ax.plot(Yminus,negative,'b-')

        ax.vlines(z_phot,0,np.amax(positive),color='k',linestyle='solid')
        ax.vlines(z_phot+zu,0,np.amax(positive),color='k',linestyle='solid')
        ax.vlines(z_phot-zl,0,np.amax(positive),color='k',linestyle='solid')


        ax.fill_between( Yplus[sel],0,positive[sel],color='indigo',alpha=0.7)
        ax.fill_between( Yminus[selneg],0,negative[selneg],color='green',alpha=0.7)


        ax.plot(Yminus[selneg][:5],negative[selneg][:5],'k-',lw=3)
#        mpl.show()

    if (True in sel) and (True in selneg):
        Tprob =  simps(positive[sel],Yplus[sel])+simps(negative[selneg][::-1],Yminus[selneg][::-1])
    elif (True in sel):
        Tprob =  simps(positive[sel],Yplus[sel])
    elif (True in selneg):
        Tprob = simps(negative[selneg][::-1],Yminus[selneg][::-1])
    else:
        Tprob = 0.0

    if np.isnan(Tprob/Tot):
        return -99
    else:
        return Tprob/Tot
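A quick sanity check on the integration scheme above (a hedged aside: in recent SciPy, scipy.integrate.simps has been renamed simpson, and the grid below is illustrative): a half-normal density integrates to 1 over the positive axis.

import numpy as np
from scipy.stats import halfnorm
from scipy.integrate import simpson  # use simps on older SciPy releases

xx = np.linspace(0, 10, 100001)
print(simpson(halfnorm.pdf(xx, loc=0, scale=1.5), x=xx))  # approximately 1.0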
Example #5
def predict(dp, X, Y, K, weighting=False):
    # Nearest-neighbour prediction: pick the K samples in X closest to dp and
    # average their Y values, optionally weighting them with a half-normal pdf.
    err = X - dp
    abs_err = np.abs(err)
    idx = np.argsort(abs_err)

    candidates = Y[idx[:K]]

    if weighting:
        # Note: the weights come from the candidate Y values themselves, scaled by
        # the overall spread of Y, rather than from the distances |X - dp|.
        weights = halfnorm.pdf(candidates, scale=np.std(Y))
        weights /= np.sum(weights)
        return np.dot(candidates, weights)
    else:
        return np.mean(candidates)
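An illustrative call with synthetic data; the arrays, seed, and expected value are made up for the demo and are not part of the original code.

import numpy as np
from scipy.stats import halfnorm

rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=200)
Y = 2.0 * X + rng.normal(0.0, 1.0, size=200)
# With Y roughly equal to 2*X, the prediction at dp=5.0 should be near 10.
print(predict(5.0, X, Y, K=10, weighting=True))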
Example #6
def CalculateDistributions(interpolatedOutputList, lnspc):
    beta, hloc, hscale = halfgennorm.fit(lnspc)
    pdf_beta_halfgennorm = halfgennorm.pdf(lnspc, beta, hloc, hscale)
    eloc, escale = expon.fit(lnspc)
    pdf_beta_expon = expon.pdf(lnspc, eloc, escale)
    lns, lnloc, lnscale = lognorm.fit(lnspc)
    pdf_beta_lognorm = lognorm.pdf(lnspc, lns, lnloc, lnscale)
    nloc, nscale = halfnorm.fit(lnspc)
    pdf_beta_norm = halfnorm.pdf(lnspc, nloc, nscale)
    # Sturges' rule for the number of histogram bins.
    binss = int(np.trunc(np.log2(len(interpolatedOutputList))) + 1)
    print(beta)
    return binss, pdf_beta_halfgennorm, pdf_beta_expon, pdf_beta_lognorm, pdf_beta_norm
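A hedged sketch of how the returned densities might be overlaid on a histogram, assuming lnspc is the grid the pdfs were evaluated on; plot_fits and all styling choices below are illustrative, not from the original code.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import halfgennorm, expon, lognorm, halfnorm  # needed by CalculateDistributions

def plot_fits(interpolatedOutputList, lnspc):
    binss, pdf_hgn, pdf_exp, pdf_lnorm, pdf_hnorm = CalculateDistributions(
        interpolatedOutputList, lnspc)
    plt.hist(interpolatedOutputList, bins=binss, density=True, alpha=0.4)
    plt.plot(lnspc, pdf_hgn, label='halfgennorm')
    plt.plot(lnspc, pdf_exp, label='expon')
    plt.plot(lnspc, pdf_lnorm, label='lognorm')
    plt.plot(lnspc, pdf_hnorm, label='halfnorm')
    plt.legend()
    plt.show()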


# def send_zipfile(request, IAGACode):
#     """
#     Create a ZIP file on disk and transmit it in chunks of 8KB,
#     without loading the whole file into memory. A similar approach can
#     be used for large dynamic PDF files.
#     """
#     temp = tempfile.TemporaryFile()
#     archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)
#     # for index in range(10):
#     filename = settings.BASE_DIR +'/world/img/' + IAGACode + 'X.png' # Replace by your files here.

#     archive.write(filename, 'X.png') # 'file%d.png' will be the
#                                                       # name of the file in the
#                                                       # zip
#     archive.close()

#     temp.seek(0)
#     wrapper = FileWrapper(temp)

#     response = HttpResponse(wrapper, content_type='application/zip')
#     response['Content-Disposition'] = 'attachment; filename=test.zip'

#     return response
Example #7
    def generate_transition(self,
                            states,
                            actions,
                            opponent_count,
                            dist_type=np.uint16,
                            transition_type=np.float_,
                            shrink_bias=4,
                            max_z_score=5,
                            usage_weight=1,
                            horizon=5):
        """
        Create numpy tensor representing the probability of transitioning from one state to the next state.

        These will be generated by a probability distribution with the following properties:
            0. Transitions follow a Gaussian distribution proportional to the Manhattan
                distance between states.
            1. Logical successors to this state, that is ones where the score decreases, are
                more likely.
            2. The exploit being used is even more likely to decrease.
        
        :param iterable states: The valid states for this MDP.
        :param iterable actions: The valid actions for this MDP.
        :param np.type dist_type: The datatype to use for representing distances. Uses \
            unsigned int16 if not specified.
        :param np.type transition_type: The datatype to use for the transition tensor. \
            Values are between 0 and 1, so this only determines the precision.
        :param float shrink_bias: How much to prioritize shrinking states over growing \
            states. For example, with value 4 transitioning from (1,1,1) to (0,1,1) is \
            four times as likely as transitioning to (2,1,1).
        :param float max_z_score: indicates the z score to assign to the largest distance.\
            By default this is 5, which is basically 0 probability.
        :param float usage_weight: indicates the exponential constant to use for \
            weighting. 1.08 means at 10 usages, the reward dropping is 2.2 times as likely \
            as normal. If this value is 1, repeated uses have no effect.
        :param int horizon: the planning horizon; in this method it is only used to tag \
            the cached distance and transition pickle file names.
        """
        def get_dist(state1, state2):
            n = np.linalg.norm((self.get_rewards_from_state(state1) -
                                self.get_rewards_from_state(state2)),
                               ord=1)
            return n

        # if this has been generated before, just load it from binary.
        fname = 'transitions_%s_%s_%s_%s_nousage.pickle' % (
            opponent_count, len(actions), horizon, usage_weight)
        # fname = 'transitions_%s_%s_%s.pickle' % (opponent_count, len(actions), horizon)
        print(f'Checking for file {fname}')
        if isfile(fname):
            return pickle.load(open(fname, 'rb'))

        print(f'Creating transition tensor.')

        # create 2d dist array of state to state distance. Prevents recalculation.
        #   There should be a much more efficient way of doing this.
        # NOTE: a small unsigned dtype caps the representable distance (255 for uint8,
        #   65535 for the default uint16). Reaching that cap would require an extreme
        #   case, for instance 50 players, six exploits, and terminal vs initial state.
        #   This seems a reasonable assumption for now, but we handle it by setting all
        #   such items to the max distance.
        dist_name = 'dists_%s_%s_%s.pickle' % (opponent_count, len(actions),
                                               horizon)
        if not isfile(dist_name):
            dist_vector = np.vectorize(get_dist)
            dists = np.fromfunction(dist_vector,
                                    shape=(len(states), len(states)),
                                    dtype=dist_type)
            pickle.dump(dists, open(dist_name, 'wb+'))
            print('Dists written!')
        else:
            dists = pickle.load(open(dist_name, 'rb'))

        print(f'Dists array of shape {dists.shape} computed')

        # calculate the size of a z-score bin. We want the furthest possible dist
        #   to have a z-score of max_z_score. Thus we can calculate z-score
        #   w/ (val / max_dist) * max_dist. This linearly maps z scores from values
        #   along continuum.
        max_dist = np.max(dists)
        print(f'Max Distance is {max_dist}')

        transition = np.zeros((len(states), len(actions), len(states)),
                              dtype=transition_type)
        for s in range(len(states)):
            # distances to the other states by index, grabbed once for efficiency.
            dist = dists[s]
            for a, action in enumerate(actions):

                # NOTE - this doesn't guarantee a sum of 1. That requires post-processing later.
                for o in range(len(states)):
                    # If this state can't follow previous due to the exploit use count, then
                    #   set to 0 probability and continue.
                    if not self.is_successor(s, o, a):
                        transition[s][a][o] = 0
                        continue
                    # set the probability of reward shrinking as far more likely than increasing.
                    shrink_scale = shrink_bias if self.state_is_less(s,
                                                                     o) else 1
                    # increase chances of shrinking by usage.
                    uses = self.get_usages_from_state(s)
                    uses = uses[a] if a < len(uses) else 0
                    uses_scale = usage_weight**uses
                    # calculate the z score of this state on the Gaussian curve.
                    z_score = (dist[o] / max_dist) * max_z_score
                    temp = halfnorm.pdf(z_score)
                    val = temp * shrink_scale * uses_scale
                    transition[s][a][o] = val

                # Normalize s,a. The probability of ending up in SOME state after taking
                #   action a in state s should always be one.
                # Correction - in some terminal states the probability of transitioning out will
                #   be 0. This is fine.
                s_a_sum = np.sum(transition[s][a])
                if s_a_sum != 0:
                    transition[s][a] /= s_a_sum
                print("Completed vector (%s, %s)" % (s, 'exploit' + str(a)))

        # save to pickle so we don't have to do this process again.
        assert not np.any(np.isnan(transition))
        pickle.dump(transition, open(fname, 'wb+'))
        return transition
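The core weighting step of the loop above, in isolation (a minimal sketch; the function name and default are illustrative): distances are mapped linearly to z-scores in [0, max_z_score], scored with the standard half-normal density, and normalized so the weights form a probability vector.

import numpy as np
from scipy.stats import halfnorm

def distance_weights(dists, max_z_score=5.0):
    # dists: 1-D array of non-negative distances (at least one of them non-zero).
    z = (dists / np.max(dists)) * max_z_score
    w = halfnorm.pdf(z)   # larger distance -> smaller weight
    return w / np.sum(w)  # normalize into a probability vector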
Example #8
def half_gaussian_filter(value, sigma):
    space = np.linspace(-4, 4, sigma * 8)
    neg_space = np.linspace(4 * 10, -4 * 10, sigma * 8)
    kernel = (halfnorm.pdf(space) + halfnorm.pdf(neg_space)) / sigma
    offset_value = np.concatenate((value, np.zeros(45)))
    return convolve(offset_value, kernel)[45:]

Example #9
if True:
    title = ('Altitude %dkm | #Nodes %d\n Baseline %dm | TimeError %.2fps'
                %(altitude,sat_nodes,sat_length,time_err*1e12))
    fig = plt.figure()
    n, bins, patches = plt.hist(coord_error, bins=50,
                                #range=(-10,10),
                                density=True,  # 'normed' was removed from matplotlib
                                facecolor='g', alpha=0.6)
    
    # add a 'best fit' line
    param = halfnorm.fit(coord_error)
    x = np.linspace(0,3500,100)
    # fitted distribution
    pdf_fitted = halfnorm.pdf(x,loc=param[0],scale=param[1])
    plt.plot(x,pdf_fitted,'r-',alpha=0.8)
    
    #plot test values
    test = kstest(coord_error,'halfnorm')
    plt.text(2500,.0004,s=('Half-normal \nKS-Stat=%.3f \nPvalue = %.3f \nMean = %.3f \nSTD=%.3f'
                             %(test[0],test[1],coord_err_avg,coord_err_std)),
             bbox=dict(facecolor='white', edgecolor='black', boxstyle='round,pad=1'))
    
    plt.axvline(x=coord_err_avg,c='b',alpha=0.5)
    plt.axvline(x=coord_err_avg+coord_err_std,c='k',linestyle='-.',alpha=0.5)
    plt.axvline(x=coord_err_avg-coord_err_std,c='k',linestyle='-.',alpha=0.5)
    
    plt.xlabel('Error (m)')
    plt.title(title)
    #plt.axis([0, 650, 0, 0.007 ])
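One caveat: kstest(coord_error, 'halfnorm') tests against the standard half-normal (loc=0, scale=1), not the distribution that was just fitted. To test the fitted parameters they can be passed via args; a hedged adjustment, not part of the original script:

from scipy.stats import kstest
test = kstest(coord_error, 'halfnorm', args=(param[0], param[1]))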
Example #10
import numpy as np
from scipy.stats import halfnorm
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)

# Calculate a few first moments:

mean, var, skew, kurt = halfnorm.stats(moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(halfnorm.ppf(0.01),
                halfnorm.ppf(0.99), 100)
ax.plot(x, halfnorm.pdf(x),
       'r-', lw=5, alpha=0.6, label='halfnorm pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = halfnorm()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = halfnorm.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], halfnorm.cdf(vals))
# True

# Generate random numbers:

r = halfnorm.rvs(size=1000)
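The SciPy docstring example conventionally ends by comparing the histogram of the samples with the density; a hedged completion along those lines:

ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()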