Example #1
def ciBatchMeans(M, N, k):
    sim = simulateLindleyEfficient(lam, mu, M * N + k)

    # throw away the first k observations, and divide the rest into
    # subruns of length N each
    run = sim[k:(M * N + k)]
    p = reshape(run, (M, N))
    sample = mean(p, axis=1)  # one batch mean per subrun (row means)
    meanW = mean(sample)
    varW = var(sample)
    ci = meanW - 1.96 * sqrt(varW / M), meanW + 1.96 * sqrt(varW / M)
    return ci
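A minimal usage sketch for the batch-means routine above. simulateLindleyEfficient, lam and mu are module-level names in the original that are not shown here, so the driver below stubs them with a plain Lindley recursion for an M/M/1 queue; the stub and the parameter values are assumptions, only the call itself mirrors ciBatchMeans.

# Hypothetical driver for ciBatchMeans; the simulator stub and parameters are assumptions.
from numpy import mean, var, sqrt, reshape
from numpy.random import exponential

lam, mu = 0.8, 1.0  # assumed arrival and service rates (utilisation 0.8)

def simulateLindleyEfficient(lam, mu, n):
    # Lindley recursion W_{i+1} = max(W_i + S_i - A_{i+1}, 0) for an M/M/1 queue
    w = [0.0]
    for _ in range(n - 1):
        w.append(max(w[-1] + exponential(1.0 / mu) - exponential(1.0 / lam), 0.0))
    return w

print(ciBatchMeans(M=50, N=1000, k=500))  # 95% confidence interval for the mean waiting time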
Example #2
def ciMultipleRuns(M, N, k):
    sumW = 0
    sumW2 = 0
    for _ in range(M):
        sim = simulateLindleyEfficient(lam, mu, N)
        meanWrun = mean(sim[k:N])
        sumW += meanWrun
        sumW2 += meanWrun**2
    meanW = sumW / M
    varW = (sumW2 - sumW**2 / M) / (M - 1)
    ci = meanW - 1.96 * sqrt(varW / M), meanW + 1.96 * sqrt(varW / M)
    return ci
Example #3
    def find_mode_newton(self, return_full=False):
        """
        Newton search for mode of p(y|f)p(f)
        
        from GP book, algorithm 3.1, added step size
        """
        K = self.gp.K

        if self.newton_start is None:
            f = zeros(len(K))
        else:
            f = self.newton_start

        if return_full:
            steps = [f]

        iteration = 0
        norm_difference = inf
        objective_value = -inf

        while iteration < self.newton_max_iterations and norm_difference > self.newton_epsilon:
            # from GP book, algorithm 3.1, added step size
            # scale log_lik_grad_vector and K^-1 f = a

            w = -self.gp.likelihood.log_lik_hessian_vector(self.gp.y, f)
            w_sqrt = sqrt(w)

            # diag(w_sqrt).dot(K.dot(diag(w_sqrt))) == (K.T*w_sqrt).T*w_sqrt
            L = cholesky(eye(len(K)) + (K.T * w_sqrt).T * w_sqrt)
            b = f * w + self.newton_step * self.gp.likelihood.log_lik_grad_vector(self.gp.y, f)

            # a=b-diag(w_sqrt).dot(inv(eye(len(K)) + (K.T*w_sqrt).T*w_sqrt).dot(diag(w_sqrt).dot(K.dot(b))))
            a = w_sqrt * (K.dot(b))
            a = solve_triangular(L, a, lower=True)
            a = solve_triangular(L.T, a, lower=False)
            a = w_sqrt * a
            a = b - a

            f_new = K.dot(self.newton_step * a)

            # convergence stuff and next iteration
            objective_value_new = -0.5 * a.T.dot(f) + sum(self.gp.likelihood.log_lik_vector(self.gp.y, f))
            norm_difference = norm(f - f_new)

            if objective_value_new > objective_value:
                f = f_new
                if return_full:
                    steps.append(f)
            else:
                self.newton_step /= 2

            iteration += 1
            objective_value = objective_value_new

        self.computed = True

        if return_full:
            return f, L, asarray(steps)
        else:
            return f
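For reference, the linear algebra performed by one pass through the loop above is Algorithm 3.1 from the GP book (Rasmussen and Williams), with the extra step size gamma applied to the gradient term and to the final product; a LaTeX sketch of one iteration:

W = -\nabla\nabla \log p(y \mid f), \qquad
B = I + W^{1/2} K W^{1/2} = L L^\top
b = W f + \gamma\, \nabla \log p(y \mid f), \qquad
a = b - W^{1/2} L^{-\top}\!\left( L^{-1}\!\left( W^{1/2} K b \right) \right), \qquad
f_{\text{new}} = \gamma\, K a

The objective tracked for the backtracking test is \psi = -\tfrac{1}{2} a^\top f + \sum_i \log p(y_i \mid f_i), and gamma is halved whenever \psi does not increase.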
Example #4
def write_beta_diff_files(path, twiss_cor, twiss_no):
    file_bbx = open(os.path.join(path, "bbx.out"), "w")
    file_bby = open(os.path.join(path, "bby.out"), "w")
    print("* NAME S MEA ERROR MODEL", file=file_bbx)
    print("$ %s %le %le %le %le", file=file_bbx)
    print("* NAME S MEA ERROR MODEL", file=file_bby)
    print("$ %s %le %le %le %le", file=file_bby)
    if os.path.exists(os.path.join(path, 'getbetax_free.out')):
        twiss_getbetax = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getbetax_free.out'))
    else:
        twiss_getbetax = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getbetax.out'))
    for i in range(len(twiss_getbetax.NAME)):
        bpm_name = twiss_getbetax.NAME[i]
        bpm_included = True
        try:
            check = twiss_cor.NAME[twiss_cor.indx[bpm_name]]  # @UnusedVariable
        except:
            print "No ", bpm_name
            bpm_included = False
        if bpm_included:
            j = twiss_cor.indx[bpm_name]
            t_x = twiss_getbetax  # Variable for abbreviation
            error_beta = sqrt(t_x.STDBETX[i] ** 2 + t_x.ERRBETX[i] ** 2) / t_x.BETXMDL[i]
            print(bpm_name, t_x.S[i], (t_x.BETX[i] - t_x.BETXMDL[i]) / t_x.BETXMDL[i], error_beta, (twiss_cor.BETX[j] - twiss_no.BETX[j]) / twiss_no.BETX[j], file=file_bbx)

    if os.path.exists(os.path.join(path, 'getbetay_free.out')):
        twiss_getbetay = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getbetay_free.out'))
    else:
        twiss_getbetay = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getbetay.out'))
    for i in range(len(twiss_getbetay.NAME)):
        bpm_name = twiss_getbetay.NAME[i]
        bpm_included = True
        try:
            check = twiss_cor.NAME[twiss_cor.indx[bpm_name]]  # @UnusedVariable
        except:
            print "No ", bpm_name
            bpm_included = False
        if bpm_included:
            j = twiss_cor.indx[bpm_name]
            t_y = twiss_getbetay  # Variable for abbreviation
            error_beta = sqrt(t_y.STDBETY[i] ** 2 + t_y.ERRBETY[i] ** 2) / t_y.BETYMDL[i]
            print(bpm_name, t_y.S[i], (t_y.BETY[i] - t_y.BETYMDL[i]) / t_y.BETYMDL[i], error_beta, (twiss_cor.BETY[j] - twiss_no.BETY[j]) / twiss_no.BETY[j], file=file_bby)

    file_bbx.close()
    file_bby.close()
Example #5
 def adapt(self, mcmc_chain, step_output):
     # this is an extension of the base adapt call
     KameleonWindow.adapt(self, mcmc_chain, step_output)
     
     iter_no = mcmc_chain.iteration
     
     if iter_no > self.sample_discard and iter_no < self.stop_adapt:
         learn_scale = 1.0 / sqrt(iter_no - self.sample_discard + 1.0)
         self.nu2 = exp(log(self.nu2) + learn_scale * (exp(step_output.log_ratio) - self.accstar))
Example #6
def ciMultipleRunsParallel(M, N, k):
    sumW = 0
    sumW2 = 0

    numCores = multiprocessing.cpu_count()

    resultsAllWs = Parallel(n_jobs=numCores)(
        delayed(simulateLindleyEfficient)(lam, mu, N) for _ in range(M))
    # compute the mean and variance
    for i in range(M):
        sim = resultsAllWs[i]
        meanWrun = mean(sim[k:N])
        sumW += meanWrun
        sumW2 += meanWrun**2
    meanW = sumW / M
    varW = (sumW2 - sumW**2 / M) / (M - 1)
    ci = meanW - 1.96 * sqrt(varW / M), meanW + 1.96 * sqrt(varW / M)
    return ci
Example #7
def distance(centroid, observation):
    dists = []
    for i in range(0, len(centroid)):
        #        if i == 1:
        #            dists.append(pow(absolute(int(centroid[i]) - int(observation[i])),2))
        #        else:
        if i != 1:
            dists.append(pow(Levenshtein.distance(centroid[i], observation[i]), 2))

    return sqrt(sum(dists))
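A small usage sketch for the distance() helper above; it assumes the python-Levenshtein package (Levenshtein.distance) is installed, and the sample records are made up. Field 1 is skipped, the remaining fields contribute squared edit distances.

# Hypothetical records; field 1 (e.g. a numeric column) is ignored by distance().
centroid = ["smith", "42", "london"]
observation = ["smyth", "37", "londen"]
print(distance(centroid, observation))  # sqrt(1**2 + 1**2), roughly 1.41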
Example #8
 def _distancePointToLineSegment(self, a, b, p):
   '''
   Returns tuple of minimum distance to the directed line segment AB from p, and the distance along AB of the point of intersection
   '''  
   ab_mag2 = dot((b-a),(b-a))
   pa_mag2 = dot((a-p),(a-p))
   pb_mag2 = dot((b-p),(b-p))
   if pa_mag2 + ab_mag2 <= pb_mag2:
     return (sqrt(pa_mag2),0)
   elif pb_mag2 + ab_mag2 <= pa_mag2:
     return (sqrt(pb_mag2), sqrt(ab_mag2))
   else:
     c = cross((b-a),(p-a))
     if ab_mag2 == 0:
        raise ValueError('Division by zero magnitude line segment AB')
     dist_to_line2 = dot(c,c)/ab_mag2
     dist_to_line = sqrt(dist_to_line2)
     dist_along_segment = sqrt(pa_mag2 - dist_to_line2)
     return (dist_to_line, dist_along_segment)
Example #9
def create_gaussian_kernel(gaussian_kernel_sigma = 1.5, gaussian_kernel_width = 11):
    # 1D Gaussian kernel definition
    gaussian_kernel = np.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
            gaussian_kernel[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
                exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    return gaussian_kernel
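The same 1-D kernel can be built without the explicit loop. A vectorised NumPy sketch (the function name is ours; values match the loop above up to floating-point round-off):

import numpy as np

def create_gaussian_kernel_vectorized(sigma=1.5, width=11):
    # Evaluate the normal pdf at integer offsets from the kernel centre,
    # exactly as the loop above does, in one vectorised expression.
    x = np.arange(width) - int(width / 2)
    return np.exp(-x ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)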
Example #10
 def construct_proposal(self, y):
     assert(len(shape(y)) == 1)
     m = MixtureDistribution(self.distribution.dimension, self.num_eigen)
     m.mixing_proportion = Discrete((self.eigvalues + 1) / (sum(self.eigvalues) + self.num_eigen))
     # print "current mixing proportion: ", m.mixing_proportion.omega
     for ii in range(self.num_eigen):
         L = sqrt(self.dwscale[ii] * self.eigvalues[ii]) * reshape(self.eigvectors[:, ii], (self.distribution.dimension, 1))
         m.components[ii] = Gaussian(y, L, is_cholesky=True, ell=1)
     # Z=m.sample(1000).samples
     # Visualise.plot_data(Z)
     return m
Example #11
    def adapt(self, mcmc_chain, step_output):
        # this is an extension of the base adapt call
        KameleonWindow.adapt(self, mcmc_chain, step_output)

        iter_no = mcmc_chain.iteration

        if iter_no > self.sample_discard and iter_no < self.stop_adapt:
            learn_scale = 1.0 / sqrt(iter_no - self.sample_discard + 1.0)
            self.nu2 = exp(
                log(self.nu2) + learn_scale *
                (exp(step_output.log_ratio) - self.accstar))
Example #12
 def adapt(self, mcmc_chain, step_output):
     iter_no = mcmc_chain.iteration
     if iter_no > self.sample_discard:
         # 1./number_of_iterations (discard some samples in the beginning)
         learn_scale=1.0 / sqrt(iter_no - self.sample_discard + 1.0)
         #print "current learning rate: ", learn_scale
         if self.adapt_scale:
             self.scale_adapt(learn_scale,step_output)
         if iter_no % self.sample_lag == 0:
             self.mean_and_cov_adapt(learn_scale)
             self.eigen_adapt()
Example #13
 def _grad(self, w):
     self._compute_pls(w)
     grad = numpy.zeros(len(self.mrf.formulas), numpy.float64)        
     for fidx, varval in self._stat.items():
         for varidx, counts in varval.items():
             evidx = self.mrf.variable(varidx).evidence_value_index()
             g = counts[evidx]
             for i, val in enumerate(counts):
                 g -= val * self._pls[varidx][i]
             grad[fidx] += g
     self.grad_opt_norm = sqrt(float(fsum([x * x for x in grad])))
     return numpy.array(grad)
Example #14
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5):
    """Generate a gaussian kernel."""
    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    norm_mu = int(gaussian_kernel_width / 2)

    # Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (
            (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) *
            exp(-(((i - norm_mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2))))
    return gaussian_kernel_1d
Example #15
def get_gaussian_kernel(gaussian_kernel_width=11, gaussian_kernel_sigma=1.5):
    """Generate a gaussian kernel."""
    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    norm_mu = int(gaussian_kernel_width / 2)

    # Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * exp(
            -(((i - norm_mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2))
        )
    return gaussian_kernel_1d
Example #16
def ciRegenerative(N):
    sim = simulateLindleyEfficient(lam, mu, N)
    # Now we're going to split the simulation results vector every time we encounter
    # an empty system (i.e. a waiting time of zero)
    idx = where(sim == 0)[0]  # the positions of the zeros
    sa = split(sim, idx)  # split the list into sub-lists
    Yi = [sum(x) for x in sa]  # the sum of the waiting times in each sub-list
    Ni = [len(x) for x in sa]  # the number of waiting times in each sub-list
    M = len(Yi)  # the number of sub-lists
    Yavg = mean(Yi)  # The average of the sums of the waiting times
    Navg = mean(Ni)  # the mean number of waiting times of the sub-lists
    Wavg = Yavg / Navg  # The overall mean waiting time

    # sample covariance is the (0, 1) (or (1, 0)) element of the covariance matrix
    cv = cov(Yi, Ni)[0, 1]
    sV2 = var(Yi) + Wavg**2 * var(Ni) - 2 * Wavg * cv
    print(sV2)
    ci = Wavg - 1.96 * sqrt(sV2 / M) / Navg, Wavg + 1.96 * sqrt(sV2 / M) / Navg
    return ci
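In formula form, the regenerative estimator computed above is the ratio estimator with its delta-method variance; a LaTeX sketch of what the code returns, where Y_i and N_i are the per-cycle waiting-time sums and counts and M is the number of cycles:

\hat{W} = \frac{\bar{Y}}{\bar{N}}, \qquad
s_V^2 = s_Y^2 + \hat{W}^2 s_N^2 - 2\,\hat{W}\, s_{YN}, \qquad
\hat{W} \pm 1.96\, \frac{s_V}{\bar{N}\sqrt{M}}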
Example #17
def create_gaussian_kernel(gaussian_kernel_sigma=1.5,
                           gaussian_kernel_width=11):
    # 1D Gaussian kernel definition
    gaussian_kernel = np.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
        gaussian_kernel[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
            exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    return gaussian_kernel
Example #18
 def _grad(self, w, **params):    
     if self.current_wts is None or not list(w) != self.current_wts:
         self.current_wts = w
         self.probs = self._compute_probs(w)
     grad = numpy.zeros(len(w))
     for fidx, partitions in self._stat.items():
         for part, values in partitions.items():
             v = values[self.evidx[part]]
             for i, val in enumerate(values):
                 v -= self.probs[part][i] * val
             grad[fidx] += v
     self.grad_opt_norm = sqrt(float(fsum([x * x for x in grad])))
     return numpy.array(grad)
Example #19
 def _grad(self, w, **params):        
     self._compute_pls(w)
     grad = numpy.zeros(len(self.mrf.formulas), numpy.float64)        
      for fidx, varval in self._stat.items():
          for varidx, counts in varval.items():
             if self.mrf.variable(varidx).predicate.name in self.epreds: continue
             evidx = self.mrf.variable(varidx).evidence_value_index()
             g = counts[evidx]
             for i, val in enumerate(counts):
                 g -= val * self._pls[varidx][i]
             grad[fidx] += g
     self.grad_opt_norm = float(sqrt(fsum(map(lambda x: x * x, grad))))
     return numpy.array(grad)
Example #20
def serie_std(serie):
    serie_mean = mean(serie)
    serie_std = []
    std = 0.0
    for value in serie:
        std += pow(value - serie_mean, 2)
    std = sqrt(std/(len(serie)-1))
    for value in serie:
        if std == 0.0:
            serie_std.append(0.0)
        else:
            serie_std.append((value-serie_mean)/std)
    return serie_std
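The same standardisation (z-scores with the sample, n-1, standard deviation) in a vectorised NumPy sketch; the function name is ours:

import numpy as np

def serie_std_np(serie):
    # z-scores with the sample (ddof=1) standard deviation, as above;
    # a constant series maps to all zeros.
    serie = np.asarray(serie, dtype=float)
    std = serie.std(ddof=1)
    if std == 0.0:
        return np.zeros_like(serie)
    return (serie - serie.mean()) / std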
Example #21
 def setMeasure(self, power, dt):
     if power<0:
         self.Power = 0
     else:
         self.Power = sqrt(power)
     #Omega & Alpha
     self.computeOmega(dt)
     #thrust
     self.computeThrust()
     #tau_d
     self.computeTauD()
     #tau_z
     self.computeTauZ()
Example #22
    def get_gaussian(self, f=None, L=None):
        if f is None or L is None:
            f, L, _ = self.find_mode_newton(return_full=True)

        w = -self.gp.likelihood.log_lik_hessian_vector(self.gp.y, f)
        w_sqrt = sqrt(w)
        K = self.gp.K

        # gp book 3.27, matrix inversion lemma on
        # (K^-1 +W)^-1 = K -KW^0.5 B^-1 W^0.5 K
        C = (K.T * w_sqrt).T
        C = solve_triangular(L, C, lower=True)
        C = solve_triangular(L.T, C, lower=False)
        C = (C.T * w_sqrt).T
        C = K.dot(C)
        C = K - C

        return Gaussian(f, C, is_cholesky=False)
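The identity behind the two triangular solves above is equation 3.27 of the GP book (matrix inversion lemma); in LaTeX, with B factored by the Cholesky L computed in find_mode_newton:

\left( K^{-1} + W \right)^{-1} = K - K W^{1/2} B^{-1} W^{1/2} K,
\qquad B = I + W^{1/2} K W^{1/2} = L L^\top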
Example #23
    def get_gaussian(self, f=None, L=None):
        if f is None or L is None:
            f, L, _ = self.find_mode_newton(return_full=True)

        w = -self.gp.likelihood.log_lik_hessian_vector(self.gp.y, f)
        w_sqrt = sqrt(w)
        K = self.gp.K

        # gp book 3.27, matrix inversion lemma on
        # (K^-1 +W)^-1 = K -KW^0.5 B^-1 W^0.5 K
        C = (K.T * w_sqrt).T
        C = solve_triangular(L, C, lower=True)
        C = solve_triangular(L.T, C, lower=False)
        C = (C.T * w_sqrt).T
        C = K.dot(C)
        C = K - C

        return Gaussian(f, C, is_cholesky=False)
Example #24
 def intersection (self, obj):
     k = 0.0
     if obj.radius > 0.0: #sphere
         o2e = vector3.sub_vec(self.point, obj.point)
         half_b = vector3.dot_vec(o2e, self.direction)
         c = vector3.dot_vec(o2e,o2e) - obj.radius*obj.radius
         if half_b*half_b - c >= 0:
             k =  -half_b - sqrt(half_b*half_b - c)
         else :
             k = 1.0e8
     else : #face
         o2e = vector3.sub_vec(self.point, obj.point)
         b = vector3.dot_vec(self.direction,obj.normal)
         if abs(b) > 1.0e-3 :
             k = -(vector3.dot_vec(o2e,obj.normal)) / b
         else :
             k = 1.0e8
     if k<0 : k = 1.0e8
     return vector3.mul_vec(self.direction, k)
Example #25
def fieldpoint(i, t, h, d, w):
    '''Calculates a point in the field line.
    Note that there is no time dependence.
    '''
    # Determine the next position.
    o = list(map(lambda i, d: [i[0] + d[0] * h, i[1] + d[1] * h], i, d))  # list() so it can be indexed below

    # Determine the magnetic field at the new position.
    bx, by, bz = bfield(o[0][0], o[1][0], o[2][0], w)
    b = sqrt(bx**2 + by**2 + bz**2)

    # Avoid divide by zero errors
    if b == 0:
        # It's probably [[0, 0], [0, 0], [0, 0]]
        # But due to floating point error, it might not actually be.
        return [[bx, 0], [by, 0], [bz, 0]]

    # Divide by magnetic field strength to arrive at the gradient.
    return [[bx / b, 0], [by / b, 0], [bz / b, 0]]
Example #26
 def _calculatePointResiduals(self, curve, tube_radius = None):
   if tube_radius is None:
     X = self._X
   else:
     within_tube_indices = self.calculateCoverageIndices(curve, tube_radius)
     X = self._X.take(list(within_tube_indices), axis = 0) 
     
   if self._maxSegmentLength is None:
     self._maxSegmentLength = self._calculateMaxSegmentLength(curve)
   lpc_points = curve['save_xd']
   num_lpc_points = len(lpc_points)
   tree_lpc_points = KDTree(lpc_points)
   residuals = empty(len(X))
   residuals_lamb = empty(len(X))
   path_length = curve['lamb']
   
   for j, p in enumerate(X): 
     closest_lpc_point = tree_lpc_points.query(p)
     candidate_radius = sqrt(closest_lpc_point[0]**2 + 0.25*self._maxSegmentLength**2)
     candidate_segment_ends = tree_lpc_points.query_ball_point(p, candidate_radius)
     candidate_segment_ends.sort()
     
     current_min_segment_dist = (closest_lpc_point[0],0)
     current_closest_index = closest_lpc_point[1]
     last_index = None
     for i, index in enumerate(candidate_segment_ends):
       if index!=0 and last_index != index - 1:
         prv_segment_dist = self._distancePointToLineSegment(lpc_points[index-1], lpc_points[index], p)
         if prv_segment_dist[0] < current_min_segment_dist[0]:
           current_min_segment_dist = prv_segment_dist
           current_closest_index = index - 1
       if index !=  num_lpc_points - 1:  
         prv_segment_dist = self._distancePointToLineSegment(lpc_points[index], lpc_points[index+1], p)
         if prv_segment_dist[0] < current_min_segment_dist[0]:
           current_min_segment_dist = prv_segment_dist
           current_closest_index = index
       last_index = index
     residuals[j] = current_min_segment_dist[0]
     residuals_lamb[j] = path_length[current_closest_index] + current_min_segment_dist[1]
   lamb_order = argsort(residuals_lamb)
   return (residuals_lamb[lamb_order], residuals[lamb_order])
Example #27
    def predict(self, X_test, f_mode=None):
        """
        Predictions for GP with Laplace approximation.
        
        from GP book, algorithm 3.2,
        
        """
        if f_mode is None:
            f_mode = self.find_mode_newton()

        predictions = zeros(len(X_test))

        K = self.gp.K
        K_train_test = self.gp.covariance.compute(self.gp.X, X_test)

        w = -self.gp.likelihood.log_lik_hessian_vector(self.gp.y, f_mode)
        w_sqrt = sqrt(w)

        # diag(w_sqrt).dot(K.dot(diag(w_sqrt))) == (K.T*w_sqrt).T*w_sqrt
        L = cholesky(eye(len(K)) + (K.T * w_sqrt).T * w_sqrt)

        # iterator for all testing points
        for i in range(len(X_test)):
            k = K_train_test[:, i]
            k_self = self.gp.covariance.compute([X_test[i]], [X_test[i]])[0]

            f_mean = k.dot(
                self.gp.likelihood.log_lik_grad_vector(self.gp.y, f_mode))
            v = solve_triangular(L, w_sqrt * k, lower=True)
            f_var = k_self - v.T.dot(v)

            predictions[i] = integrate.quad(
                lambda x: norm.pdf(x, f_mean, f_var), -inf, inf)[0]
#            # integrate over Gaussian using some crude numerical integration
#            samples=randn(1000)*sqrt(f_var) + f_mean
#
#            log_liks=self.gp.likelihood.log_lik_vector(1.0, samples)
#            predictions[i]=1.0/len(samples)*GPTools.log_sum_exp(log_liks)

        return predictions
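For reference, the per-test-point quantities computed inside the loop above follow Algorithm 3.2 of the GP book; a LaTeX sketch, with k_* the train-test covariance column and \hat{f} the Laplace mode:

\bar{f}_* = k_*^\top \nabla \log p(y \mid \hat{f}), \qquad
v = L^{-1}\!\left( W^{1/2} k_* \right), \qquad
\mathbb{V}[f_*] = k(x_*, x_*) - v^\top v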
Example #28
    def predict(self, X_test, f_mode=None):
        """
        Predictions for GP with Laplace approximation.
        
        from GP book, algorithm 3.2,
        
        """
        if f_mode is None:
            f_mode = self.find_mode_newton()

        predictions = zeros(len(X_test))

        K = self.gp.K
        K_train_test = self.gp.covariance.compute(self.gp.X, X_test)

        w = -self.gp.likelihood.log_lik_hessian_vector(self.gp.y, f_mode)
        w_sqrt = sqrt(w)

        # diag(w_sqrt).dot(K.dot(diag(w_sqrt))) == (K.T*w_sqrt).T*w_sqrt
        L = cholesky(eye(len(K)) + (K.T * w_sqrt).T * w_sqrt)

        # iterator for all testing points
        for i in range(len(X_test)):
            k = K_train_test[:, i]
            k_self = self.gp.covariance.compute([X_test[i]], [X_test[i]])[0]

            f_mean = k.dot(self.gp.likelihood.log_lik_grad_vector(self.gp.y, f_mode))
            v = solve_triangular(L, w_sqrt * k, lower=True)
            f_var = k_self - v.T.dot(v)

            predictions[i] = integrate.quad(lambda x: norm.pdf(x, f_mean, f_var), -inf, inf)[0]
        #            # integrate over Gaussian using some crude numerical integration
        #            samples=randn(1000)*sqrt(f_var) + f_mean
        #
        #            log_liks=self.gp.likelihood.log_lik_vector(1.0, samples)
        #            predictions[i]=1.0/len(samples)*GPTools.log_sum_exp(log_liks)

        return predictions
Example #29
 def update(self, mcmc_chain, step_output):
     if mcmc_chain.iteration > self.plot_from and mcmc_chain.iteration % self.lag == 0:
         
         # plot "traces"
         num_plots = mcmc_chain.mcmc_sampler.distribution.dimension
         samples = mcmc_chain.samples[0:mcmc_chain.iteration]
         likelihoods = mcmc_chain.log_liks[0:mcmc_chain.iteration]
         num_y = round(sqrt(num_plots))
         num_x = num_plots / num_y + 1
         for i in range(num_plots):
             subplot(num_y, num_x, i + 1)
             plot(samples[:, i], 'b.')
             ylim([-0.2, 1.2])
             title("Trace $x_" + str(i) + "$. Mean: %f" % mean(samples[:, i]))
             
         subplot(num_y, num_x, num_plots + 1)
         plot(likelihoods)
         title("Log-Likelihood")
             
         suptitle(mcmc_chain.mcmc_sampler.__class__.__name__)
         show()
         draw()
         clf()
Example #30
def bfield_segment(x, y, z, lx0, ly0, lz0, lx1, ly1, lz1):
    '''Calculates magnetic field using Biot-Savart'''
    # Midpoint of the line segment
    mx = (lx0 + lx1) / 2
    my = (ly0 + ly1) / 2
    mz = (lz0 + lz1) / 2

    # Distance from the midpoint of the line segment to the point
    r = sqrt((x - mx)**2 + (y - my)**2 + (z - mz)**2)

    # Check for divide by zero!
    if r == 0:
        return 0, 0, 0

    # Multiply by constants.
    c = k * I * r**(-3)

    # Algebraic cross product
    dbx = c * ((ly1 - ly0) * (z - mz) - (y - my) * (lz1 - lz0))
    dby = -c * ((lx1 - lx0) * (z - mz) - (x - mx) * (lz1 - lz0))
    dbz = c * ((lx1 - lx0) * (y - my) - (x - mx) * (ly1 - ly0))

    return dbx, dby, dbz
Example #31
    def update(self, mcmc_chain, step_output):
        if mcmc_chain.iteration > self.plot_from and mcmc_chain.iteration % self.lag == 0:

            # plot "traces"
            num_plots = mcmc_chain.mcmc_sampler.distribution.dimension
            samples = mcmc_chain.samples[0:mcmc_chain.iteration]
            likelihoods = mcmc_chain.log_liks[0:mcmc_chain.iteration]
            num_y = round(sqrt(num_plots))
            num_x = num_plots / num_y + 1
            for i in range(num_plots):
                subplot(num_y, num_x, i + 1)
                plot(samples[:, i], 'b.')
                ylim([-0.2, 1.2])
                title("Trace $x_" + str(i) +
                      "$. Mean: %f" % mean(samples[:, i]))

            subplot(num_y, num_x, num_plots + 1)
            plot(likelihoods)
            title("Log-Likelihood")

            suptitle(mcmc_chain.mcmc_sampler.__class__.__name__)
            show()
            draw()
            clf()
Example #32
    def __process_results__(self):
        lines = []
        if len(self.experiments) == 0:
            lines.append("no experiments to process")
            return
        
        # burnin is the same for all chains
        burnin = self.experiments[0].mcmc_chain.mcmc_params.burnin
        
        quantiles = zeros((len(self.experiments), len(self.ref_quantiles)))
        norm_of_means = zeros(len(self.experiments))
        acceptance_rates = zeros(len(self.experiments))
#         ess_0 = zeros(len(self.experiments))
#         ess_1 = zeros(len(self.experiments))
#         ess_minima = zeros(len(self.experiments))
#         ess_medians = zeros(len(self.experiments))
#         ess_maxima = zeros(len(self.experiments))
        times = zeros(len(self.experiments))
        
        for i in range(len(self.experiments)):
            burned_in = self.experiments[i].mcmc_chain.samples[burnin:, :]
            
            # use precomputed quantiles if they match with the provided ones
            if hasattr(self.experiments[i], "ref_quantiles") and \
               hasattr(self.experiments[i], "quantiles") and \
               allclose(self.ref_quantiles, self.experiments[i].ref_quantiles):
                quantiles[i, :] = self.experiments[i].quantiles
            else:
                try:
                    quantiles[i, :] = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(\
                                      burned_in, self.ref_quantiles)
                except NotImplementedError:
                    print "skipping quantile computations, distribution does", \
                          "not support it."
            
            # quantiles should be about average error rather than average quantile
            quantiles[i,:]=abs(quantiles[i,:]-self.ref_quantiles)
            
            dim = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.dimension
            norm_of_means[i] = norm(mean(burned_in, 0))
            acceptance_rates[i] = mean(self.experiments[i].mcmc_chain.accepteds[burnin:])
            
            # dump burned in samples to disc
            # sample_filename=self.experiments[0].experiment_dir + self.experiments[0].name + "_burned_in.txt"
            # savetxt(sample_filename, burned_in)
            
            # store minimum ess for every experiment
            #ess_per_covariate = asarray([RCodaTools.ess_coda(burned_in[:, cov_idx]) for cov_idx in range(dim)])
#             ess_per_covariate = asarray([0 for _ in range(dim)])
#             ess_0=ess_per_covariate[0]
#             ess_1=ess_per_covariate[1]
#             ess_minima[i] = min(ess_per_covariate)
#             ess_medians[i] = median(ess_per_covariate)
#             ess_maxima[i] = max(ess_per_covariate)
            
            # save chain time needed
            ellapsed = self.experiments[i].mcmc_chain.mcmc_outputs[0].times
            times[i] = int(round(sum(ellapsed)))

        mean_quantiles = mean(quantiles, 0)
        std_quantiles = std(quantiles, 0)
        
        sqrt_num_trials=sqrt(len(self.experiments))
        
        # print median kernel width sigma
        #sigma=GaussianKernel.get_sigma_median_heuristic(burned_in.T)
        #lines.append("median kernel sigma: "+str(sigma))
        
        lines.append("quantiles:")
        for i in range(len(self.ref_quantiles)):
            lines.append(str(mean_quantiles[i]) + " +- " + str(std_quantiles[i]/sqrt_num_trials))
        
        lines.append("norm of means:")
        lines.append(str(mean(norm_of_means)) + " +- " + str(std(norm_of_means)/sqrt_num_trials))
        
        lines.append("acceptance rate:")
        lines.append(str(mean(acceptance_rates)) + " +- " + str(std(acceptance_rates)/sqrt_num_trials))
        
#         lines.append("ess dimension 0:")
#         lines.append(str(mean(ess_0)) + " +- " + str(std(ess_0)/sqrt_num_trials))
#         
#         lines.append("ess dimension 1:")
#         lines.append(str(mean(ess_1)) + " +- " + str(std(ess_1)/sqrt_num_trials))
#         
#         lines.append("minimum ess:")
#         lines.append(str(mean(ess_minima)) + " +- " + str(std(ess_minima)/sqrt_num_trials))
#         
#         lines.append("median ess:")
#         lines.append(str(mean(ess_medians)) + " +- " + str(std(ess_medians)/sqrt_num_trials))
#         
#         lines.append("maximum ess:")
#         lines.append(str(mean(ess_maxima)) + " +- " + str(std(ess_maxima)/sqrt_num_trials))
        
        lines.append("times:")
        lines.append(str(mean(times)) + " +- " + str(std(times)/sqrt_num_trials))
        
        # mean as a function of iterations, normalised by time
        step = round((self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin)/5)
        iterations = arange(self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin, step=step)
        
        running_means = zeros(len(iterations))
        running_errors = zeros(len(iterations))
        for i in arange(len(iterations)):
            # norm of mean of chain up 
            norm_of_means_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
                norm_of_means_yet[j] = norm(mean(samples_yet, 0))
            
            running_means[i] = mean(norm_of_means_yet)
            error_level = 1.96
            running_errors[i] = error_level * std(norm_of_means_yet) / sqrt(len(norm_of_means_yet))
        
        ioff()
        figure()
        plot(iterations, running_means*mean(times))
        fill_between(iterations, (running_means - running_errors)*mean(times), \
                     (running_means + running_errors)*mean(times), hold=True, color="gray")
        
        # make sure path to save exists
        try:
            os.makedirs(self.experiments[0].experiment_dir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
        
        savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean.png")
        close()
        
        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_Y.txt", \
                running_means*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_errors.txt", \
                running_errors*mean(times))
        
        # dont produce quantile convergence plots here for now
        """# quantile convergence of a single one
        desired_quantile=0.5
        running_quantiles=zeros(len(iterations))
        running_quantile_errors=zeros(len(iterations))
        for i in arange(len(iterations)):
            quantiles_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
                
                # just compute one quantile for now
                quantiles_yet[j]=self.experiments[j].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(samples_yet, \
                                                                                          array([desired_quantile]))
                quantiles_yet[j]=abs(quantiles_yet[j]-desired_quantile)
            running_quantiles[i] = mean(quantiles_yet)
            error_level = 1.96
            running_quantile_errors[i] = error_level * std(quantiles_yet) / sqrt(len(quantiles_yet))
        
        
        ioff()
        figure()
        plot(iterations, running_quantiles*mean(times))
        fill_between(iterations, (running_quantiles - running_quantile_errors)*mean(times), \
                     (running_quantiles + running_quantile_errors)*mean(times), hold=True, color="gray")
        
        plot([iterations.min(),iterations.max()], [desired_quantile*mean(times) for _ in range(2)])
        
        title(str(desired_quantile)+"-quantile convergence")
        savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile.png")
        close()
        
        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_Y.txt", \
                running_quantiles*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_errors.txt", \
                running_quantile_errors*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_reference.txt", \
                [desired_quantile*mean(times)])
        """
        # add latex table line
#         latex_lines = []
#         latex_lines.append("Sampler & Acceptance & ESS2 & Norm(mean) & ")
#         for i in range(len(self.ref_quantiles)):
#             latex_lines.append('%.1f' % self.ref_quantiles[i] + "-quantile")
#             if i < len(self.ref_quantiles) - 1:
#                 latex_lines.append(" & ")
#         latex_lines.append("\\\\")
#         lines.append("".join(latex_lines))
#         
#         latex_lines = []
#         latex_lines.append(self.experiments[0].mcmc_chain.mcmc_sampler.__class__.__name__)
#         latex_lines.append('$%.3f' % mean(acceptance_rates) + " \pm " + '%.3f$' % (std(acceptance_rates)/sqrt_num_trials))
#         latex_lines.append('$%.3f' % mean(norm_of_means) + " \pm " + '%.3f$' % (std(norm_of_means)/sqrt_num_trials))
#         for i in range(len(self.ref_quantiles)):
#             latex_lines.append('$%.3f' % mean_quantiles[i] + " \pm " + '%.3f$' % (std_quantiles[i]/sqrt_num_trials))
#         
#         
#         lines.append(" & ".join(latex_lines) + "\\\\")
        
        return lines
Example #33
    def __process_results__(self):
        lines = []
        if len(self.experiments) == 0:
            lines.append("no experiments to process")
            return

        # burnin is the same for all chains
        burnin = self.experiments[0].mcmc_chain.mcmc_params.burnin

        quantiles = zeros((len(self.experiments), len(self.ref_quantiles)))
        norm_of_means = zeros(len(self.experiments))
        acceptance_rates = zeros(len(self.experiments))
        #         ess_0 = zeros(len(self.experiments))
        #         ess_1 = zeros(len(self.experiments))
        #         ess_minima = zeros(len(self.experiments))
        #         ess_medians = zeros(len(self.experiments))
        #         ess_maxima = zeros(len(self.experiments))
        times = zeros(len(self.experiments))

        for i in range(len(self.experiments)):
            burned_in = self.experiments[i].mcmc_chain.samples[burnin:, :]

            # use precomputed quantiles if they match with the provided ones
            if hasattr(self.experiments[i], "ref_quantiles") and \
               hasattr(self.experiments[i], "quantiles") and \
               allclose(self.ref_quantiles, self.experiments[i].ref_quantiles):
                quantiles[i, :] = self.experiments[i].quantiles
            else:
                try:
                    quantiles[i, :] = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(\
                                      burned_in, self.ref_quantiles)
                except NotImplementedError:
                    print "skipping quantile computations, distribution does", \
                          "not support it."

            # quantiles should be about average error rather than average quantile
            quantiles[i, :] = abs(quantiles[i, :] - self.ref_quantiles)

            dim = self.experiments[
                i].mcmc_chain.mcmc_sampler.distribution.dimension
            norm_of_means[i] = norm(mean(burned_in, 0))
            acceptance_rates[i] = mean(
                self.experiments[i].mcmc_chain.accepteds[burnin:])

            # dump burned in samples to disc
            # sample_filename=self.experiments[0].experiment_dir + self.experiments[0].name + "_burned_in.txt"
            # savetxt(sample_filename, burned_in)

            # store minimum ess for every experiment
            #ess_per_covariate = asarray([RCodaTools.ess_coda(burned_in[:, cov_idx]) for cov_idx in range(dim)])
            #             ess_per_covariate = asarray([0 for _ in range(dim)])
            #             ess_0=ess_per_covariate[0]
            #             ess_1=ess_per_covariate[1]
            #             ess_minima[i] = min(ess_per_covariate)
            #             ess_medians[i] = median(ess_per_covariate)
            #             ess_maxima[i] = max(ess_per_covariate)

            # save chain time needed
            ellapsed = self.experiments[i].mcmc_chain.mcmc_outputs[0].times
            times[i] = int(round(sum(ellapsed)))

        mean_quantiles = mean(quantiles, 0)
        std_quantiles = std(quantiles, 0)

        sqrt_num_trials = sqrt(len(self.experiments))

        # print median kernel width sigma
        #sigma=GaussianKernel.get_sigma_median_heuristic(burned_in.T)
        #lines.append("median kernel sigma: "+str(sigma))

        lines.append("quantiles:")
        for i in range(len(self.ref_quantiles)):
            lines.append(
                str(mean_quantiles[i]) + " +- " +
                str(std_quantiles[i] / sqrt_num_trials))

        lines.append("norm of means:")
        lines.append(
            str(mean(norm_of_means)) + " +- " +
            str(std(norm_of_means) / sqrt_num_trials))

        lines.append("acceptance rate:")
        lines.append(
            str(mean(acceptance_rates)) + " +- " +
            str(std(acceptance_rates) / sqrt_num_trials))

        #         lines.append("ess dimension 0:")
        #         lines.append(str(mean(ess_0)) + " +- " + str(std(ess_0)/sqrt_num_trials))
        #
        #         lines.append("ess dimension 1:")
        #         lines.append(str(mean(ess_1)) + " +- " + str(std(ess_1)/sqrt_num_trials))
        #
        #         lines.append("minimum ess:")
        #         lines.append(str(mean(ess_minima)) + " +- " + str(std(ess_minima)/sqrt_num_trials))
        #
        #         lines.append("median ess:")
        #         lines.append(str(mean(ess_medians)) + " +- " + str(std(ess_medians)/sqrt_num_trials))
        #
        #         lines.append("maximum ess:")
        #         lines.append(str(mean(ess_maxima)) + " +- " + str(std(ess_maxima)/sqrt_num_trials))

        lines.append("times:")
        lines.append(
            str(mean(times)) + " +- " + str(std(times) / sqrt_num_trials))

        # mean as a function of iterations, normalised by time
        step = round(
            (self.experiments[0].mcmc_chain.mcmc_params.num_iterations -
             burnin) / 5)
        iterations = arange(
            self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin,
            step=step)

        running_means = zeros(len(iterations))
        running_errors = zeros(len(iterations))
        for i in arange(len(iterations)):
            # norm of mean of chain up
            norm_of_means_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(
                    burnin + iterations[i] + 1 + step), :]
                norm_of_means_yet[j] = norm(mean(samples_yet, 0))

            running_means[i] = mean(norm_of_means_yet)
            error_level = 1.96
            running_errors[i] = error_level * std(norm_of_means_yet) / sqrt(
                len(norm_of_means_yet))

        ioff()
        figure()
        plot(iterations, running_means * mean(times))
        fill_between(iterations, (running_means - running_errors)*mean(times), \
                     (running_means + running_errors)*mean(times), hold=True, color="gray")

        # make sure path to save exists
        try:
            os.makedirs(self.experiments[0].experiment_dir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        savefig(self.experiments[0].experiment_dir + self.experiments[0].name +
                "_running_mean.png")
        close()

        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_Y.txt", \
                running_means*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_errors.txt", \
                running_errors*mean(times))

        # dont produce quantile convergence plots here for now
        """# quantile convergence of a single one
        desired_quantile=0.5
        running_quantiles=zeros(len(iterations))
        running_quantile_errors=zeros(len(iterations))
        for i in arange(len(iterations)):
            quantiles_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
                
                # just compute one quantile for now
                quantiles_yet[j]=self.experiments[j].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(samples_yet, \
                                                                                          array([desired_quantile]))
                quantiles_yet[j]=abs(quantiles_yet[j]-desired_quantile)
            running_quantiles[i] = mean(quantiles_yet)
            error_level = 1.96
            running_quantile_errors[i] = error_level * std(quantiles_yet) / sqrt(len(quantiles_yet))
        
        
        ioff()
        figure()
        plot(iterations, running_quantiles*mean(times))
        fill_between(iterations, (running_quantiles - running_quantile_errors)*mean(times), \
                     (running_quantiles + running_quantile_errors)*mean(times), hold=True, color="gray")
        
        plot([iterations.min(),iterations.max()], [desired_quantile*mean(times) for _ in range(2)])
        
        title(str(desired_quantile)+"-quantile convergence")
        savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile.png")
        close()
        
        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_Y.txt", \
                running_quantiles*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_errors.txt", \
                running_quantile_errors*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_reference.txt", \
                [desired_quantile*mean(times)])
        """
        # add latex table line
        #         latex_lines = []
        #         latex_lines.append("Sampler & Acceptance & ESS2 & Norm(mean) & ")
        #         for i in range(len(self.ref_quantiles)):
        #             latex_lines.append('%.1f' % self.ref_quantiles[i] + "-quantile")
        #             if i < len(self.ref_quantiles) - 1:
        #                 latex_lines.append(" & ")
        #         latex_lines.append("\\\\")
        #         lines.append("".join(latex_lines))
        #
        #         latex_lines = []
        #         latex_lines.append(self.experiments[0].mcmc_chain.mcmc_sampler.__class__.__name__)
        #         latex_lines.append('$%.3f' % mean(acceptance_rates) + " \pm " + '%.3f$' % (std(acceptance_rates)/sqrt_num_trials))
        #         latex_lines.append('$%.3f' % mean(norm_of_means) + " \pm " + '%.3f$' % (std(norm_of_means)/sqrt_num_trials))
        #         for i in range(len(self.ref_quantiles)):
        #             latex_lines.append('$%.3f' % mean_quantiles[i] + " \pm " + '%.3f$' % (std_quantiles[i]/sqrt_num_trials))
        #
        #
        #         lines.append(" & ".join(latex_lines) + "\\\\")

        return lines
Example #34
 def computeK(self):
     self.K = pow(self.K_v*self.K_tau*sqrt(2*self.Rho*self.A_swept)/self.K_t, 2)
Example #35
def compute_ssim(im1, im2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11):
    """
    The function to compute SSIM
    @param im1: PIL Image object
    @param im2: PIL Image object
    @return: SSIM float value
    """

    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    #Fill Gaussian kernel
    for i in range(gaussian_kernel_width):
            gaussian_kernel_1d[i] = (1 / (sqrt(2 * pi) * (gaussian_kernel_sigma))) * \
                exp(-(((i - mu) ** 2)) / (2 * (gaussian_kernel_sigma ** 2)))

    # convert the images to grayscale
    img_mat_1, img_alpha_1 = _to_grayscale(im1)
    img_mat_2, img_alpha_2 = _to_grayscale(im2)

    # don't count pixels where both images are fully transparent
    if img_alpha_1 is not None and img_alpha_2 is not None:
        img_mat_1[img_alpha_1 == 255] = 0
        img_mat_2[img_alpha_2 == 255] = 0

    #Squares of input matrices
    img_mat_1_sq = img_mat_1 ** 2
    img_mat_2_sq = img_mat_2 ** 2
    img_mat_12 = img_mat_1 * img_mat_2

    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1 = convolve_gaussian_2d(img_mat_1, gaussian_kernel_1d)
    img_mat_mu_2 = convolve_gaussian_2d(img_mat_2, gaussian_kernel_1d)

    #Squares of means
    img_mat_mu_1_sq = img_mat_mu_1 ** 2
    img_mat_mu_2_sq = img_mat_mu_2 ** 2
    img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2

    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq = convolve_gaussian_2d(img_mat_1_sq, gaussian_kernel_1d)
    img_mat_sigma_2_sq = convolve_gaussian_2d(img_mat_2_sq, gaussian_kernel_1d)

    #Covariance
    img_mat_sigma_12 = convolve_gaussian_2d(img_mat_12, gaussian_kernel_1d)

    #Centered squares of variances
    img_mat_sigma_1_sq -= img_mat_mu_1_sq
    img_mat_sigma_2_sq -= img_mat_mu_2_sq
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12

    #set k1,k2 & c1,c2 to depend on L (width of color map)
    l = 255
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2

    #Numerator of SSIM
    num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)

    #Denominator of SSIM
    den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
               (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)

    #SSIM
    ssim_map = num_ssim / den_ssim
    index = numpy.average(ssim_map)

    return index
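The map that is averaged at the end is the standard SSIM index; in LaTeX, with the same constants as above (k_1 = 0.01, k_2 = 0.03, L = 255):

\mathrm{SSIM}(x, y) =
\frac{(2\mu_x\mu_y + c_1)(2\sigma_{xy} + c_2)}
     {(\mu_x^2 + \mu_y^2 + c_1)(\sigma_x^2 + \sigma_y^2 + c_2)},
\qquad c_1 = (k_1 L)^2, \; c_2 = (k_2 L)^2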
Example #36
 def get_sigma_median_heuristic(X):
     dists=squareform(pdist(X, 'euclidean'))
     median_dist=median(dists[dists>0])
     sigma=sqrt(0.5*median_dist)
     return sigma
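A short usage sketch for the median heuristic above; the imports are the ones the snippet needs (they are not shown in the excerpt), the toy data are made up, and in the original the function is a static method of a kernel class, so it may need to be called through that class.

# Hypothetical call; assumes get_sigma_median_heuristic is reachable at module level.
import numpy as np
from numpy import median, sqrt
from scipy.spatial.distance import pdist, squareform

X = np.random.randn(200, 2)           # toy data: 200 two-dimensional points
print(get_sigma_median_heuristic(X))  # kernel width from the median pairwise distance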
Example #37
    def compute_ssim(self, im1, im2):
        """
        The function to compute SSIM
        @param im1: PIL Image object, or grayscale ndarray
        @param im2: PIL Image object, or grayscale ndarray
        @return: SSIM float value
        """

        # 1D Gaussian kernel definition
        gaussian_kernel_2d = np.ndarray((self.gaussian_kernel_width))
        mu = int(self.gaussian_kernel_width / 2)

        #Fill Gaussian kernel
        for i in range(self.gaussian_kernel_width):
            gaussian_kernel_2d[i] = (1 / (sqrt(2 * pi) * (self.gaussian_kernel_sigma))) * \
              exp(-(((i - mu) ** 2)) / (2 * (self.gaussian_kernel_sigma ** 2)))
        gshape = (self.gaussian_kernel_width, self.gaussian_kernel_width)
        gsigma = (self.gaussian_kernel_sigma, self.gaussian_kernel_sigma)
        gaussian_kernel_2d = gaussian(shape=gshape, sigma=gsigma)
        # pdb.set_trace()
        # convert the images to grayscale
        if im1.__class__.__name__ == 'Image':
            img_mat_1, img_alpha_1 = _to_grayscale(im1)
            # don't count pixels where both images are fully transparent
            #if img_alpha_1 is not None:
            #img_mat_1[img_alpha_1 == 255] = 0
        else:
            img_mat_1 = im1
        if im2.__class__.__name__ == 'Image':
            img_mat_2, img_alpha_2 = _to_grayscale(im2)
            #if img_alpha_2 is not None:
            #img_mat_2[img_alpha_2 == 255] = 0
        else:
            img_mat_2 = im2

        #Squares of input matrices
        img_mat_1_sq = img_mat_1**2
        img_mat_2_sq = img_mat_2**2
        img_mat_12 = img_mat_1 * img_mat_2

        #Means obtained by Gaussian filtering of inputs
        img_mat_mu_1 = self.convolve_gaussian_2d(img_mat_1, gaussian_kernel_2d)
        img_mat_mu_2 = self.convolve_gaussian_2d(img_mat_2, gaussian_kernel_2d)

        #Squares of means
        img_mat_mu_1_sq = img_mat_mu_1**2
        img_mat_mu_2_sq = img_mat_mu_2**2
        img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2

        #Variances obtained by Gaussian filtering of inputs' squares
        img_mat_sigma_1_sq = self.convolve_gaussian_2d(img_mat_1_sq,
                                                       gaussian_kernel_2d)
        img_mat_sigma_2_sq = self.convolve_gaussian_2d(img_mat_2_sq,
                                                       gaussian_kernel_2d)

        #Covariance
        img_mat_sigma_12 = self.convolve_gaussian_2d(img_mat_12,
                                                     gaussian_kernel_2d)

        #Centered squares of variances
        img_mat_sigma_1_sq -= img_mat_mu_1_sq
        img_mat_sigma_2_sq -= img_mat_mu_2_sq
        img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12

        #set k1,k2 & c1,c2 to depend on L (width of color map)
        #l = 255
        k_1 = self.K[0]
        c_1 = (k_1 * self.L)**2
        k_2 = self.K[1]
        c_2 = (k_2 * self.L)**2

        #Numerator of SSIM
        num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)

        #Denominator of SSIM
        den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
          (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)

        #SSIM
        ssim_map = num_ssim / den_ssim
        index = np.average(ssim_map)

        return index
Example #38
def write_dispersion_diff_files(path, twiss_cor, twiss_no):
    try:
        twiss_getdx = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getDx.out'))
        file_dx = open(os.path.join(path, "dx.out"), "w")
        print("* NAME S MEA ERROR MODEL", file=file_dx)
        print("$ %s %le %le %le %le", file=file_dx)
        for i in range(len(twiss_getdx.NAME)):
            bpm_name = twiss_getdx.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print(bpm_name, twiss_getdx.S[i], twiss_getdx.DX[i] - twiss_getdx.DXMDL[i], twiss_getdx.STDDX[i], twiss_cor.DX[j] - twiss_no.DX[j], file=file_dx)

        file_dx.close()
    except IOError:
        print "NO dispersion"
    except AttributeError:
        print "Empty table in getDx.out?! NO dispersion"
    try:
        twiss_getdy = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getDy.out'))
        file_dy = open(os.path.join(path, "dy.out"), "w")
        print("* NAME S MEA ERROR MODEL", file=file_dy)
        print("$ %s %le %le %le %le", file=file_dy)
        for i in range(len(twiss_getdy.NAME)):
            bpm_name = twiss_getdy.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print(bpm_name, twiss_getdy.S[i], twiss_getdy.DY[i] - twiss_getdy.DYMDL[i], twiss_getdy.STDDY[i], twiss_cor.DY[j] - twiss_no.DY[j], file=file_dy)

        file_dy.close()
    except IOError:
        print "NO dispersion."
    except AttributeError:
        print "Empty table in getDy.out?! NO dispersion"
    try:
        twiss_getndx = Python_Classes4MAD.metaclass.twiss(os.path.join(path, 'getNDx.out'))
        file_ndx = open(os.path.join(path, "ndx.out"), "w")
        print("* NAME S MEA ERROR MODEL", file=file_ndx)
        print("$ %s %le %le %le %le", file=file_ndx)
        for i in range(len(twiss_getndx.NAME)):
            bpm_name = twiss_getndx.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print(bpm_name, twiss_getndx.S[i], twiss_getndx.NDX[i] - twiss_getndx.NDXMDL[i], twiss_getndx.STDNDX[i], (twiss_cor.DX[j] / sqrt(twiss_cor.BETX[j])) - (twiss_no.DX[j] / sqrt(twiss_no.BETX[j])), file=file_ndx)

        file_ndx.close()
    except IOError:
        print "NO normalized dispersion"
    except AttributeError:
        print "Empty table in getNDx.out?! NO normalized dispersion"
Example #39
    def update(self, mcmc_chain, step_output):
        if mcmc_chain.iteration > self.plot_from and mcmc_chain.iteration%self.lag==0:
            if mcmc_chain.mcmc_sampler.distribution.dimension==2:
                subplot(2, 3, 1)
                if self.distribution is not None:
                    Visualise.plot_array(self.Xs, self.Ys, self.P)
                
                # only plot a number of random samples otherwise this is too slow
                if self.num_samples_plot>0:
                    num_plot=min(mcmc_chain.iteration-1,self.num_samples_plot)
                    indices=permutation(mcmc_chain.iteration)[:num_plot]
                else:
                    num_plot=mcmc_chain.iteration-1
                    indices=arange(num_plot)
                    
                samples=mcmc_chain.samples[0:mcmc_chain.iteration]
                samples_to_plot=mcmc_chain.samples[indices]
                
                # still plot all likelihoods
                likelihoods=mcmc_chain.log_liks[0:mcmc_chain.iteration]
                likelihoods_to_plot=mcmc_chain.log_liks[indices]
                proposal_1d=step_output.proposal_object.samples[0,:]
                
                y = samples[len(samples) - 1]
                
                # plot samples, coloured by likelihood, or just connect
                if self.colour_by_likelihood:
                    likelihoods_to_plot=likelihoods_to_plot.copy()
                    likelihoods_to_plot=likelihoods_to_plot-likelihoods_to_plot.min()
                    likelihoods_to_plot=likelihoods_to_plot/likelihoods_to_plot.max()
                    
                    cm=get_cmap("jet")
                    for i in range(len(samples_to_plot)):
                        color = cm(likelihoods_to_plot[i])
                        plot(samples_to_plot[i,0], samples_to_plot[i,1]  ,"o",
                             color=color, zorder=1)
                else:
                    plot(samples_to_plot[:,0], samples_to_plot[:,1], "m", zorder=1)
                
                plot(y[0], y[1], 'r*', markersize=15.0)
                plot(proposal_1d[0], proposal_1d[1], 'y*', markersize=15.0)
                if self.distribution is not None:
                    Visualise.contour_plot_density(mcmc_chain.mcmc_sampler.Q, self.Xs, \
                                                   self.Ys, log_domain=False)
                else:
                    Visualise.contour_plot_density(mcmc_chain.mcmc_sampler.Q)
#                    axis('equal')
                
                xlabel("$x_1$")
                ylabel("$x_2$")
                if self.num_samples_plot>0:
                    title(str(self.num_samples_plot) + " random samples")
            
                subplot(2, 3, 2)
                plot(samples[:, 0], 'b')
                title("Trace $x_1$")
                
                subplot(2, 3, 3)
                plot(samples[:, 1], 'b')
                title("Trace $x_2$")
                
                subplot(2, 3, 4)
                plot(mcmc_chain.log_liks[0:mcmc_chain.iteration], 'b')
                title("Log-likelihood")
                
                if len(samples) > 2:
                    subplot(2, 3, 5)
                    hist(samples[:, 0])
                    title("Histogram $x_1$")
            
                    subplot(2, 3, 6)
                    hist(samples[:, 1])
                    title("Histogram $x_2$")
            else:
                # if target dimension is not two, plot traces
                num_plots=mcmc_chain.mcmc_sampler.distribution.dimension
                samples=mcmc_chain.samples[0:mcmc_chain.iteration]
                likelihoods=mcmc_chain.log_liks[0:mcmc_chain.iteration]
                # subplot indices must be integers
                num_y=int(round(sqrt(num_plots)))
                num_x=int(num_plots/num_y)+1
                for i in range(num_plots):
                    subplot(num_y, num_x, i+1)
                    plot(samples[:, i], 'b')
                    title("Trace $x_" +str(i) + "$")
                    
                subplot(num_y, num_x, num_plots+1)
                plot(likelihoods)
                title("Log-Likelihood")
                
            suptitle(mcmc_chain.mcmc_sampler.__class__.__name__)
            show()
            draw()
            clf()
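The colour_by_likelihood branch above rescales the log-likelihoods to [0, 1] and pushes them through a colormap, one point at a time. A standalone sketch of that colouring step, vectorised with scatter and using random toy data (everything below is illustrative, not part of the original class):

from numpy.random import randn
from matplotlib.pyplot import get_cmap, scatter, show

samples = randn(200, 2)                       # toy 2D samples
log_liks = -0.5 * (samples ** 2).sum(axis=1)  # toy log-likelihoods

# rescale to [0, 1] so the values can index the colormap
scaled = log_liks - log_liks.min()
scaled = scaled / scaled.max()

cm = get_cmap("jet")
scatter(samples[:, 0], samples[:, 1], c=cm(scaled), zorder=1)
show()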
Beispiel #40
0
def write_dispersion_diff_files(path, twiss_cor, twiss_no):
    try:
        twiss_getdx = Python_Classes4MAD.metaclass.twiss(
            os.path.join(path, 'getDx.out'))
        file_dx = open(os.path.join(path, "dx.out"), "w")
        print >> file_dx, "* NAME S MEA ERROR MODEL"
        print >> file_dx, "$ %s %le %le %le %le"
        for i in range(len(twiss_getdx.NAME)):
            bpm_name = twiss_getdx.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[
                    twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except KeyError:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print >> file_dx, bpm_name, twiss_getdx.S[i], twiss_getdx.DX[
                    i] - twiss_getdx.DXMDL[i], twiss_getdx.STDDX[
                        i], twiss_cor.DX[j] - twiss_no.DX[j]

        file_dx.close()
    except IOError:
        print "NO dispersion"
    except AttributeError:
        print "Empty table in getDx.out?! NO dispersion"
    try:
        twiss_getdy = Python_Classes4MAD.metaclass.twiss(
            os.path.join(path, 'getDy.out'))
        file_dy = open(os.path.join(path, "dy.out"), "w")
        print >> file_dy, "* NAME S MEA ERROR MODEL"
        print >> file_dy, "$ %s %le %le %le %le"
        for i in range(len(twiss_getdy.NAME)):
            bpm_name = twiss_getdy.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[
                    twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except KeyError:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print >> file_dy, bpm_name, twiss_getdy.S[i], twiss_getdy.DY[
                    i] - twiss_getdy.DYMDL[i], twiss_getdy.STDDY[
                        i], twiss_cor.DY[j] - twiss_no.DY[j]

        file_dy.close()
    except IOError:
        print "NO dispersion."
    except AttributeError:
        print "Empty table in getDy.out?! NO dispersion"
    try:
        twiss_getndx = Python_Classes4MAD.metaclass.twiss(
            os.path.join(path, 'getNDx.out'))
        file_ndx = open(os.path.join(path, "ndx.out"), "w")
        print >> file_ndx, "* NAME S MEA ERROR MODEL"
        print >> file_ndx, "$ %s %le %le %le %le"
        for i in range(len(twiss_getndx.NAME)):
            bpm_name = twiss_getndx.NAME[i]
            bpm_included = True
            try:
                check = twiss_cor.NAME[
                    twiss_cor.indx[bpm_name]]  # @UnusedVariable
            except KeyError:
                print "No ", bpm_name
                bpm_included = False
            if bpm_included:
                j = twiss_cor.indx[bpm_name]
                print >> file_ndx, bpm_name, twiss_getndx.S[
                    i], twiss_getndx.NDX[i] - twiss_getndx.NDXMDL[
                        i], twiss_getndx.STDNDX[i], (twiss_cor.DX[j] / sqrt(
                            twiss_cor.BETX[j])) - (twiss_no.DX[j] /
                                                   sqrt(twiss_no.BETX[j]))

        file_ndx.close()
    except IOError:
        print "NO normalized dispersion"
    except AttributeError:
        print "Empty table in getNDx.out?! NO normalized dispersion"
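A hedged usage sketch for write_dispersion_diff_files: it assumes a measurement directory containing getDx.out, getDy.out and getNDx.out, plus twiss files for the corrected and uncorrected models (the file names below are illustrative):

import os
import Python_Classes4MAD.metaclass

measurement_path = "/path/to/measurement"  # assumed directory layout
twiss_cor = Python_Classes4MAD.metaclass.twiss(
    os.path.join(measurement_path, "twiss_cor.dat"))  # corrected model, name assumed
twiss_no = Python_Classes4MAD.metaclass.twiss(
    os.path.join(measurement_path, "twiss_no.dat"))   # uncorrected model, name assumed

write_dispersion_diff_files(measurement_path, twiss_cor, twiss_no)
Beispiel #41
0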
def incomplete_cholesky(X, kernel, eta, power=1, blocksize=100):
    """
    Computes the incomplete Cholesky factorisation of the kernel matrix defined
    by samples X and a given kernel. The kernel is evaluated on-the-fly.
    The optional power parameter is used to multiply the kernel output with
    itself.
    
    Original code from "Kernel Methods for Pattern Analysis" by Shawe-Taylor and
    Cristianini.
    Modified to compute the kernel on the fly, to support kernels multiplied
    with themselves (tensor product), and to improve speed by using vector
    operations and by allocating memory for the low-rank factor block-wise
    instead of pre-allocating the full kernel matrix.
    Changes by Heiko Strathmann
    
    parameters:
    X         - list of input vectors to evaluate kernel on
    kernel    - a kernel object with a kernel method that takes 2d-arrays
                and returns a psd kernel matrix
    eta       - precision cutoff parameter for the low-rank approximation.
                Lies in (0,1), where smaller means more accurate.
    power     - every kernel evaluation is multiplied with itself this number
                of times. Zero is supported.
    blocksize - tuning parameter for speed, determines how many rows are
                allocated per block for the (growing) kernel matrix. Larger
                means a faster algorithm (to some extent, as long as the
                low-rank dimension is larger than blocksize).
    
    output:
    K_chol, I, R, W, where
    K_chol - the kernel matrix restricted to the pivot index features
    I      - a list containing the pivots used to compute K_chol
    R      - a low-rank factor such that R.T.dot(R) approximates the
             original K
    W      - a matrix such that W.T.dot(K_chol.dot(W)) approximates the
             original K
    
    """
    assert(eta>0 and eta<1)
    assert(power>=0)
    assert(blocksize>=1)
    assert(len(X)>0)
    
    m=len(X)

    # growing low rank basis
    R=zeros((blocksize,m))
    
    # diagonal (assumed to be one)
    d=ones(m)
    
    # used indices
    I=[]
    nu=[]
    
    # algorithm is executed as long as a is bigger than eta precision
    a=d.max()
    I.append(d.argmax())
    
    # growing set of evaluated kernel values
    K=zeros((blocksize,m))
    
    j=0
    while a>eta:
        nu.append(sqrt(a))
        
        if power>=1:
            K[j,:]=kernel.kernel([X[I[j]]], X)**power
        else:
            K[j,:]=ones(m)
            
        if j==0:
            R_dot_j=0
        elif j==1:
            R_dot_j=R[:j,:]*R[:j,I[j]]
        else:
            R_dot_j=R[:j,:].T.dot(R[:j,I[j]])
                        
        R[j,:]=(K[j,:] - R_dot_j)/nu[j]
        d=d-R[j,:]**2
        a=d.max()
        I.append(d.argmax())
        j=j+1
        
        # allocate more space for kernel
        if j>=len(K):
            K=vstack((K, zeros((blocksize,m))))
            R=vstack((R, zeros((blocksize,m))))
            
    # remove unused rows that were allocated but not needed
    K=K[:j,:]
    R=R[:j,:]

    # remove the last pivot index since it was never used
    I=I[:-1]
    
    # from low rank to full rank
    W=solve(R[:,I], R)
    
    # low rank K
    K_chol=K[:,I]
    
    return K_chol, I, R, W
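A hedged usage sketch for incomplete_cholesky. The kernel object below is a minimal stand-in exposing the kernel(X, Y) interface the function expects (it is not the library's GaussianKernel class), the data is random, and scipy's cdist computes the pairwise distances; the point is only to show that R.T.dot(R) approximates the full kernel matrix:

from numpy import exp, random
from scipy.spatial.distance import cdist

class ToyGaussianKernel(object):
    """Illustrative stand-in with the interface assumed by incomplete_cholesky."""
    def __init__(self, sigma):
        self.sigma = sigma

    def kernel(self, X, Y):
        # squared-exponential kernel on 2d-array inputs, k(x, x) = 1 on the diagonal
        sq_dists = cdist(X, Y, "sqeuclidean")
        return exp(-sq_dists / (2 * self.sigma ** 2))

X = random.randn(200, 2)
kernel = ToyGaussianKernel(sigma=1.0)
K_chol, I, R, W = incomplete_cholesky(X, kernel, eta=0.01)

K_full = kernel.kernel(X, X)
max_error = abs(K_full - R.T.dot(R)).max()  # small for small eta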
Beispiel #42
0
def bfield_mag(x, y, z, w):
    '''Magnitude of the B field at the given point from the wires.'''
    bx, by, bz = bfield(x, y, z, w)
    return sqrt(bx**2 + by**2 + bz**2)
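bfield_mag relies on a bfield(x, y, z, w) helper that is not part of this snippet. A self-contained sketch, assuming everything lives in one module and using a simple stand-in field (an infinite straight wire along the z axis carrying 1 A; the wire argument w is ignored by the stand-in):

from numpy import pi

MU_0 = 4e-7 * pi  # vacuum permeability in T*m/A

def bfield(x, y, z, w):
    '''Illustrative stand-in: azimuthal field of an infinite wire along the z axis.'''
    current = 1.0
    r_sq = x ** 2 + y ** 2
    prefactor = MU_0 * current / (2 * pi * r_sq)
    return -prefactor * y, prefactor * x, 0.0

# 10 cm from the wire; analytically mu0*I/(2*pi*r) = 2e-6 T
b = bfield_mag(0.1, 0.0, 0.0, None)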
Beispiel #43
0
def compute_ssim(im1, im2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11):
    """
    The function to compute SSIM
    @param im1: PIL Image object
    @param im2: PIL Image object
    @return: SSIM float value
    """
    
    # 1D Gaussian kernel definition
    gaussian_kernel_1d = numpy.ndarray((gaussian_kernel_width))
    mu = int(gaussian_kernel_width / 2)

    # fill the Gaussian kernel
    for i in xrange(gaussian_kernel_width):
        gaussian_kernel_1d[i] = (1 / (sqrt(2 * pi) * gaussian_kernel_sigma)) * \
            exp(-((i - mu) ** 2) / (2 * (gaussian_kernel_sigma ** 2)))

    # convert the images to grayscale
    img_mat_1, img_alpha_1 = _to_grayscale(im1)
    img_mat_2, img_alpha_2 = _to_grayscale(im2)
    
    # don't count pixels where both images are fully transparent
    if img_alpha_1 is not None and img_alpha_2 is not None:
        img_mat_1[img_alpha_1 == 255] = 0
        img_mat_2[img_alpha_2 == 255] = 0
    
    #Squares of input matrices
    img_mat_1_sq = img_mat_1 ** 2
    img_mat_2_sq = img_mat_2 ** 2
    img_mat_12 = img_mat_1 * img_mat_2
    
    #Means obtained by Gaussian filtering of inputs
    img_mat_mu_1 = convolve_gaussian_2d(img_mat_1, gaussian_kernel_1d)
    img_mat_mu_2 = convolve_gaussian_2d(img_mat_2, gaussian_kernel_1d)
    
    #Squares of means
    img_mat_mu_1_sq = img_mat_mu_1 ** 2
    img_mat_mu_2_sq = img_mat_mu_2 ** 2
    img_mat_mu_12 = img_mat_mu_1 * img_mat_mu_2
    
    #Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq = convolve_gaussian_2d(img_mat_1_sq, gaussian_kernel_1d)
    img_mat_sigma_2_sq = convolve_gaussian_2d(img_mat_2_sq, gaussian_kernel_1d)
    
    #Covariance
    img_mat_sigma_12 = convolve_gaussian_2d(img_mat_12, gaussian_kernel_1d)
    
    #Centered squares of variances
    img_mat_sigma_1_sq -= img_mat_mu_1_sq
    img_mat_sigma_2_sq -= img_mat_mu_2_sq
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
    
    # set k1, k2 and c1, c2; they depend on L, the dynamic range of the pixel values
    l = 255
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2
    
    #Numerator of SSIM
    num_ssim = (2 * img_mat_mu_12 + c_1) * (2 * img_mat_sigma_12 + c_2)
    
    #Denominator of SSIM
    den_ssim = (img_mat_mu_1_sq + img_mat_mu_2_sq + c_1) * \
               (img_mat_sigma_1_sq + img_mat_sigma_2_sq + c_2)
    
    #SSIM
    ssim_map = num_ssim / den_ssim
    index = numpy.average(ssim_map)

    return index
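A hedged usage sketch for compute_ssim, assuming the module's _to_grayscale and convolve_gaussian_2d helpers are available alongside it and that the two image paths below exist (they are illustrative):

from PIL import Image

im1 = Image.open("reference.png")  # illustrative file names
im2 = Image.open("candidate.png")

ssim_value = compute_ssim(im1, im2)
print("SSIM: %.4f" % ssim_value)  # 1.0 means the images are identical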
Beispiel #44
0
def norm(obj):
    return sqrt(obj.x * obj.x + obj.y * obj.y + obj.z * obj.z)
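A minimal usage sketch: any object exposing x, y and z attributes works, for instance a namedtuple (illustrative):

from collections import namedtuple

Vec = namedtuple("Vec", ["x", "y", "z"])
length = norm(Vec(3.0, 4.0, 12.0))  # 13.0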
Beispiel #45
0
sampler_names_short = ["SM","AM-FS","AM-LS","KAMH-LS"]
sampler_names = ["StandardMetropolis","AdaptiveMetropolis","AdaptiveMetropolisLearnScale","KameleonWindowLearnScale"]

colours = ['blue', 'red', 'magenta', 'green']


ii=0
for sampler_name in sampler_names:
    filename = directory+sampler_name+"_mmds.bin"
    f = open(filename,"r")
    upto, mmds, mean_dist = load(f)
    trials=shape(mean_dist)[1]
    figure(1)
    if which_plot == "mean":
        stds = std(mean_dist,1)/sqrt(trials)
        means = mean(mean_dist,1)
    if which_plot == "mmd":
        stds = std(mmds,1)/sqrt(trials)
        means = mean(mmds,1)
    zscore=1.28
    yerr = zscore*stds
    if highlight == "SM":
        condition = sampler_name == "StandardMetropolis"
    elif highlight == "AM":
        condition = sampler_name == "AdaptiveMetropolis" or sampler_name == "AdaptiveMetropolisLearnScale"
    elif highlight == "KAMH":
        condition = sampler_name == "KameleonWindowLearnScale"
    else:
        condition = True
    
    def fixed_data(self):
        """
        Uses some fixed data from the ToyModel and pre-computes all matrices.
        Results are asserted to be correct (against Matlab implementation) and
        output files can be used to test the KernelBP implementation.
        """
        model = ToyModel()
        graph = model.get_moralised_graph()

        # one observation at node 4
        observations = {4: 0.0}

        # directed edges for kernel BP implementation
        edges = model.extract_edges(observations)

        print "graph:", graph
        print "observations:", observations
        print "edges:", edges

        # we sample the data jointly, so edges will share data along vertices
        joint_data = {}
        joint_data[1] = [-0.274722354853981, 0.044011207316815, 0.073737451640458]
        joint_data[2] = [-0.173264814517908, 0.213918664844409, 0.123246012188621]
        joint_data[3] = [-0.348879413536605, -0.081766464397055, -0.117171083361484]
        joint_data[4] = [-0.014012058355118, -0.145789276405117, -0.317649695308685]
        joint_data[5] = [-0.291794859908481, 0.260902212951398, -0.276258182225143]

        # generate data in a format that works for the dense matrix class,
        # i.e. a pair of point lists for every edge
        data = {}
        for edge in edges:
            # only sample once per undirected edge
            inverse_edge = (edge[1], edge[0])
            data[edge] = (joint_data[edge[0]], joint_data[edge[1]])
            data[inverse_edge] = (joint_data[edge[1]], joint_data[edge[0]])

        # Gaussian kernel used in matlab files
        kernel = GaussianKernel(sigma=sqrt(0.15))

        # use the example class for dense matrix data that can be stored in memory

        precomputer = PrecomputeDenseMatrixKernelBP(
            graph, edges, data, observations, kernel, reg_lambda=0.1, output_filename=self.output_filename
        )

        precomputer.precompute()

        # go through all the files and make sure they contain the correct matrices

        # files created by matlab implementation
        filenames = [
            "1->2->3_non_obs_kernel.txt",
            "1->2->4_non_obs_kernel.txt",
            "1->3->2_non_obs_kernel.txt",
            "1->3->4_non_obs_kernel.txt",
            "1->3->5_non_obs_kernel.txt",
            "2->1->3_non_obs_kernel.txt",
            "2->3->1_non_obs_kernel.txt",
            "2->3->4_non_obs_kernel.txt",
            "2->3->5_non_obs_kernel.txt",
            "2->4->3_non_obs_kernel.txt",
            "3->1->2_non_obs_kernel.txt",
            "3->2->1_non_obs_kernel.txt",
            "3->2->4_non_obs_kernel.txt",
            "3->4->2_non_obs_kernel.txt",
            "4->2->1_non_obs_kernel.txt",
            "4->2->3_non_obs_kernel.txt",
            "4->3->1_non_obs_kernel.txt",
            "4->3->2_non_obs_kernel.txt",
            "4->3->5_non_obs_kernel.txt",
            "5->3->1_non_obs_kernel.txt",
            "5->3->2_non_obs_kernel.txt",
            "5->3->4_non_obs_kernel.txt",
            "3->4_obs_kernel.txt",
            "2->4_obs_kernel.txt",
            "1->2_L_s.txt",
            "1->3_L_s.txt",
            "2->1_L_s.txt",
            "2->3_L_s.txt",
            "2->4_L_s.txt",
            "2->4_L_t.txt",
            "3->1_L_s.txt",
            "3->2_L_s.txt",
            "3->4_L_s.txt",
            "3->4_L_t.txt",
            "3->5_L_s.txt",
            "5->3_L_s.txt",
        ]

        # from matlab implementation
        matrices = {
            "2->1->3_non_obs_kernel.txt": asarray(
                [[1.000000, 0.712741, 0.667145], [0.712741, 1.000000, 0.997059], [0.667145, 0.997059, 1.000000]]
            ),
            "3->1->2_non_obs_kernel.txt": asarray(
                [[1.000000, 0.712741, 0.667145], [0.712741, 1.000000, 0.997059], [0.667145, 0.997059, 1.000000]]
            ),
            "1->2->3_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "1->2->4_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "3->2->1_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "3->2->4_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "4->2->1_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "4->2->3_non_obs_kernel.txt": asarray(
                [[1.000000, 0.606711, 0.745976], [0.606711, 1.000000, 0.972967], [0.745976, 0.972967, 1.000000]]
            ),
            "1->3->2_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "1->3->4_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "1->3->5_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "2->3->1_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "2->3->4_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "2->3->5_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "4->3->1_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "4->3->2_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "4->3->5_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "5->3->1_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "5->3->2_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "5->3->4_non_obs_kernel.txt": asarray(
                [[1.000000, 0.788336, 0.836137], [0.788336, 1.000000, 0.995830], [0.836137, 0.995830, 1.000000]]
            ),
            "2->4->3_non_obs_kernel.txt": asarray(
                [[1.000000, 0.943759, 0.735416], [0.943759, 1.000000, 0.906238], [0.735416, 0.906238, 1.000000]]
            ),
            "3->4->2_non_obs_kernel.txt": asarray(
                [[1.000000, 0.943759, 0.735416], [0.943759, 1.000000, 0.906238], [0.735416, 0.906238, 1.000000]]
            ),
            "2->4_obs_kernel.txt": asarray([[0.999346], [0.931603], [0.714382]]),
            "2->4_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.578476, 0.874852, 0.000000], [0.711260, 0.641846, 0.426782]]
            ),
            "2->4_L_t.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.899839, 0.538785, 0.000000], [0.701191, 0.510924, 0.589311]]
            ),
            "3->4_obs_kernel.txt": asarray([[0.999346], [0.931603], [0.714382]]),
            "3->4_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.751649, 0.731453, 0.000000], [0.797226, 0.542204, 0.412852]]
            ),
            "3->4_L_t.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.899839, 0.538785, 0.000000], [0.701191, 0.510924, 0.589311]]
            ),
            "2->1_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.578476, 0.874852, 0.000000], [0.711260, 0.641846, 0.426782]]
            ),
            "3->1_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.751649, 0.731453, 0.000000], [0.797226, 0.542204, 0.412852]]
            ),
            "1->2_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.679572, 0.798863, 0.000000], [0.636098, 0.706985, 0.442211]]
            ),
            "3->2_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.751649, 0.731453, 0.000000], [0.797226, 0.542204, 0.412852]]
            ),
            "1->3_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.679572, 0.798863, 0.000000], [0.636098, 0.706985, 0.442211]]
            ),
            "2->3_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.578476, 0.874852, 0.000000], [0.711260, 0.641846, 0.426782]]
            ),
            "5->3_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.344417, 0.990645, 0.000000], [0.952696, 0.054589, 0.435191]]
            ),
            "3->5_L_s.txt": asarray(
                [[1.048809, 0.000000, 0.000000], [0.751649, 0.731453, 0.000000], [0.797226, 0.542204, 0.412852]]
            ),
        }

        assert len(filenames) == len(matrices)

        for filename in filenames:
            self.assertTrue(self.assert_file_matrix(self.output_folder + filename, matrices[filename]))
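assert_file_matrix is not shown in this snippet. A plausible minimal version (a sketch under the assumption that the files hold whitespace-separated values, not the test class's actual helper) would load the stored matrix and compare it within the six-decimal precision the matrices above are printed at:

from numpy import loadtxt, allclose

def assert_file_matrix(filename, expected):
    '''Sketch: True if the matrix stored in filename matches the expected array.'''
    stored = loadtxt(filename)
    if stored.size != expected.size:
        return False
    return allclose(stored.reshape(expected.shape), expected, atol=1e-5)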