Example #1
    def up_and_out_calculator(S, t):
        tau = time_to_maturity - t
        # nd[0] is a dummy entry so that nd[i] = N(d_i) for the d_i functions in d_formula
        nd = [np.nan] + [norm.cdf(di(S, K, Su, r, sigma, tau)) for di in d_formula]

        return S*(nd[1] - nd[3] - np.float_power(Su/S, 1+2*r/sigma/sigma)*(nd[6] - nd[8])) \
          - K*np.exp(-r*tau)*(nd[2] - nd[4] - np.float_power(Su/S, -1 + 2*r/sigma/sigma)*(nd[5] - nd[7]))
Example #2
    def eval_wavefunction(self, x_array: np.ndarray):
        """
        Computes the wave function from the eigenfunctions
        psi_n(x) = A_n * H_n(alpha * x) * exp(-alpha^2 * x^2 / 2)
        :param x_array: array of points on the x-position axis
        :returns: dictionary of two arrays holding the wavefunction's real and
        imaginary parts evaluated on x_array
        key = 'real': real part
        key = 'imaginary': imaginary part
        """
        # in general psi_tot will be a complex function
        psi_tot = {
            'real': np.zeros(len(x_array)),
            'imaginary': np.zeros(len(x_array))
        }

        for n in self.n_coeffs.keys():
            # compute the exact factorial for small n; scipy.special.factorial
            # replaces np.math.factorial (the np.math alias was removed in NumPy 2.0)
            if n <= 16:
                a_n = np.float_power(self.mass * self.omega / (np.pi * self.h_bar), 0.25) \
                      / np.sqrt(2**n * special.factorial(n, exact=True))
            # approximate the factorial via Stirling's approximation to avoid overflow
            else:
                a_n = np.float_power(self.mass * self.omega / (np.pi * self.h_bar), 0.25) \
                      / (np.float_power(2 * np.pi * n, 0.25) * np.float_power(2 * n / np.e, n * 0.5))
            hermite_n = special.eval_hermite(n, self.alpha * x_array)
            psi_n = a_n * hermite_n * np.exp((-(self.alpha * x_array)**2 / 2))

            # iteratively add real and imaginary parts of psi_tot
            psi_tot['real'] += complex(self.n_coeffs[n]).real * psi_n
            psi_tot['imaginary'] += complex(self.n_coeffs[n]).imag * psi_n

        return psi_tot
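A quick standalone check (a sketch, not from the original class) that the Stirling branch used above for n > 16 is already accurate at the crossover:

    import numpy as np
    from scipy import special

    n = 17
    exact = np.sqrt(2.0**n * special.factorial(n, exact=True))
    stirling = np.float_power(2 * np.pi * n, 0.25) * np.float_power(2 * n / np.e, n * 0.5)
    print(abs(exact - stirling) / exact)  # ~0.002: about 0.2% relative error at n = 17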
Example #3
def blend_img(background, overlay_rgba, gamma=0.2):  # taken from VIR
    alpha = overlay_rgba[:, :, 3]
    over_corr = np.float_power(overlay_rgba[:, :, :3], gamma)
    bg_corr = np.float_power(background, gamma)
    return np.float_power(over_corr * alpha[..., None] +
                          (1 - alpha)[..., None] * bg_corr,
                          1 / gamma)  # blend in gamma-corrected space, then undo the correction
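A minimal usage sketch, assuming an H x W x 3 float background and an H x W x 4 RGBA overlay, both with values in [0, 1]:

    import numpy as np

    rng = np.random.default_rng(0)
    background = rng.random((4, 4, 3))    # H x W x 3
    overlay_rgba = rng.random((4, 4, 4))  # H x W x 4, alpha in the last channel
    print(blend_img(background, overlay_rgba).shape)  # (4, 4, 3)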
Example #4
def apply_symmetry(vn, sigma, eta_list):
    vn_sym = np.full_like(vn, np.nan)
    sigma_sym = np.full_like(sigma, np.nan)
    for n in range(0, 3):
        for c in range(0, 9):
            for eta_bin in range(0, 34):
                eta = eta_list[eta_bin]
                if abs(eta) < 3.4:
                    neg_eta_bin = 27 - eta_bin
                    weights = [
                        1 / np.float_power(sigma[n, eta_bin, c], 2),
                        1 / np.float_power(sigma[n, neg_eta_bin, c], 2)
                    ]
                    vn_sym[n, eta_bin,
                           c] = (weights[0] * vn[n, eta_bin, c] +
                                 weights[1] * vn[n, neg_eta_bin, c]) / (
                                     weights[0] + weights[1])
                    sigma_sym[n, eta_bin,
                              c] = 1 / np.sqrt(weights[0] + weights[1])
                    vn_sym[n, neg_eta_bin, c] = vn_sym[n, eta_bin, c]
                    sigma_sym[n, neg_eta_bin, c] = sigma_sym[n, eta_bin, c]

                else:
                    vn_sym[n, eta_bin, c] = vn[n, eta_bin, c]
                    sigma_sym[n, eta_bin, c] = sigma[n, eta_bin, c]
    return vn_sym, sigma_sym
Example #5
def moveToPoint(current_x, current_y, current_angle,
                desired_x, desired_y, kv, kh):
    """Implements a simple proportional controller for Move to Point

    Parameters
    ----------
    current_x : float
        The current x position of the robot
    current_y : float
        The current y position of the robot
    current_angle : float
        The current heading angle of the robot
    desired_x : float
        The desired x position of the robot
    desired_y : float
        The desired y position of the robot
    kv : float
        The proportional gain for velocity
    kh : float
        The proportional gain for heading/steering angle, kh > 0.

    Returns
    ----------
    newV : float
        The newly calculated velocity for the robot
    newSteer : float
        The newly calculated steering angle for the robot
    """
    newV = kv * np.sqrt(np.float_power(desired_x - current_x, 2)
                        + np.float_power(desired_y - current_y, 2))
    # arctan2 keeps the correct quadrant and is safe when desired_x == current_x
    desired_angle = np.arctan2(desired_y - current_y,
                               desired_x - current_x)
    newSteer = kh * angdiff(desired_angle, current_angle)
    return newV, newSteer
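A hedged usage sketch; angdiff comes from the surrounding robotics code, so a minimal stand-in that wraps the signed angle difference into [-pi, pi) is defined here:

    import numpy as np

    def angdiff(a, b):
        # stand-in for the repo's helper: signed difference a - b wrapped to [-pi, pi)
        return (a - b + np.pi) % (2 * np.pi) - np.pi

    newV, newSteer = moveToPoint(0.0, 0.0, 0.0, 1.0, 1.0, kv=0.5, kh=1.0)
    print(newV, newSteer)  # ~0.707 (0.5 * sqrt(2)) and ~0.785 (pi/4)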
Example #6
def DoCV(X, fx, y, kernel, k=10):
	n = X.shape[0]	
	
	if(kernel == rbf):
		hypers = np.float_power( 10, np.arange(-3, 4, .25) )
	elif(kernel == poly):
		hypers = np.arange(1, 100, 2)
	Ls = np.float_power( 10, np.arange(-10, 5, .25) )
	
	results = []
	for hyper in hypers:	
		K = makeKernel(X, kernel, hyper = hyper)
		# k-fold cross-validation
		K_trains, y_trains, K_vals, y_vals =  kfold(K, y, k = k)
		for L in Ls:
			mses = []
			for i in range(k):
				K_train = K_trains[i]; y_train = y_trains[i]; K_val = K_vals[i]; y_val = y_vals[i]
				alpha = train(K_train, y_train, L=L)
				f = predict(K_val, alpha)
				mse = MSE(f, y_val)
				mses.append(mse)
			results.append( ( np.mean(mses), hyper, L ) )

	best = np.inf; bestidx = 0
	for idx, line in enumerate(results):
		if(line[0] < best):
			best = line[0]
			bestidx = idx
	mse, hyper, L = results[bestidx]
	print(mse, hyper, L)
	return(hyper, L)
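As a side note, both hyperparameter grids built here with np.float_power(10, np.arange(...)) contain the same points np.logspace generates; a quick equivalence check (not part of the original code):

    import numpy as np

    a = np.float_power(10, np.arange(-3, 4, .25))  # 28 points: 10**-3 .. 10**3.75
    b = np.logspace(-3, 3.75, num=28)
    print(np.allclose(a, b))  # True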
Example #7
def interval2ratio_np(intervals: np.ndarray) -> np.ndarray:
    """
    Vectorized version of i2r
    """
    out = intervals / 12.
    np.float_power(2, out, out=out)
    return out
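A quick usage sketch: 12 semitones (an octave) map to a frequency ratio of 2, and 7 semitones (a fifth) to roughly 1.498:

    import numpy as np

    print(interval2ratio_np(np.array([0., 7., 12.])))  # [1.0, ~1.498, 2.0]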
Example #8
    def test_squared_emphasized_loss(self):
        from model.loss import squared_emphasized_loss

        alpha = 0.4
        beta = 0.6
        axis = 1

        labels = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [4.0, 5.0, 6.0]])
        predictions = np.array([[0.0, 2.0, 3.0], [3.0, 1.0, 4.0],
                                [6.0, 4.0, 7.0]])
        corrupted_inds = [0]
        uncorrupted_inds = np.delete(np.arange(labels.shape[axis]),
                                     corrupted_inds)

        x_c = np.take(labels, corrupted_inds, axis)
        z_c = np.take(predictions, corrupted_inds, axis)
        x = np.take(labels, uncorrupted_inds, axis)
        z = np.take(predictions, uncorrupted_inds, axis)

        expected_output = alpha * np.float_power(
            x_c - z_c, 2).sum() + beta * np.float_power(x - z, 2).sum()

        labels = tf.convert_to_tensor(labels)
        predictions = tf.convert_to_tensor(predictions)
        loss = squared_emphasized_loss(labels,
                                       predictions,
                                       corrupted_inds,
                                       axis=axis,
                                       alpha=alpha,
                                       beta=beta)
        with tf.Session() as sess:
            actual_output = sess.run(loss)
            self.assertAlmostEqual(expected_output, actual_output, places=4)
Example #9
    def scale_at_time(self, time):
        # This method takes an input time (in seconds) and gives an approximation of the scale factor at that time.
        # Since no general a(t) is known, we break up the timeline into epochs of radiation, matter or cosmological
        # constant domination and use approximations given in Introduction to Cosmology in the various sections of
        # chapter 5

        # First, I define the scale factors and times when radiation's density was equal to matter's, and when matter's
        # was equal to the density of the cosmological constant
        scale_rm = self.rad_density/self.matter_density
        scale_ml = np.float_power(self.matter_density/self.lambda_density, 1.0/3.0)
        time_rm = (4.0/3.0)*(1-1/np.sqrt(2)) * np.power(scale_rm, 2)/np.sqrt(self.rad_density) \
                  * Conversions.kilometers_in_mpc / self.hubble
        time_ml = 2 / self.hubble * Conversions.kilometers_in_mpc / (3 * np.sqrt(1 - self.matter_density))\
                  * np.log(1 + np.sqrt(2))

        # Here are the various epochs. Some fudging was done to attempt smoother transitions
        if 0 < time < .75*time_rm:
            # radiation and matter, with a < a_rm
            return np.sqrt((2 * np.sqrt(self.rad_density) * time * self.hubble / Conversions.kilometers_in_mpc))
        elif .75*time_rm <= time < 1.25*time_ml:
            # radiation and matter, with a > a_rm
            return np.float_power(time * (3.0 / 2.0) * np.sqrt(self.matter_density) * self.hubble
                                  / Conversions.kilometers_in_mpc, 2.0 / 3.0)
        elif time >= 1.25*time_ml:
            # matter and lambda, with a > a_ml
            return scale_ml*np.exp(np.sqrt(1-self.matter_density) * time * self.hubble /
                                   Conversions.kilometers_in_mpc / 2.975)               # 2.975 is another fudge factor
                                                                                        # to ensure the scale factor at
                                                                                        # 13.75 Gyr is 1
        else:
            print("Time must be greater than 0!")
            return 0
Example #10
def qffl_aggregation_centered(OnlineClients, Server, online_clients, lr):
    """Aggregate gradients for federated learning.

    Each local model first gets the difference between current model and
    previous synchronized model, and then all-reduce these difference by SUM.

    """
    Server.optimizer.zero_grad()
    num_online_clients = len(online_clients)
    h = 0.0
    for o in online_clients:
        for i, (server_param, client_param) in enumerate(zip(Server.model.parameters(), OnlineClients[o].model.parameters())):
            # get model difference.
            param_diff = (server_param.data - client_param.data) * \
                            ( np.float_power(OnlineClients[o].full_loss + 1e-10, Server.args.qffl_q) / lr )
            server_param.grad.data.add_(param_diff)
            h += Server.args.qffl_q * np.float_power(OnlineClients[o].full_loss + 1e-10, Server.args.qffl_q-1.0) * \
                param_diff.norm().pow(2).item()
        h += np.float_power(OnlineClients[o].full_loss + 1e-10, Server.args.qffl_q) / lr

    for server_param in Server.model.parameters():
        server_param.grad.data.div_(h+1e-10)

    Server.optimizer.step(
        apply_lr=False,
        scale=Server.args.lr_scale_at_sync,
        apply_in_momentum=False,
        apply_out_momentum=Server.args.out_momentum,
    ) 
    return 
Example #11
def w(x, y, r=2, delta=0.1, nq=1000):
    """ Delta-Trimmed r-Wasserstein distance between the empirical measures of two
        one-dimensional samples.
    
        Parameters
        ----------
        x : np.ndarray (n,) 
            sample from P
        y : np.ndarray (m,)
            sample from Q
        r : int, optional
            order of the Wasserstein distance
        delta : float, optional
            trimming constant, between 0 and 0.5.
        nq : int, optional
            number of quantiles to use in Monte Carlo integral approximations

        Returns
        -------
        W : float
            delta-trimmed r-Wasserstein distance
    """
    n = x.size
    m = y.size

    us = np.linspace(delta, 1 - delta, nq)

    x_quant = aux._sample_quantile(x, us)
    y_quant = aux._sample_quantile(y, us)

    integ = np.mean(np.float_power(np.abs(x_quant - y_quant), r))

    return np.float_power(((1 / (1 - 2 * delta)) * integ), 1 / r)
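A self-contained sanity check, with np.quantile standing in for aux._sample_quantile (a sketch under that assumption): for two same-variance normals the quantile functions differ by the mean shift everywhere, so the delta-trimmed distance is that shift scaled by (1 - 2*delta)**(-1/r):

    import numpy as np

    rng = np.random.default_rng(1)
    x = rng.normal(0.0, 1.0, 5000)
    y = rng.normal(1.0, 1.0, 5000)

    delta, r = 0.1, 2
    us = np.linspace(delta, 1 - delta, 1000)
    integ = np.mean(np.float_power(np.abs(np.quantile(x, us) - np.quantile(y, us)), r))
    print(np.float_power(integ / (1 - 2 * delta), 1 / r))  # ~1.12 = 1 / sqrt(0.8)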
Example #12
def get_feature_from_wordmap_SPM(wordmap, layer_num, dict_size):
    '''
    Compute histogram of visual words using spatial pyramid matching.

    [input]
    * wordmap: numpy.ndarray of shape (H,W)
    * layer_num: number of spatial pyramid layers
    * dict_size: dictionary size K

    [output]
    * hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3) (as given in the write-up)
    '''

    layers = np.arange(0, layer_num, 1)
    hist_all = []
    for l in layers:
        if l == 0 or l == 1:
            weight = np.float_power(2, (-layer_num))
        else:
            weight = np.float_power(2, (-l - (layer_num) - 1))

        #declare the dimensions of the cell
        dimension_of_cells = 2**l
        #splitting the rows and columns in number of cells
        rows = np.array_split(wordmap, dimension_of_cells, axis=1)

        for i in rows:
            columns = np.array_split(i, dimension_of_cells, axis=0)
            for cell in columns:
                hist = get_feature_from_wordmap(cell, dict_size)
                hist = hist * weight
                hist_all = np.append(hist_all, hist)

    hist_all = hist_all / np.max(hist_all)
    return hist_all
Example #13
    def computegaussian(self):
        print("Computing gaussian curvature. Be patient...")

        kernely = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
                           dtype=np.float32)
        kernelx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]],
                           dtype=np.float32)

        h = self.pgrads.shape[0]
        w = self.pgrads.shape[1]
        self.gaussgrad = np.zeros((h, w, 1), dtype=np.float32)

        Ixx = cv.filter2D(self.pgrads, cv.CV_32F, kernelx)
        Ixy = cv.filter2D(self.pgrads, cv.CV_32F, kernely)
        Iyy = cv.filter2D(self.qgrads, cv.CV_32F, kernely)
        Iyx = cv.filter2D(self.qgrads, cv.CV_32F, kernelx)

        self.gaussgrad = ((Ixx * Iyy) - Ixy * Iyx) / np.power(
            (1 + np.float_power(self.pgrads, 2) +
             np.float_power(self.qgrads, 2)), 2)

        print("Gaussian curvature computation end.")
        gaussgrad_norm = cv.normalize(self.gaussgrad, None, 0, 255,
                                      cv.NORM_MINMAX, cv.CV_8U)
        if self.display:
            cv.imshow('gaussgrad', gaussgrad_norm)
            cv.waitKey(0)
            cv.destroyAllWindows()
        return gaussgrad_norm
Example #14
def _ll_hdlnegbin2(y, x1, x2, beta1, beta2, alpha):
    """
  The function calculates the log likelihood function of the hurdle negative 
  binomial regression.
  Parameters:
    y     : the frequency outcome
    x1    : variables for the probability model in the hurdle negative binomial regression
    x2    : variables for the count model in the hurdle negative binomial regression
    beta1 : coefficients for the probability model in the hurdle negative binomial regression
    beta2 : coefficients for the count model in the hurdle negative binomial regression
    alpha : the dispersion parameter in the negative binomial distribution 
  """

    xb1 = numpy.dot(x1, beta1)
    xb2 = numpy.dot(x2, beta2)
    p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
    mu = numpy.exp(xb2)
    i0 = numpy.where(y == 0, 1, 0)
    a1 = 1 / alpha
    pr = p0 * i0 + \
         (1 - p0) / (1 - numpy.float_power(a1 / (a1 + mu), a1)) * \
         scipy.special.gamma(y + a1) / (scipy.special.gamma(y + 1) * scipy.special.gamma(a1)) * \
         numpy.float_power(a1 / (a1 + mu), a1) * numpy.float_power(mu / (a1 + mu), y) * (1 - i0)
    ll = numpy.log(pr)
    return (ll)
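For y > 0 the count factor above is the zero-truncated negative binomial pmf, which can be cross-checked against scipy.stats.nbinom; a sketch with illustrative numbers:

    import numpy as np
    from scipy import special, stats

    a1, mu, y = 2.0, 1.5, 3
    p = a1 / (a1 + mu)
    manual = (special.gamma(y + a1) / (special.gamma(y + 1) * special.gamma(a1))
              * np.float_power(p, a1) * np.float_power(1 - p, y)
              / (1 - np.float_power(p, a1)))
    truncated = stats.nbinom.pmf(y, a1, p) / (1 - stats.nbinom.pmf(0, a1, p))
    print(np.isclose(manual, truncated))  # True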
Example #15
    def channel_gain_type1(self, cluster):
        n_group = len(cluster)
        n_su = n_group * self.n_su_group_type1
        SU_x = np.zeros(n_su)
        SU_y = np.zeros(n_su)
        for k in range(n_group):
            group = cluster[k]

            SU_x[k * self.n_su_group_type1: (k+1) * self.n_su_group_type1] = \
                self.SU_x_type1[group][:]
            SU_y[k * self.n_su_group_type1: (k+1) * self.n_su_group_type1] = \
                self.SU_y_type1[group][:]

        channel_gain = np.zeros((n_su, n_su))
        for k1 in range(n_su):
            for k2 in range(n_su):
                d = np.sqrt(
                    np.float_power(SU_x[k1] - SU_x[k2], 2) +
                    np.float_power(SU_y[k1] - SU_y[k2], 2))
                if (k1 == k2):
                    channel_gain[k1, k2] = 0
                else:
                    channel_gain[k1, k2] = np.float_power(
                        10, -((46.4 + 35 * np.log10(d) +
                               20 * np.log10(self.fc / 5)) / 10))

        return channel_gain
Example #16
    def computedepthmap(self):
        h = self.normalmap.shape[0]
        w = self.normalmap.shape[1]
        P = np.zeros((h, w, 2), dtype=np.float32)
        Q = np.zeros((h, w, 2), dtype=np.float32)
        tempZ = np.zeros((h, w, 2), dtype=np.float32)
        self.Z = np.zeros((h, w), dtype=np.float32)
        landa = 1.0
        mu = 1.0
        cv.dft(self.pgrads, P, cv.DFT_COMPLEX_OUTPUT)
        cv.dft(self.qgrads, Q, cv.DFT_COMPLEX_OUTPUT)

        for i in range(1, h):
            for j in range(1, w):
                u = np.sin(i * 2 * np.pi / h)
                v = np.sin(j * 2 * np.pi / w)
                uv = np.float_power(u, 2) + np.float_power(v, 2)
                d = (1 + landa) * uv + mu * np.float_power(uv, 2)
                tempZ[i, j, 0] = (u * P[i, j, 1] + v * Q[i, j, 1]) / d
                tempZ[i, j, 1] = (-u * P[i, j, 0] - v * Q[i, j, 0]) / d
        tempZ[0, 0, 0] = 0
        tempZ[0, 0, 1] = 0
        flags = cv.DFT_INVERSE + cv.DFT_SCALE + cv.DFT_REAL_OUTPUT
        cv.dft(tempZ, self.Z, flags)
        z_norm = cv.normalize(self.Z, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
        if self.display:
            cv.imshow('z_norm', z_norm)
            cv.waitKey(0)
            cv.destroyAllWindows()
        return z_norm
Example #17
 def a_state(N, alpha):
     unnormalised = np.array([
         np.float_power(i + 1, -alpha) + np.float_power((N - i), -alpha)
         for i in range(N)
     ])
     norm = np.sqrt(np.inner(unnormalised, unnormalised))
     return (1 / norm) * unnormalised
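A quick check (a sketch, assuming a_state is callable as a plain function) that the returned amplitude vector is l2-normalized:

    import numpy as np

    state = a_state(8, 0.5)
    print(np.isclose(np.inner(state, state), 1.0))  # True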
Example #18
def residual_2D_ThomasFermiBEC(pars,x,y,data=None, eps=None):
    """
    This is the Thomas-Fermi profile from eq. 44 of Making, Probing, Understanding
    BEC by Ketterle et al.

    residual_2D(pars,x,y,data=None, eps=None)
    """
    parvals = pars.valuesdict() # a Parameters() object is passed as "pars"
    max_density_cond = parvals["OD_cond_0"]
    Thomas_Fermi_rad_x = parvals["x_TF_cond"]
    Thomas_Fermi_rad_y = parvals["y_TF_cond"]
    center_x_cond = parvals["x0_cond"]
    center_y_cond = parvals["y0_cond"]
    bgr = parvals["backgr"]
    #NOTE! X corresponds to axis 1 (horizontal), Y corresponds to axis 0 (vertical)

    # clip the parabolic profile at zero before raising it to the 3/2 power
    model = max_density_cond*np.float_power(
        np.clip(1 - np.float_power((x-center_x_cond)/Thomas_Fermi_rad_x, 2.)
                  - np.float_power((y-center_y_cond)/Thomas_Fermi_rad_y, 2.),
                0, None), 1.5) + bgr
    if data is None:
        return np.array(model) # we don't flatten here because this is for plotting
    if eps is None:
        resid = np.array(model - data)
        #print(resid.flatten())
        return resid.flatten() # minimization array must be flattened (LMFIT FAQ)
    else:
        resid = np.array((model - data)/eps)
        return resid.flatten()
Example #19
    def getlowerbound(est_mean, t, Nt):
        delta = np.float_power(10, -8)
        eps = np.float_power(10, -5)
        u = est_mean
        if (u > 0.0):
            if (u >= 1 - delta):
                u = 1 - delta
            q = u - delta
            for i in range(0, 2000):
                f = np.log(
                    (4 * e + 4) * arms * np.float_power(t, 2) / delt) + np.log(
                        np.log((4 * e + 4) * arms * np.float_power(t, 2) /
                               delt)) - kl_div(u, q) * Nt

                df = -1 * Nt * (q - u) / (q * (1.0 - q))

                q = np.maximum(delta, np.minimum(q - f / df, u - delta))

                if (f * f < eps):
                    break
            return q
        else:
            if u == 0:
                return 0
Example #20
def generative_likelihood_probability(digits, means, covariances):
    '''
    digits: n x 64 array; the 10 below comes from the set of class labels
    Compute the generative likelihood:
        p(x|y,mu,Sigma)

    Should return an n x 10 numpy array of probabilities
    '''
    # for storing digit likelihood 0 - 9
    likelihood_set = []
    n = digits.shape[0]
    for i in range(10):
        cov_inv = np.linalg.inv(covariances[i])
        det = np.linalg.det(covariances[i])
        # shape of (n, 64)
        digits_diff = digits - means[i]
        # n is number of digits
        n_by_n_matrix = np.dot(np.dot(digits_diff, cov_inv),
                               np.transpose(digits_diff))
        # shape (n,)
        n_by_one_tmp = np.diag(n_by_n_matrix)
        n_by_one = n_by_one_tmp.reshape(n, 1)

        # d = 64 is the dimensionality of x, so the prefactor is (2*pi)^(-d/2)
        term_1 = np.float_power(2 * np.pi, -64 / 2)
        term_2 = np.float_power(det, -0.5)
        term_3 = np.exp(-0.5 * n_by_one)

        p_i = term_1 * term_2 * term_3
        likelihood_set.append(p_i)

    all_concat = np.concatenate(likelihood_set, axis=1)
    # should be shape of nx10
    print("generative_likelihood_probability shape: ", all_concat.shape)
    return all_concat
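The three terms multiply out to the usual multivariate normal density; a sketch checking the same expression against scipy.stats.multivariate_normal on synthetic d = 64 data:

    import numpy as np
    from scipy.stats import multivariate_normal

    rng = np.random.default_rng(0)
    mean, cov = np.zeros(64), np.eye(64)
    x = rng.normal(size=(5, 64))

    diff = x - mean
    quad = np.einsum('ij,jk,ik->i', diff, np.linalg.inv(cov), diff)
    mine = (np.float_power(2 * np.pi, -64 / 2)
            * np.float_power(np.linalg.det(cov), -0.5)
            * np.exp(-0.5 * quad))
    print(np.allclose(mine, multivariate_normal(mean, cov).pdf(x), rtol=1e-8, atol=0))  # True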
Example #21
    def train(self):
        print('Training with {} workers ---'.format(self.clients_per_round))
        num_clients = len(self.clients)
        pk = np.ones(num_clients) * 1.0 / num_clients

        batches = {}
        for c in self.clients:
            batches[c] = gen_epoch(c.train_data, self.num_rounds+2)

        print('Have generated training batches for all clients...')

        for i in trange(self.num_rounds+1, desc='Round: ', ncols=120):
            # test model
            if i % self.eval_every == 0:
                num_test, num_correct_test = self.test() # have set the latest model for all clients
                num_train, num_correct_train = self.train_error()  
                num_val, num_correct_val = self.validate()  
                tqdm.write('At round {} testing accuracy: {}'.format(i, np.sum(np.array(num_correct_test)) * 1.0 / np.sum(np.array(num_test))))
                tqdm.write('At round {} training accuracy: {}'.format(i, np.sum(np.array(num_correct_train)) * 1.0 / np.sum(np.array(num_train))))
                tqdm.write('At round {} validating accuracy: {}'.format(i, np.sum(np.array(num_correct_val)) * 1.0 / np.sum(np.array(num_val))))
                
                if self.track_individual_accuracy==1:
                    test_accuracies = np.divide(np.array(num_correct_test), np.array(num_test))
                    for idx in range(len(self.clients)):
                        tqdm.write('Client {} testing accuracy: {}'.format(self.clients[idx].id, test_accuracies[idx]))

            if i % self.log_interval == 0 and i > int(self.num_rounds/2):
                test_accuracies = np.divide(np.asarray(num_correct_test), np.asarray(num_test))
                np.savetxt(self.output + "_" + str(i) + "_test.csv", test_accuracies, delimiter=",")
                train_accuracies = np.divide(np.asarray(num_correct_train), np.asarray(num_train))
                np.savetxt(self.output + "_" + str(i) + "_train.csv", train_accuracies, delimiter=",")
                validation_accuracies = np.divide(np.asarray(num_correct_val), np.asarray(num_val))
                np.savetxt(self.output + "_" + str(i) + "_validation.csv", validation_accuracies, delimiter=",")

            indices, selected_clients = self.select_clients(round=i, pk=pk, num_clients=self.clients_per_round)

            Deltas = []
            hs = []

            selected_clients = selected_clients.tolist()
            selected_clients_grads = []

            for c in selected_clients:

                # communicate the latest model
                c.set_params(self.latest_model)
                weights_before = c.get_params()

                # solve minimization locally
                batch = next(batches[c])
                _, grads, loss = c.solve_sgd(batch)   

                Deltas.append([np.float_power(loss+1e-10, self.q) * grad for grad in grads[1]])
                if self.static_step_size:
                    hs.append(1.0/self.learning_rate)
                else:
                    hs.append(self.q * np.float_power(loss+1e-10, (self.q-1)) * norm_grad(grads[1]) + (1.0/self.learning_rate) * np.float_power(loss+1e-10, self.q))

            self.latest_model = self.aggregate2(weights_before, Deltas, hs)
Example #22
    def computemedian(self):
        # despite the method name, this computes the mean curvature (cf. self.meangrad)
        print("Computing mean curvature. Be patient...")

        h = self.pgrads.shape[0]
        w = self.pgrads.shape[1]
        self.meangrad = np.zeros((h, w, 1), dtype=np.float32)
        scale = 1
        delta = 0
        ddepth = cv.CV_32F

        Ixx = cv.Sobel(self.pgrads,
                       ddepth,
                       1,
                       0,
                       ksize=1,
                       scale=scale,
                       delta=delta,
                       borderType=cv.BORDER_DEFAULT)
        Ixy = cv.Sobel(self.pgrads,
                       ddepth,
                       0,
                       1,
                       ksize=1,
                       scale=scale,
                       delta=delta,
                       borderType=cv.BORDER_DEFAULT)
        Iyy = cv.Sobel(self.qgrads,
                       ddepth,
                       0,
                       1,
                       ksize=1,
                       scale=scale,
                       delta=delta,
                       borderType=cv.BORDER_DEFAULT)
        Iyx = cv.Sobel(self.qgrads,
                       ddepth,
                       1,
                       0,
                       ksize=1,
                       scale=scale,
                       delta=delta,
                       borderType=cv.BORDER_DEFAULT)

        a = (1 + np.float_power(self.pgrads, 2)) * Iyy
        b = self.pgrads * self.qgrads * (Ixy + Iyx)
        c = (1 + np.float_power(self.qgrads, 2)) * Ixx
        d = np.float_power(
            1 + np.float_power(self.pgrads, 2) +
            np.float_power(self.qgrads, 2), 3 / 2)
        self.meangrad = (a - b + c) / d

        print("Median curvature computation end.")
        meangrad_norm = cv.normalize(self.meangrad, None, 0, 255,
                                     cv.NORM_MINMAX, cv.CV_8U)
        if self.display:
            cv.imshow('meangrad', meangrad_norm)
            cv.waitKey(0)
            cv.destroyAllWindows()
        return meangrad_norm
Example #23
 def _np_log_integrate_1x(x, b):
     with np.errstate(invalid='ignore'):
         choices = [
             np.log(np.log(x)),
             np.log1p(-np.float_power(x, -b)) - np.log(b),
             np.log((1 - np.float_power(x, -b)) / b)
         ]
     return np.select([b == 0, b > 0, b < 0], choices, np.nan)
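All three branches evaluate the log of the integral of t**(-b - 1) from 1 to x, i.e. log((1 - x**(-b)) / b), with b = 0 handled as the limit log(log(x)); a quick quadrature check (a sketch):

    import numpy as np
    from scipy.integrate import quad

    x, b = 3.0, 0.7
    ref = np.log(quad(lambda t: t ** (-b - 1), 1.0, x)[0])
    print(np.isclose(ref, _np_log_integrate_1x(np.array([x]), np.array([b]))[0]))  # True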
Example #24
def get_polynomial(X, degree):
	X_poly = []
	X1 = [x[1] for x in X]
	X2 = [x[2] for x in X]
	for i in range(0, degree+1): 
		for j in range(i+1):
			X_poly.append( list( np.float_power( X1, (i-j) ) * np.float_power(X2, j) ) )
	return np.transpose(X_poly)		
Example #25
 def test_float_power_array(self):
     assert np.all(
         np.float_power(np.array([1., 2., 3.]) *
                        u.m, 3.) == np.array([1., 8., 27.]) * u.m**3)
     # regression check on #1696
     assert np.all(
         np.float_power(np.arange(4.) * u.m, 0.) == 1. *
         u.dimensionless_unscaled)
Example #26
def select_param_rbf(X, y):
    """
    Sweeps different settings for the hyperparameters of an rbf-kernel SVM,
    calculating the k-fold CV performance for each setting on X, y. This function has some
    preset variables that I describe right below ---v

    NOTE: After researching SVMs and kernels, I found that the rbf kernel
    is the most flexible kernel and usually better-performing than others for multiclass
    classification. The rbf kernel gets even stronger with the one-vs-one method of
    multiclass classification, as compared to one-vs-all, when dealing with multiple
    labels. I also found that f1_score is a good metric in this situation. And 5-fold cv
    takes a while to run, so I'm going with 3-fold cv.
    Input:
        X: (n,d) array of feature vectors, where n is the number of examples
            and d is the number of features
        y: (n,) array of labels {1, 0, -1}
    Returns:
        The parameter value(s) for an rbf-kernel SVM that maximize
        the average 3-fold CV performance, and the best performance
    """
    #our c and gamma ranges to test
    C_range = np.float_power(np.repeat(10.0, 4),
                             range(-1, 3),
                             dtype=np.float64)
    gamma_range = np.float_power(np.repeat(10.0, 3),
                                 range(-2, 1),
                                 dtype=np.float64)

    #the best parameteres and their performance
    best_params = (C_range[0], gamma_range[0])
    best_perf = np.float64('-inf')

    #labels for multiclass classification
    labels = [-1, 0, 1]

    #go through each c value
    for c in C_range:
        #go through each gamma value
        for gamma in gamma_range:
            #create our classifier with the corresponding c and gamma values, test its 3-fold f1_score performance
            clf = SVC(kernel='rbf',
                      C=c,
                      gamma=gamma,
                      decision_function_shape='ovo')
            current_perf = cv_performance(clf,
                                          X,
                                          y,
                                          k=3,
                                          metric='f1_score',
                                          labels=labels)

            #save best performances accordingly
            if (current_perf > best_perf):
                best_params = (c, gamma)
                best_perf = current_perf

    return best_perf, best_params
Example #27
def Semimajor(R_star, sinT_t, Depth, b):
    """
    Input : Radius of the star, sin^2(T_t*pi/Period),
    Depth (or Delta Flux), impact parameter b
    """
    A = np.float_power((1 + np.sqrt(Depth)), 2)
    a = R_star * np.sqrt(
        abs(A - (1 - sinT_t) * np.float_power(b, 2)) / (sinT_t))
    return a
Example #28
def Impact_parameter(sinT_t, sinT_f, Depth):
    """
    Input : sin^2(T_t*pi/Period), sin^2(T_f*pi/Period),
    Depth
    """
    A = np.float_power((1 + np.sqrt(Depth)), 2)
    B = np.float_power((1 - np.sqrt(Depth)), 2)
    b = np.sqrt(abs(B - sinT_f * A / sinT_t) / (1 - sinT_f / sinT_t))
    return b
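A usage sketch for the two transit-geometry helpers above, with illustrative (made-up) values for the depth and the sin^2 terms:

    import numpy as np

    depth = 0.01                       # 1% transit depth
    sinT_t, sinT_f = 0.0040, 0.0025    # illustrative sin^2(T*pi/Period) values
    b = Impact_parameter(sinT_t, sinT_f, depth)
    a = Semimajor(1.0, sinT_t, depth, b)  # with R_star = 1, a comes out in stellar radii
    print(b, a)  # ~0.38 and ~16.3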
Example #29
 def predict_inst(self, entry, return_probs):
     prob_0 = np.log10(self.cls_priors[0]) + entry.dot(np.log10(self.cond_probs[0])) 
     prob_1 = np.log10(self.cls_priors[1]) + entry.dot(np.log10(self.cond_probs[1])) 
     prob_2 = np.log10(self.cls_priors[2]) + entry.dot(np.log10(self.cond_probs[2])) 
     
     if return_probs:
         return sorted([(np.float_power(10, prob_0),0), (np.float_power(10, prob_1),1), (np.float_power(10, prob_2),2)], reverse=True)
     
     return np.argmax((prob_0, prob_1, prob_2))
Example #30
 def trans_formula(self, freqs: np.ndarray, freq: float = 1.) -> np.ndarray:
     '''
     Make Fourier transformed morse wavelet.
     '''
     freqs = freqs / freq
     step = np.heaviside(freqs, freqs)
     wave = 2. * (step * np.float_power(freqs, self.b) * np.exp(
         (self.b / self.r) * (1. - np.float_power(freqs, self.r))))
     return wave
Example #31
 def load_model(self, filename, d):
     self.model = None
     self.init_model()
     values = self.load_param_values(filename, d)
     self.axes = OrderedDict()
     for axis, labels in self.labels_t.items():
         _, size = labels
         assert size, "Size limit for '%s' axis labels is %s" % (axis, size)
         self.axes[axis] = AxisModel(axis, size, self.config, self.model, self.birnn_type)
     for model in self.sub_models():
         model.load_sub_model(d, *values)
         del values[:len(model.params)]  # Take next len(model.params) values
         if self.config.args.verbose <= 3:
             self.config.print(model.params_str)
     self.copy_shared_birnn(filename, d)
     assert not values, "Loaded values: %d more than expected" % len(values)
     if self.weight_decay and self.config.args.dynet_apply_weight_decay_on_load:
         t = tqdm(list(self.all_params(as_array=False).items()),
                  desc="Applying weight decay of %g" % self.weight_decay, unit="param", file=sys.stdout)
         for key, param in t:
             t.set_postfix(param=key)
             try:
                 value = param.as_array() * np.float_power(1 - self.weight_decay, self.updates)
             except AttributeError:
                 continue
             try:
                 param.set_value(value)
             except AttributeError:
                 param.init_from_array(value)
     self.config.print(self, level=1)
Example #32
    def estimate(self, query, logged_ranking, new_ranking, logged_value):
        exactMatch=numpy.absolute(new_ranking-logged_ranking).sum() == 0
        currentValue=0.0
        if exactMatch:
            numAllowedDocs=self.loggingPolicy.dataset.docsPerQuery[query]
            validDocs=logged_ranking.size
            invPropensity=None
            if self.loggingPolicy.allowRepetitions:
                invPropensity=numpy.float_power(numAllowedDocs, validDocs)
            else:
                invPropensity=numpy.prod(range(numAllowedDocs+1-validDocs, numAllowedDocs+1), dtype=numpy.float64)
                
            currentValue=logged_value*invPropensity

        self.updateRunningAverage(currentValue)
        return self.runningMean
Example #33
 def pick_move(self, game, side):
     possible_moves = game.possible_moves(side)
     if len(possible_moves) == 0:
         possible_moves.append((-1,-1))
     monte_prob = self.monte_carlo(game, side)
     
     if self.train:
         self.temp_state.append((self.preprocess_input(game.board, side), np.divide(monte_prob, np.sum(monte_prob))))
     
     monte_prob = np.float_power(monte_prob, 1/self.tau)
     monte_prob = np.divide(monte_prob, np.sum(monte_prob))
     
     r = random()
     for i, move in enumerate(possible_moves):
         r -= monte_prob[Othello.move_id(move)]
         if r <= 0:
             return move
     return possible_moves[-1]
Example #34
def month_pay(total, r, n):
    return total * (r * numpy.float_power(1 + r, n))/(numpy.float_power(1 + r, n) -1 )
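This is the standard annuity payment formula M = total * r * (1 + r)^n / ((1 + r)^n - 1), with per-period rate r over n payments; a worked example:

    # 300000 principal at a 4.8% annual rate, paid monthly over 30 years
    print(month_pay(300000, 0.048 / 12, 30 * 12))  # ~1574.0 per month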
Example #35
def weight_decay(model):
    try:
        return np.float_power(1 - model.classifier.weight_decay, model.classifier.updates)
    except AttributeError:
        return 1