Example #1
1
File: wave.py Project: andressl91/UiO
    def standing(self, version, plot):
        dx = 0.1
        dy = 0.1
        self.dx = dx
        self.dy = dy
        b = 0.0
        V = 0.0
        Lx = 5
        Ly = 5
        T = 6

        test = "plug"
        A = 1
        B = 1
        mx = 2.0
        my = 2.0
        kx = mx * np.pi / Lx
        ky = my * np.pi / Ly

        I = np.vectorize(lambda x, y: A * np.cos(kx * x) * np.cos(ky * y))
        q = np.vectorize(lambda x, y: 2)
        f = np.vectorize(lambda x, y, t: 0)
        c = np.sqrt(q(1, 1))

        w = np.sqrt(q(1, 1) * (kx ** 2 + ky ** 2))
        ue = np.vectorize(lambda x, y, t: A * np.cos(kx * x) * np.cos(ky * y) * np.cos(w * t))

        m = 6
        h = 1.0
        err = np.zeros(m + 1)
        R, T, V = convergence_rates(version, solver, ue, m, h, Lx, Ly, T, b, V, I, q, f, plot=False)
        for i in range(len(R)):
            print "For dt = %.4f, the convergence rate is %.4f" % (T[i], R[i])
def calc_Eauxf(Ll, Lw, Mww, Qcsf, Qcsf_0, Qhsf, Qhsf_0, Qww, Qwwf, Qwwf_0, Tcs_re, Tcs_sup,
               Ths_re, Ths_sup, Vw, Year, fforma, gv, nf_ag, nfp, qv_req, sys_e_cooling,
               sys_e_heating, Ehs_lat_aux):
    Eaux_cs = np.zeros(8760)
    Eaux_ve = np.zeros(8760)
    Eaux_fw = np.zeros(8760)
    Eaux_hs = np.zeros(8760)
    Imax = 2 * (Ll + Lw / 2 + gv.hf + (nf_ag * nfp) + 10) * fforma
    deltaP_des = Imax * gv.deltaP_l * (1 + gv.fsr)
    if Year >= 2000:
        b = 1
    else:
        b = 1.2
    Eaux_ww = np.vectorize(calc_Eauxf_ww)(Qww, Qwwf, Qwwf_0, Imax, deltaP_des, b, Mww)
    if sys_e_heating != "T0":
        Eaux_hs = np.vectorize(calc_Eauxf_hs_dis)(Qhsf, Qhsf_0, Imax, deltaP_des, b, Ths_sup, Ths_re, gv.Cpw)
    if sys_e_cooling != "T0":
        Eaux_cs = np.vectorize(calc_Eauxf_cs_dis)(Qcsf, Qcsf_0, Imax, deltaP_des, b, Tcs_sup, Tcs_re, gv.Cpw)
    if nf_ag > 5:  # no pumping needed up to the 5th floor
        Eaux_fw = calc_Eauxf_fw(Vw, nf_ag, gv)
    if sys_e_heating == 'T3' or sys_e_cooling == 'T3':
        Eaux_ve = np.vectorize(calc_Eauxf_ve)(Qhsf, Qcsf, gv.Pfan, qv_req, sys_e_heating, sys_e_cooling)

    Eauxf = Eaux_hs + Eaux_cs + Eaux_ve + Eaux_ww + Eaux_fw + Ehs_lat_aux

    return Eauxf, Eaux_hs, Eaux_cs, Eaux_ve, Eaux_ww, Eaux_fw
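
Each calc_Eauxf_* helper above is a scalar function that np.vectorize broadcasts over the 8760 hourly values. A minimal, self-contained sketch of that broadcasting pattern, using a made-up pump_power helper (not part of the source):

import numpy as np

def pump_power(load, load_design, b):
    # hypothetical scalar helper in the spirit of calc_Eauxf_ww above
    if load <= 0.0:
        return 0.0
    return b * load / load_design

v_pump = np.vectorize(pump_power)
hourly_load = np.array([0.0, 5.0, 10.0])
print(v_pump(hourly_load, 10.0, 1.2))  # [0.  0.6 1.2]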
Example #3
0
 def __init__(self, 
              patch_coord, 
              max_x, 
              initial_pop, 
              carrying_capacity,  
              growthrate, 
              a, 
              mutation_rate, 
              nr_kernel_steps, 
              two_pi):
     self.initial_pop = initial_pop
     self.carrying_capacity = carrying_capacity
     self.growthrate = growthrate
     self.two_pi = two_pi
     self.max_x = max_x
     self.patch_coord = patch_coord
     self.population = []
     self.disperser_pop = []
     self.a = a
     self.mutation_rate = mutation_rate
     self.nr_kernel_steps = nr_kernel_steps
     self.rounding_neg_to_zero = np.vectorize(lambda x:0 if x<0 else x)
     self.scaling = np.vectorize(lambda x, y: 100*x/float(y))
     self.matrix_size = (self.nr_kernel_steps*2)-1
     Patch.initialize_individuals(self)
Example #4
0
 def calc(self):
     t = np.arange(0, 5, 0.1)
     inp = np.vectorize(self.func1)(t)
     inp1 = np.vectorize(self.func1)(t)
     
     tar = np.sin(t)
     size = len(inp)
     p0=2.
     a0=2.
     nc = 20
     w0 = 0.1
     w1 = 0.1
     #ts = wn.TrainStrategy.BFGS
     ts = wn.TrainStrategy.Gradient
     w = wn.Net(nc, np.min(inp), np.max(inp), np.average(tar),
                      a0, w0, w1, p0)
     track = w.train(t, t, inp, ts, 100, 0.3, 1, True, True)
     track = w.train(t, t, inp1, ts, 100, 0.3, 1, True, True)
     #track = w.train(inp, inp, tar, ts, 100, 0.3, 1, False, False)
     
     #import pdb; pdb.set_trace()
     tool.plot(t, t, tar, w, track, xlabel='x', ylabel='f(x)')
     print (w.energy(t, t, tar))
     plb.show()
     sys.exit()
Example #5
0
def make_gaussian_conv_plots(m, nvals):
    """
    Compute mean of means, std of means for many values of n
    Plot against expected mean, std of mean as function of n
    """
    mean_of_means = np.vectorize(lambda x: np.mean(mean_unif_samp(m,x)))
    std_of_means = np.vectorize(lambda x: np.std(mean_unif_samp(m,x)))
    
    mns = mean_of_means(nvals)
    stds = std_of_means(nvals)
    
    mu = 0.5
    std_expect = std_of_mean_unif(m)
    
    plt.plot(nvals,mns, 'ko')
    plt.axhline(mu)
    plt.xscale('log')
    plt.xlabel('Number of %d-sample sums' % (m))
    plt.ylabel('Mean of means')
    plt.show()
    
    plt.plot(nvals,stds, 'ko')
    plt.axhline(std_expect)
    plt.xscale('log')
    plt.xlabel('Number of %d-sample sums' % (m))
    plt.ylabel('Standard deviations of means')
    plt.show()
    def fire(self, x):
        sigmoid = lambda x: 1. / (1. + np.exp(-x))

        z = np.vectorize(sigmoid)(self.hidden_weight.dot(np.r_[np.array([1]), x]))
        y = np.vectorize(sigmoid)(self.output_weight.dot(np.r_[np.array([1]), z]))

        return (z, y)
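
Worth noting: the sigmoid lambda above is built entirely from NumPy ufuncs, so it already accepts arrays; np.vectorize only adds a per-element Python call on top. A quick check of the equivalence:

import numpy as np

sigmoid = lambda v: 1. / (1. + np.exp(-v))
x = np.array([-1.0, 0.0, 1.0])
print(np.allclose(np.vectorize(sigmoid)(x), sigmoid(x)))  # True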
Example #7
0
    def Visualize(self, state_history, display=True, save_path=None):
        """ Visualize 2d environments
        """

        XGrid = np.arange(self.grid_domain[0][0], self.grid_domain[0][1] - 1e-10, self.grid_gap)
        YGrid = np.arange(self.grid_domain[1][0], self.grid_domain[1][1] - 1e-10, self.grid_gap)
        XGrid, YGrid = np.meshgrid(XGrid, YGrid)

        ground_truth = np.vectorize(lambda x, y: self.model([x, y]))
        posterior_mean_before = np.vectorize(
            lambda x, y: self.gp.GPMean(state_history[-2].history.locations, state_history[-2].history.measurements,
                                        [x, y]))
        posterior_mean_after = np.vectorize(
            lambda x, y: self.gp.GPMean(state_history[-1].history.locations, state_history[-1].history.measurements,
                                        [x, y]))
        posterior_variance_before = np.vectorize(
            lambda x, y: self.gp.GPVariance2(state_history[-2].history.locations, [x, y]))
        posterior_variance_after = np.vectorize(
            lambda x, y: self.gp.GPVariance2(state_history[-1].history.locations, [x, y]))

        # Plot graph of locations
        vis = Vis2d()
        vis.MapPlot(grid_extent=[self.grid_domain[0][0], self.grid_domain[0][1], self.grid_domain[1][0],
                                 self.grid_domain[1][1]],
                    ground_truth=ground_truth(XGrid, YGrid),
                    posterior_mean_before=posterior_mean_before(XGrid, YGrid),
                    posterior_mean_after=posterior_mean_after(XGrid, YGrid),
                    posterior_variance_before=posterior_variance_before(XGrid, YGrid),
                    posterior_variance_after=posterior_variance_after(XGrid, YGrid),
                    path_points=[x.physical_state for x in state_history],
                    display=display,
                    save_path=save_path)
Example #8
0
File: boundaries.py Project: cmbiwer/pycbc
 def __init__(self, min_bound=-numpy.inf, max_bound=numpy.inf,
         btype_min='closed', btype_max='open', cyclic=False):
     # check boundary values
     if min_bound >= max_bound:
         raise ValueError("min_bound must be < max_bound")
     if cyclic and not (
             numpy.isfinite(min_bound) and numpy.isfinite(max_bound)):
         raise ValueError("if using cyclic, min and max bounds must both "
             "be finite")
     # store bounds
     try:
         self._min = boundary_types[btype_min](min_bound)
     except KeyError:
         raise ValueError("unrecognized btype_min {}".format(btype_min))
     try:
         self._max = boundary_types[btype_max](max_bound)
     except KeyError:
         raise ValueError("unrecognized btype_max {}".format(btype_max))
     # store cyclic conditions
     self._cyclic = bool(cyclic)
     # store reflection conditions; we'll vectorize them here so that they
     # can be used with arrays
     if self._min.name == 'reflected' and self._max.name == 'reflected':
         self._reflect = numpy.vectorize(self._reflect_well)
     elif self._min.name == 'reflected':
         self._reflect = numpy.vectorize(self._min.reflect_right)
     elif self._max.name == 'reflected':
         self._reflect = numpy.vectorize(self._max.reflect_left)
     else:
         self._reflect = _pass
Example #9
0
    def predict(self, X, V, W, return_Z=False):
        '''
        The logistics of prediction follow similar logic to that presented in the write up.

        Once we've trained weight matrices V and W, we compute the hidden layer output for
        each sample in X (the data matrix) by computing H = tanh(np.dot(V, X.T)) using np.vectorize.
        Note that H.shape = (n_hid, num_samples). This is a bit of an issue since we would
        like to add a bias term to the model, so we append a row of 1s to the matrix in order
        to make H.shape = (n_hid + 1, num_samples).

        We then compute the matrix Z = s(np.dot(W, H)).
        This results in a matrix of size Z.shape = (n_out, num_samples). By taking the argmax over
        the columns of the matrix, we compute num_samples predictions, and complete the
        classification algorithm.
        '''
        print("Starting the prediction algorithm.")

        sigmoid_vectorized = np.vectorize(compute_sigmoid)
        tanh_vectorized = np.vectorize(math.tanh)

        X = np.append(X, np.ones(X.shape[0]).reshape(X.shape[0], 1), 1)
        H = tanh_vectorized(np.dot(V, X.T))
        H = np.vstack((H, np.ones(H.shape[1])))
        Z = sigmoid_vectorized(np.dot(W, H))

        print("Completed the prediction algorithm.")

        classifications = np.argmax(Z, 0)
        classifications_as_vector = classifications.reshape(len(classifications), 1)
        if not return_Z:
            return classifications_as_vector
        else:
            return classifications_as_vector, Z
Example #10
0
File: exact-bkw.py Project: olegrog/latex
def solve_linalg(k, T, F0, F1, f):
    N, h = len(X), L/len(X)
    I = np.eye(N)
    S,Y = np.meshgrid(X,X)
    abs_func = np.vectorize(apply_abs)
    F0, F1 = partial(F0, k), partial(F1, k)
    G0 = lambda i,j: abs_func(i, j, F0, F0) - b*h
    G1 = lambda i,j: abs_func(i, j, F1, lambda x: -F1(x)) - b*h*h*(i+j-.5)
    A = weight_matrix(
        lambda i,j: (j-i)*G0(-i,j) - G1(-i,j)/h,
        lambda i,j: G1(-i,j)/h - (j-i-1)*G0(-i,j)
    )
    B = weight_matrix(
        lambda i,j: (j+i)*G0(i,j) - G1(i,j)/h,
        lambda i,j: G1(i,j)/h - (j+i-1)*G0(i,j)
    )
    #splot(X, A*T(np.abs(S-Y)/k)/k)
    #splot(X, B*T((S+Y)/k)/k)
    #py.show()
    phi = solve(a*I - A*T(np.abs(S-Y)/k)/k + B*T((S+Y)/k)/k, f(X))
    p_xy = -(k*(T2(0)-T2(1./k)) + np.trapz((T1((L-X)/k) - T1((L+X)/k))*phi, X))*2/a
    Phi = np.outer(phi,np.ones(N))
    Q = np.trapz(T2((L-X)/k)-T2((L+X)/k) + np.trapz((T1(np.abs(S-Y)/k) - T1((S+Y)/k))*Phi, X)/k, X)/2/a
    #splot(X, K(*XX))
    #py.plot(X, phi)
    w = np.vectorize(lambda x: 0 if x==0 else x*np.log(x)/a)
    ww = lambda x: k*x*x*(2*np.log(x)-1)/4/a
    #print >> sys.stderr, k, np.trapz(phi, X), np.trapz(phi - w((L-X)/k), X) + ww(L/k)
    print(k, p_xy, np.trapz(phi, X)/2, Q, file=sys.stderr)
    #np.savetxt(sys.stdout, np.transpose((X, phi)), fmt='%1.4e')
    return k, p_xy, np.trapz(phi, X)/2, Q
Example #11
0
 def unpack(self):
     """
     creates the vectorized functions
     intention: call this method after loading this object from serialization
     """
     self.fnc = vectorize(self.fnc_nv)
     self.deriv_fnc = vectorize(self.deriv_fnc_nv)
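
The docstring hints at the motivation: an np.vectorize object wraps an arbitrary Python callable and often does not survive pickling cleanly (e.g. when wrapping a lambda), so only the plain scalar functions are serialized and the wrappers are rebuilt on load. A sketch of the pattern, with a hypothetical scalar function:

import numpy as np

def square_nv(x):  # plain module-level scalar function: easy to serialize
    return x * x

class Wrapped:
    def __init__(self):
        self.fnc_nv = square_nv
        self.unpack()

    def unpack(self):
        # rebuild the vectorized wrapper after loading from serialization
        self.fnc = np.vectorize(self.fnc_nv)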
Example #12
0
File: plf.py Project: keflavich/imf
    def __call__(self, luminosity, taper=False, integral_form=False, **kwargs):
        """ Unclear if integral_form is right..."""
        if taper:

            def num_func(x, luminosity_):
                tf = (1-(luminosity_/x)**(1-self.j))**0.5
                return self.imf(x)*x**(self.j-self.jf-1) * tf

            def integrate(lolim, luminosity_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(luminosity_,), **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < luminosity, luminosity, self.mmin), luminosity)

        else:
            def num_func(x):
                return self.imf(x)*x**(self.j-self.jf-1)

            def integrate(lolim):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < luminosity, luminosity, self.mmin))

        result = (1-self.j) * luminosity**(1-self.j) * numerator / self.denominator
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * luminosity
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
Example #13
0
def filter_xaod_to_numpy(files, max_events=None):
    """Processes some files by converting to numpy and applying filtering"""
    # Branch name remapping for convenience
    branch_dict = {
        'CaloCalTopoClustersAuxDyn.calEta' : 'clusEta',
        'CaloCalTopoClustersAuxDyn.calPhi' : 'clusPhi',
        'CaloCalTopoClustersAuxDyn.calE' : 'clusE',
        'CaloCalTopoClustersAuxDyn.EM_PROBABILITY' : 'clusEM',
        'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.pt' : 'fatJetPt',
        'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.eta' : 'fatJetEta',
        'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.phi' : 'fatJetPhi',
        'AntiKt10LCTopoTrimmedPtFrac5SmallR20JetsAux.m' : 'fatJetM',
        'EventInfoAuxDyn.mcChannelNumber' : 'dsid',
        'EventInfoAuxDyn.mcEventWeights' : 'genWeight',
        'InDetTrackParticlesAuxDyn.theta' : 'trackTheta',
        'InDetTrackParticlesAuxDyn.phi' : 'trackPhi',
    }
    # Convert the data to numpy
    print('Now processing:', files)
    tree = get_tree(files, branch_dict, tree_name='CollectionTree',
                    max_events=max_events)
    if tree is None:
        return None
    # Apply physics
    results = process_events(tree)
    skimTree = results['tree']

    # Get the track coordinates
    vtan = np.vectorize(np.tan, otypes=[np.ndarray])
    vlog = np.vectorize(np.log, otypes=[np.ndarray])
    trackTheta = skimTree['trackTheta']
    results['trackEta'] = -vlog(vtan(trackTheta / 2))
    results['trackPhi'] = skimTree['trackPhi']

    return results
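
Here otypes=[np.ndarray] makes np.vectorize return an object array whose elements are themselves arrays, which is how the jagged per-event track lists survive the mapping. A small sketch of the same idea, written with otypes=[object]:

import numpy as np

vlog = np.vectorize(np.log, otypes=[object])
jagged = np.empty(2, dtype=object)
jagged[0] = np.array([1.0, 2.0])
jagged[1] = np.array([3.0])
out = vlog(jagged)
print(out[0], out[1])  # [0. 0.6931...] [1.0986...]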
Example #14
0
    def get_dir_tot_ele(x, y, z, slope, aspect):
        sun_gap = get_sun_gap_ele(x, y)
        ele = z

        def get_incidence_ele(zeni, azi, slope_in, aspect_in):
            inc = np.arccos(np.cos(np.deg2rad(zeni)) * np.cos(np.deg2rad(slope_in))
                            + np.sin(np.deg2rad(zeni)) * np.sin(np.deg2rad(slope_in))
                            * np.cos(azi - np.deg2rad(aspect_in)))
            return inc

        # string names in excluded= only take effect when the argument is
        # actually passed as a keyword, so slope/aspect are passed by name here
        f_incidence = np.vectorize(get_incidence_ele, excluded=['slope_in', 'aspect_in'])
        incidence = f_incidence(zeni_c, azi_c, slope_in=slope, aspect_in=aspect)

        def get_trans_ele(zeni, elevation, transmittance):
            m = np.exp(-0.000118 * elevation - 1.638 * np.power(10, -9) * elevation * elevation) / \
                (np.cos(np.deg2rad(zeni)) + 0.50572 * np.power((96.07995 - zeni), -1.6364))
            return np.power(transmittance, m)

        f_trans = np.vectorize(get_trans_ele, excluded=['elevation', 'transmittance'])
        trans = f_trans(zeni_c, elevation=ele, transmittance=beta)

        def get_dir_ele(time_p_ele, sun_gap_ele, incidence_ele, trans_ele):
            # incidence = get_incidence_angle(zeni_c_ele, azi_c_ele, slope, aspect)
            dir_ele = 1.367 * trans_ele * time_p_ele * sun_gap_ele * np.cos(incidence_ele)
            if dir_ele < 0.0:
                return 0.0
            return dir_ele

        f_dir = np.vectorize(get_dir_ele)
        dir_array = f_dir(time_p, sun_gap, incidence, trans)
        dir_tot = np.sum(dir_array)
        return dir_tot
Example #15
0
def hashin_R(input_stress, properties):
    sigma_x, sigma_y, sigma_s = numpy.vstack(input_stress).T[:]
    xt = numpy.vectorize(lambda x, s: \
            numpy.sqrt(((x / properties['xt']) ** 2
                         + (s / properties['sc']) ** 2) ** -1
    ) if x > 0 else 0.0)
    xc = numpy.vectorize(lambda x: properties['xc'] / numpy.abs(x) \
        if x < 0 else 0.0)
    yt = numpy.vectorize(lambda y, s: \
             numpy.sqrt(((y / properties['yt']) ** 2
                            + (s / properties['sc']) ** 2) ** -1
    ) if y > 0 else 0.0)
    # For yc we need to solve a quadratic.
    # We find roots for all and then multiply by a boolean to eliminate the y > 0
    A = (1.0 / 4.0) * (sigma_y / properties['sc'])**2 \
        + (sigma_s / properties['sc'])**2
    B = ((1.0 / 4.0) * (properties['yc'] / properties['sc'])**2 - 1) * sigma_y \
        / properties['yc']
    C = -1.0 * numpy.ones_like(A)
    yc_roots = quadractic_roots(A, B, C)[:, 0]
    condition = numpy.array(sigma_y < 0, dtype=int)
    yc = yc_roots * condition

    R = numpy.vstack((xt(sigma_x, sigma_s),
                      xc(sigma_x),
                      yt(sigma_y, sigma_s),
                      yc)).T

    return R
Example #16
0
File: utils.py Project: repstd/ASM
def getDataFromModel(ModelName):
	fin=open(ModelName,"r")
	fin.readline()
	fin.readline()
	# meanShape=[]
	# pcaMatrix=None
	cnt=0
	alignedSet=[]

	for line in fin.readlines():
		temp=line.strip().split(":")
		label=int(temp[0])
		data=temp[1].split(" ")
		# print data
		if label==1:
			pcaMatrix=np.array(np.vectorize(float)(data))
		elif label==2:
			meanShape=np.array(np.vectorize(float)(data))
		else:
			alignedSet.append(np.vectorize(float)(data))
	szMean=meanShape.size
	szPca=pcaMatrix.size
	pcaMatrix.reshape(szPca)
	pcaMatrix.shape=(szMean,szPca//szMean)  # integer division for a valid shape tuple
	meanShape.reshape(1,szMean)
	meanShape.shape=(1,szMean)
	# print meanShape.shape,pcaMatrix.shape
	# print pcaMatrix
			
	return pcaMatrix,meanShape,alignedSet
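
np.vectorize(float) is used above to turn a list of numeric strings into a float array; for flat lists, np.asarray with an explicit dtype does the same job without the per-element Python call:

import numpy as np

data = "1.0 2.5 3.5".split(" ")
print(np.vectorize(float)(data))      # [1.  2.5 3.5]
print(np.asarray(data, dtype=float))  # same result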
Example #17
0
  def get_FO(self,x,y,z,Q2,qT2,muR2,muF2,charge,ps='dxdQ2dzdqT2',method='gauss'):
    D=self.D
    D['A1']=1+(2/y-1)**2
    D['A2']=-2
    w2=qT2/Q2*x*z
    w=w2**0.5
    xia_=lambda xib: w2/(xib-z)+x
    xib_=lambda xia: w2/(xia-x)+z

    integrand_xia=lambda xia: self.get_M(xia,xib_(xia),x/xia,z/xib_(xia),Q2,muF2,qT2,charge)
    integrand_xib=lambda xib: self.get_M(xia_(xib),xib,x/xia_(xib),z/xib,Q2,muF2,qT2,charge)

    if method=='quad':
      FO = quad(integrand_xia,x+w,1)[0] + quad(integrand_xib,z+w,1)[0]

    elif method=='gauss':
      integrand_xia=np.vectorize(integrand_xia)
      integrand_xib=np.vectorize(integrand_xib)
      FO = fixed_quad(integrand_xia,x+w,1,n=40)[0] + fixed_quad(integrand_xib,z+w,1,n=40)[0]

    if ps=='dxdQ2dzdqT2':
      s=x*y*Q2
      prefactor = D['alphaEM']**2 * self.SC.get_alphaS(muR2) 
      prefactor/= 2*s**2*Q2*x**2
      prefactor*= D['GeV**-2 -> nb'] 
      return prefactor * FO
    else: 
      print('ps not implemented')
      return None
Example #18
0
def testBrioWu(Npoly=8,Npoint=250):
	"""Run 1D MHD test case BrioWu
		Driver script for solving the 1D Euler equations
	Refer to www.csnu.edu/~jb715473/examples/mhd1d.htm"""
	import globalVar as glb
	import numpy
	import functions
	import matplotlib.pyplot as plt
	
	# Polynomial order used for approximation 
	glb.N = Npoly
	
	# Generate simple mesh
	[glb.Nv, glb.VX, glb.K, EToV] = functions.MeshGen1D(0.0, 1.0, Npoint)
	
	# Initialize solver and construct grid and metric
	exec(open("initiate.py").read())  # Python 3 replacement for execfile
	gamma = 2.0;		bx=0.75
	
	# Set up initial conditions -- Brio Wu's problem
	p1=1.0;		Ener1=(p1/(gamma-1))+(0.5*(1+bx**2))
	p2=0.1;		Ener2=(p2/(gamma-1))+(0.5*(1+bx**2))
	q1 = numpy.array([1.,0.,0.,0.,1.,0.,Ener1])
	q2 = numpy.array([0.125,0.,0.,0.,-1.,0.,Ener2])
	
	q=numpy.zeros([glb.Np,glb.K,7])
	for n in range(7):
		q[:,:,n] = q1[n]*(((-1)*numpy.vectorize(functions.step)(glb.x,0.5))+1)+(q2[n]*numpy.vectorize(functions.step)(glb.x,0.5))
	FinalTime = 0.12
	
	# Solve Problem
	q = mhd1D(q,FinalTime,gamma,bx)
	return(q)
Example #19
0
File: ffnnmat.py Project: xldenis/proj3
def mat_backprop(weights, x, t):
  gamma = 0.8
  vec_sigmoid = np.vectorize(sigmoid)
  vfunc = np.vectorize(quad_error)

  outputs = [x]

  o = x
  for mat in weights:
    extended_o = extend(o)
    o = vec_sigmoid(extended_o.T.dot(mat)).T
    outputs.append(o)

  e = o - t
  print(e)
  diags = []

  for oi in outputs[1:]:
    diags.append(np.diagflat(vfunc(oi)))

  deltas = [0]*len(weights)
  deltas[-1] = diags[-1].dot(e)
  for idx in range(len(weights)-1)[::-1]:
    deltas[idx] = diags[idx].dot(weights[idx+1][:-1]).dot(deltas[idx+1])

  wnew = []
  for i in range(len(weights)):
    z = deltas[i].dot(extend(outputs[i]).T)
    wnew.append(weights[i] - gamma*z.T)

  return wnew
Example #20
0
 def testFloatBasic(self):
   x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
   y = (x + .5).astype(np.float32)     # no zero
   z = (x + 15.5).astype(np.float32)   # all positive
   self._compareBoth(x, np.abs, tf.abs)
   self._compareBoth(x, np.abs, _ABS)
   self._compareBoth(x, np.negative, tf.neg)
   self._compareBoth(x, np.negative, _NEG)
   self._compareBoth(y, self._inv, tf.inv)
   self._compareBoth(x, np.square, tf.square)
   self._compareBoth(z, np.sqrt, tf.sqrt)
   self._compareBoth(z, self._rsqrt, tf.rsqrt)
   self._compareBoth(x, np.exp, tf.exp)
   self._compareBoth(z, np.log, tf.log)
   self._compareBoth(x, np.tanh, tf.tanh)
   self._compareBoth(x, self._sigmoid, tf.sigmoid)
   self._compareBoth(y, np.sign, tf.sign)
   self._compareBoth(x, np.sin, tf.sin)
   self._compareBoth(x, np.cos, tf.cos)
   self._compareBoth(
       x,
       np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
       tf.lgamma)
   self._compareBoth(x, np.vectorize(math.erf), tf.erf)
   self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
Example #21
0
    def fset_two_degree_counts(self, myfset, col_i, col_j, operation, file_path = None):
        # if two columns are the same just return one_degree_count
        if col_i == col_j:
            return self.fset_one_degree_counts(myfset, col_i, file_path)

        if operation == 'per':
            task = 'both'
        if operation == 'num':
            task = 'two'

        self._fset_get_count_tables(myfset, task, [col_i, col_j], file_path)

        col_i_name = myfset.fname_list[col_i]
        col_j_name = myfset.fname_list[col_j]
        col_i_ind = myfset.find_list[col_i]
        col_j_ind = myfset.find_list[col_j]


        i_table = self.one_count_table[col_i_name]
        ij_table = self.two_count_table[(col_i_name, col_j_name)]

        col_i_data_train = myfset.Xtrain[:, col_i_ind]
        col_i_data_test = myfset.Xtest[:, col_i_ind]
        col_j_data_train = myfset.Xtrain[:, col_j_ind]
        col_j_data_test = myfset.Xtest[:, col_j_ind]
        if operation == 'per':  # 'per': percentage of (elem_i, elem_j) in all (elem_i, col_j)  
            vfunc = np.vectorize(lambda x,y: float(ij_table[x][y])/i_table[x])
            col_new_train = vfunc(col_i_data_train, col_j_data_train)
            col_new_test = vfunc(col_i_data_test, col_j_data_test)
        elif operation == 'num':    # 'num': number of different kinds of (elem_i, col_j) 
            vfunc = np.vectorize(lambda x: ij_table[x]['unique'])
            col_new_train = vfunc(col_i_data_train)
            col_new_test = vfunc(col_i_data_test)

        return col_new_train, col_new_test
Example #22
0
File: iqsweep.py Project: bmazin/SDR
def RESDIFF(x,Q,f0,aleak,ph1,da,ang1,Igain,Qgain,Ioff,Qoff):
#       Q = p[0]          ;  Q
#       f0 = p[1]         ;  resonance frequency
#       aleak = p[2]      ;  amplitude of leakage
#       ph1 = p[3]        ;  phase shift of leakage
#       da = p[4]         ;  variation of carrier amplitude
#       ang1 = p[5]       ;  Rotation angle of data
#       Igain = p[6]      ;  Gain of I channel
#       Qgain = p[7]      ;  Gain of Q channel
#       Ioff = p[8]       ;  Offset of I channel
#       Qoff = p[9]       ;  Offset of Q channel

    l = len(x)
    dx = (x - f0) / f0

    # resonance dip function
    s21a = (np.vectorize(complex)(0,2.0*Q*dx)) / (complex(1,0) + np.vectorize(complex)(0,2.0*Q*dx))
    s21a = s21a - complex(.5,0)
    s21b = np.vectorize(complex)(da*dx,0) + s21a + aleak*np.vectorize(complex)(1.0-np.cos(dx*ph1),-np.sin(dx*ph1))

    # scale and rotate
    Ix1 = s21b.real*Igain
    Qx1 = s21b.imag*Qgain
    nI1 = Ix1*np.cos(ang1) + Qx1*np.sin(ang1)
    nQ1 = -Ix1*np.sin(ang1) + Qx1*np.cos(ang1)

    #scale and offset
    nI1 = nI1 + Ioff
    nQ1 = nQ1 + Qoff

    s21 = np.zeros(l*2)
    s21[:l] = nI1
    s21[l:] = nQ1

    return s21
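
np.vectorize(complex)(re, im) builds a complex array element-wise from real and imaginary parts; the same values can be obtained with plain complex arithmetic, as this small check shows:

import numpy as np

Q, dx = 10.0, np.array([0.1, 0.2])
a = np.vectorize(complex)(0, 2.0 * Q * dx)  # element-wise complex(re, im)
b = 1j * 2.0 * Q * dx                       # same values, pure array arithmetic
print(np.allclose(a, b))  # True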
Example #23
0
def velocity_field(psi): #takes a symbolic function and returns two lambda functions
#to evaluate the derivatives in both x and y.
   global w
   if velocity_components:
      u = lambdify((x,y), eval(x_velocity), modules='numpy')
      v = lambdify((x,y), eval(y_velocity), modules='numpy')
   else:
      if is_complex_potential:
         print "Complex potential, w(z) given"
         #define u, v symbolically as the imaginary part of the derivatives
         u = lambdify((x, y), sympy.im(psi.diff(y)), modules='numpy')
         v = lambdify((x, y), -sympy.im(psi.diff(x)), modules='numpy')
      else:
         #define u,v as the derivatives 
         print "Stream function, psi given"
         u = sympy.lambdify((x, y), psi.diff(y), 'numpy')
         v = sympy.lambdify((x, y), -psi.diff(x), 'numpy')
   if (branch_cuts): # If it's indicated that there are branch cuts in the mapping,
                      # then we need to return vectorized numpy functions to evaluate
                      # everything numerically, instead of symbolically 
                      # This of course results in a SIGNIFICANT time increase
                      #   (I don't know how to handle more than the primitive root
                      #   (symbolically in Sympy
      return np.vectorize(u),np.vectorize(v)
   else:
       # If there are no branch cuts, then return the symbolic lambda functions (MUCH faster)
      return u,v
Example #24
0
File: maintf.py Project: blutooth/gp
def train_data(N=300):
    sig=3
    X=np.array(np.random.uniform(low=0,high=4*np.pi,size=[N,1]))
    # np.float, np.math and scipy's re-exported sp.sin/sp.cos/sp.randn were
    # removed in modern NumPy/SciPy; use the plain NumPy originals instead
    f_sin=np.vectorize(np.sin,otypes=[float])
    f_cos=np.vectorize(np.cos,otypes=[float])
    Y=30*f_sin(X)+30*f_cos(2*X+4)+sig*np.random.randn(N,1)
    return [X,Y]
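
The otypes argument pins the output dtype up front, which also spares np.vectorize its trial call to infer it. A minimal illustration:

import numpy as np

f = np.vectorize(lambda t: int(t > 0), otypes=[float])
print(f(np.array([-1.0, 2.0])))  # [0. 1.] -- float output despite the int return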
Example #25
0
File: pmf.py Project: keflavich/imf
    def __call__(self, mass, taper=False, integral_form=False, **kwargs):
        """ Unclear if integral_form is right..."""
        if taper:

            def num_func(x, mass_):
                tf = (1-(mass_/x)**(1-self.j))**0.5
                return self.imf(x)*(1./x)**(1-self.j) * (2/((1+self.Rmdot**2*x**1.5)**0.5+1)) * tf

            def integrate(lolim, mass_):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, args=(mass_,), **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin), mass)

        else:
            def num_func(x):
                return self.imf(x)*(1./x)**(1-self.j) * (2/((1+self.Rmdot**2*x**1.5)**0.5+1))

            def integrate(lolim):
                integral = scipy.integrate.quad(num_func, lolim, self.mmax, **kwargs)[0]
                return integral

            numerator = np.vectorize(integrate)(np.where(self.mmin < mass, mass, self.mmin))

        result = (1-self.j) * mass**(1-self.j) * numerator / self.denominator
        if integral_form:
            warnings.warn("The 'integral form' of the Chabrier PMF is not correctly normalized; "
                          "it is just PMF(m) * m")
            return result * self.normfactor * mass
            raise ValueError("Integral version not yet computed")
        else:
            return result * self.normfactor
def find_ephemeris_lookup_date(tai_beg, tai_end, obs_md_table):
    '''
    Want to find the 15-minute increment (0, 15, 30, 45) that is sandwiched between the two
    passed datetimes.  However, spans can be less than 15 minutes, so we also need to handle this
    case; here, will round to whichever increment has the smallest delta between tai_beg
    and tai_end.  I've not seen it, but I have to assume that spans can also be longer than
    15 minutes.
    '''
    vectfunc = np.vectorize(tai_str_to_datetime)
    tai_end_dt = vectfunc(tai_end)
    tai_beg_dt = vectfunc(tai_beg)

    vectfunc = np.vectorize(get_ephemeris_block_in_interval)

    mask = (tai_end_dt - tai_beg_dt) <= ephemeris_max_block_size
    ret = np.zeros((len(tai_end_dt),), dtype=dt.datetime)
    ret[mask] = vectfunc(tai_beg_dt[mask], tai_end_dt[mask])

    def _lookup_str_format(dtval):
        if isinstance(dtval, dt.datetime):
            return dtval.strftime("%Y-%b-%d %H:%M")
        return ""

    vectfunc = np.vectorize(_lookup_str_format)
    ret = vectfunc(ret)

    return ret[mask], obs_md_table[mask]
 def readConfig(self):
     configFile = open(self.CNSconfig, 'r')
     self.CNSconfigInfo = np.append(np.vectorize(lambda x: parseConfigFindPath(x,configFile))(['root_folder', 'pathPython',
                              'checkValidity','conservedFastaPath','pickleSkip','pickleName','fasta2phylip','PhyML',
                               'bootstrap','treeFile','treeOut','ratioCopy','outputTreeImages']),
                                    np.vectorize(lambda x: parseConfigFindList(x,configFile))(['masterListSpecies','intragenus','intergenus','subgenome']))
     configFile.close()
Example #28
0
File: analysis.py Project: sminez/clmate
    def class_results(self, tSet, group_fields):
        filtered = self.dataframe[self.dataframe['teaching_set'].isin([tSet])]
        cols = ['qModule', 'qTopic', 'qMark', 'pMark']
        df = filtered[cols]
        df_grouped = df.groupby(group_fields)

        if self.coalation_type == 'best':
            df = df_grouped.max()
            df['pob'] = np.vectorize(percentage_marks)(df['pMark'], df['qMark'])
            df['Colour'] = np.vectorize(colourise)(df['pob'])
            df.rename(columns={'pob': 'Class Max percentage'}, inplace=True)
            df = df.drop(['pMark', 'qMark'], axis=1)
            if group_fields == ['qTopic']:
                df = df.drop(['qModule'], axis=1)
            elif group_fields == ['qModule']:
                df = df.drop(['qTopic'], axis=1)
        elif self.coalation_type == 'mean':
            df = df_grouped.mean()
            df['pob'] = np.vectorize(percentage_marks)(df['pMark'], df['qMark'])
            # Round off ALL values in the dataframe to 2 decimal places
            # otherwise the mean function will give ridiculous accuracy!
            df = np.round(df, 2)
            df['Colour'] = np.vectorize(colourise)(df['pob'])
            df = df.drop(['pMark', 'qMark'], axis=1)
            df.rename(columns={'pob': 'Class Mean percentage'}, inplace=True)
        else:
            QtGui.QMessageBox.question(
                self,
                'Uh oh...',
                ("Something went wrong."
                 "Please close the analysis screen and try again."))
        return df
Example #29
0
File: analysis.py Project: sminez/clmate
    def cohort_results(self, group_fields):
        df_grouped = self.dataframe.groupby(group_fields)

        if self.coalation_type == 'best':
            df = df_grouped.max()
            df['pob'] = np.vectorize(percentage_marks)(df['pMark'], df['qMark'])
            df = df.drop(['pMark', 'qMark', 'aID', 'qNum'], axis=1)
            df['Colour'] = np.vectorize(colourise)(df['pob'])
            df.rename(columns={'pob': 'Cohort Max percentage'}, inplace=True)
            if group_fields == ['qTopic']:
                df = df.drop(['qModule'], axis=1)
            elif group_fields == ['qModule']:
                df = df.drop(['qTopic'], axis=1)
        elif self.coalation_type == 'mean':
            df = df_grouped.mean()
            df['pob'] = np.vectorize(percentage_marks)(df['pMark'], df['qMark'])
            df = df.drop(['pMark', 'qMark', 'aID', 'qNum'], axis=1)
            # round off ALL values in the dataframe to 2 decimal places
            df = np.round(df, 2)
            df['Colour'] = np.vectorize(colourise)(df['pob'])
            df.rename(columns={'pob': 'Cohort Mean percentage'}, inplace=True)
        else:
            QtGui.QMessageBox.question(
                self,
                'Uh oh...',
                ("Something went wrong."
                 "Please close the analysis screen and try again."))
        return df
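
In both methods np.vectorize maps a two-argument scalar function over pandas columns, which works because Series are converted to arrays on the way in. A sketch with a stand-in for percentage_marks (whose definition is not shown here):

import numpy as np
import pandas as pd

df = pd.DataFrame({'pMark': [3, 4], 'qMark': [4, 5]})
pct = np.vectorize(lambda p, q: 100.0 * p / q)  # stand-in for percentage_marks
df['pob'] = pct(df['pMark'], df['qMark'])
print(df['pob'].tolist())  # [75.0, 80.0]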
Example #30
0
    def df_two_degree_counts(self, df, col_i, col_j, operation, file_path = None):
        # if two columns are the same just return one_degree_count
        if col_i == col_j:
            return self.df_one_degree_counts(df, col_i, file_path)

        if operation == 'per':
            task = 'both'
        elif operation == 'num':
            task = 'two'
        else:
            print('unknown operation')
            return
                
        self._df_get_count_tables(df, task, [col_i, col_j], file_path)

        i_table = self.one_count_table[col_i]
        ij_table = self.two_count_table[(col_i, col_j)]

        col_i_data = df[col_i].values
        col_j_data = df[col_j].values
        if operation == 'per':  # 'per': percentage of (elem_i, elem_j) in all (elem_i, col_j)  
            vfunc = np.vectorize(lambda x,y: float(ij_table[x][y])/i_table[x])
            col_new = vfunc(col_i_data, col_j_data)
        elif operation == 'num':    # 'num': number of different kinds of (elem_i, col_j) 
            vfunc = np.vectorize(lambda x: ij_table[x]['unique'])
            col_new = vfunc(col_i_data)

        return col_new
Example #31
0
File: star.py Project: mahabul123/pyem
def recenter_modf(df, inplace=False):
    df = df if inplace else df.copy()
    remxy, offsetxy = np.vectorize(modf)(df[Relion.ORIGINS])
    df[Relion.ORIGINS] = remxy
    df[Relion.COORDS] = df[Relion.COORDS] - offsetxy
    return df
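
Because math.modf returns a pair, the vectorized call returns a pair of arrays, which is what lets recenter_modf split the origins into fractional and integer parts in one line:

import numpy as np
from math import modf

remxy, offsetxy = np.vectorize(modf)(np.array([1.25, -2.5]))
print(remxy)     # [ 0.25 -0.5 ]
print(offsetxy)  # [ 1. -2.]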
Example #32
0
    def decision_tree(self, f_train = None):
        if f_train is not None:
            # re-vectorize the data
            self.vectorize(f_train)
        
        SPLIT = 30
        
        training_ham = self.get_ham(self.training_X, self.training_label)
        training_spam = self.get_spam(self.training_X, self.training_label)

        hmean = training_ham.mean(axis = 0)
        smean = training_spam.mean(axis = 0)

        freq_diff = abs(hmean - smean) # difference of each word freq in ham and spam
        arg_fdiff = np.flip(np.argsort(freq_diff)) # arg of freq difference in descending order
        arg_list = list(arg_fdiff[np.where(freq_diff > 0)]) # list of indexes where we do the cuts

        class Node:
            def __init__(self, idx):
                self.idx = idx
#                 print('a new node at index %d' % self.idx)
                self.value = None
                self.left = None
                self.right = None

        def get_idx():
            if arg_list:
                idx = arg_list[0]
                arg_list.pop(0)
                return idx
            else:
                print('Reached maximum number of nodes')
                return None

        def build_tree(rows):
            if np.size(rows) == 1:
                node_labels = self.training_label[rows]
                return node_labels
            else:
                new_N = Node(get_idx())
                col = self.training_X[:, new_N.idx] # the column turned into array
                node_col = col[rows]
                node_labels = self.training_label[rows]

                new_N.value = Gini_min(node_col, node_labels) # node value is whatever with lowest gini index
                l_rows = rows[np.where(node_col <= new_N.value)] # cut rows by N.value
                r_rows = rows[np.where(node_col > new_N.value)]
                if np.size(l_rows) == 0:
                    if np.count_nonzero(self.training_label[r_rows] == self.HAM) / np.size(self.training_label[r_rows]) > 0.5: # fraction of self.HAM is higher
                        new_N.right = self.HAM
                        new_N.left = self.SPAM
                    else:
                        new_N.right = self.SPAM
                        new_N.left = self.HAM
                    return new_N
                elif np.size(r_rows) == 0: # if one side has no other data
                    if np.count_nonzero(self.training_label[l_rows] == self.HAM) / np.size(self.training_label[l_rows]) > 0.5: # fraction of self.HAM is higher
                        new_N.right = self.SPAM
                        new_N.left = self.HAM
                    else:
                        new_N.right = self.HAM
                        new_N.left = self.SPAM
                    return new_N
                else:
                    new_N.left = build_tree(l_rows)
                    new_N.right = build_tree(r_rows)
                    return new_N

        def Gini_min(col, c):
            '''return the minimum gini index given an array of word freq and associated label c'''
            # look for the lowest gini value in the column
            c_max = np.max(col) # the largest value
            c_min = np.min(col) # the smallest value/freq in column
            if c_max == c_min: # if the maximum equals the minimum -> all values are equal
                return c_max # that's the value to split
            else: # if not all elements are zero
                split_value = np.linspace(c_min, c_max, num = SPLIT) # the values of diff split
                gini_idx = Gini_index(split_value, col, c) # list of gini idx at diff split
                return split_value[gini_idx.argmin()] # return the split value with the minimum gini index

        # Calculate the Gini index for a split dataset
        def Gini_index(cut, col, c):
            lc = c[np.where(col <= cut)] # labels for the left
            rc = c[np.where(col > cut)] # labels for the right

            len_left, len_right = len(lc), len(rc)
            len_total = len_left + len_right

            unc = 0.0
            if len_left == 0:
                unc += 0 # weighted uncertainty
            else:
                l_p_ham = (np.count_nonzero(lc == self.HAM) / np.size(lc))
                l_p_spam = (np.count_nonzero(lc == self.SPAM) / np.size(lc))
                unc += (1 - (l_p_ham**2 + l_p_spam**2)) * len_left / len_total

            if len_right == 0:
                unc += 0
            else:
                r_p_ham = (np.count_nonzero(rc == self.HAM) / np.size(rc))
                r_p_spam = (np.count_nonzero(rc == self.SPAM) / np.size(rc))
                unc += (1 - (r_p_ham**2 + r_p_spam**2)) * len_right / len_total

            return unc
        Gini_index = np.vectorize(Gini_index, excluded = [1, 2])
        
        def classify(case, N):
            if N == self.HAM:
                return self.HAM
            elif N == self.SPAM:
                return self.SPAM
            else:
                if case[N.idx] < N.value:
                    return classify(case, N.left)
                else:
                    return classify(case, N.right)
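
The excluded=[1, 2] argument keeps col and c out of the broadcast, so only the cut values are mapped element-wise while the full column and label arrays are passed through unchanged. A minimal sketch of that pattern:

import numpy as np

def count_below(cut, col):
    return np.count_nonzero(col <= cut)

f = np.vectorize(count_below, excluded=[1])  # position 1 (col) is not broadcast
print(f(np.array([1, 2, 3]), np.array([1, 2, 2, 3])))  # [1 3 4]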
Example #33
0
from py_vollib.black_scholes.implied_volatility import implied_volatility 
from scipy.stats.kde import gaussian_kde
rho = -0.7165
r = 0
S0 = 100
v_bar = 0.0354
v0 = v_bar 
l = 1.3253
eta = 0.3877
dt = 1/2500
N = 5
q = 0
K = 100
 
# Vectorizing the Implied Volatility function so it can operate on matrices
iv_function = np.vectorize(implied_volatility)
 
# Running Monte Carlo to simulate N paths that follow the Heston Stochastic Volatility Model
def monte_carlo_heston (N):
     
    var_matrix = np.zeros((N,2500)) # creating empty variance matrix with N rows and one column for each time step
    var_matrix[:,0] = v0 # initializing t = 0 to the initial variance
    stock_matrix = np.zeros((N,2500))
    stock_matrix[:,0] = np.log(S0)
 
    # Option prices matrices with strikes K = 90,95,100,105,110
    options_matrix_90 = np.zeros((N,2500))
    options_matrix_95 = np.zeros((N,2500))
    options_matrix_100 = np.zeros((N,2500))
    options_matrix_105 = np.zeros((N,2500))
    options_matrix_110 = np.zeros((N,2500))
Example #34
0
File: futils.py Project: VMargot/gessaman
def predict(
    significant_rules: RuleSet,
    insignificant_rules: RuleSet,
    xs: np.ndarray,
    y_train: np.ndarray,
    nb_jobs: int = 2,
) -> (np.ndarray, np.ndarray):
    max_func = np.vectorize(max)

    if len(significant_rules) > 0:
        # noinspection PyProtectedMember
        significant_union_train = reduce(
            operator.add, [rule._activation for rule in significant_rules]
        ).raw

        significant_act_train = [rule.activation for rule in significant_rules]
        significant_act_train = np.array(significant_act_train)
        significant_act_test = Parallel(n_jobs=nb_jobs, backend="multiprocessing")(
            delayed(eval_activation)(rule, xs) for rule in significant_rules
        )
        significant_act_test = np.array(significant_act_test).T

        significant_no_act_test = np.logical_not(significant_act_test)

        nb_rules_active = significant_act_test.sum(axis=1)
        nb_rules_active[nb_rules_active == 0] = -1  # If no rule is activated

        # Activation of the intersection of all NOT activated rules at each row
        no_activation_union = np.dot(significant_no_act_test, significant_act_train)
        no_activation_union = np.array(no_activation_union, dtype="int")

        # Activation of the intersection of all activated rules at each row
        intersection_activation = np.dot(significant_act_test, significant_act_train)
        intersection_activation = np.array(
            [
                np.equal(act, nb_rules)
                for act, nb_rules in zip(intersection_activation, nb_rules_active)
            ],
            dtype="int",
        )

        # Calculation of the binary vector for cells of the partition at each row
        significant_cells = (intersection_activation - no_activation_union) > 0
        no_prediction_points = (significant_cells.sum(axis=1) == 0) & (
            significant_act_test.sum(axis=1) != 0
        )

    else:
        significant_cells = np.zeros(shape=(xs.shape[0], len(y_train)), dtype="bool")
        significant_union_train = np.zeros(len(y_train))
        no_prediction_points = np.zeros(xs.shape[0])

    if len(insignificant_rules) > 0:
        # Activation of all rules in the learning set
        insignificant_act_train = [rule.activation for rule in insignificant_rules]
        insignificant_act_train = np.array(insignificant_act_train)
        insignificant_act_train -= significant_union_train
        insignificant_act_train = max_func(insignificant_act_train, 0)

        insignificant_act_test = Parallel(n_jobs=nb_jobs, backend="multiprocessing")(
            delayed(eval_activation)(rule, xs) for rule in insignificant_rules
        )
        insignificant_act_test = np.array(insignificant_act_test).T

        insignificant_no_act_test = np.logical_not(insignificant_act_test)

        nb_rules_active = insignificant_act_test.sum(axis=1)
        nb_rules_active[nb_rules_active == 0] = -1  # If no rule is activated

        # Activation of the intersection of all NOT activated rules at each row
        no_activation_union = np.dot(insignificant_no_act_test, insignificant_act_train)
        no_activation_union = np.array(no_activation_union, dtype="int")

        # Activation of the intersection of all activated rules at each row
        intersection_activation = np.dot(
            insignificant_act_test, insignificant_act_train
        )
        intersection_activation = np.array(
            [
                np.equal(act, nb_rules)
                for act, nb_rules in zip(intersection_activation, nb_rules_active)
            ],
            dtype="int",
        )

        # Calculation of the binary vector for cells of the partition at each row
        insignificant_cells = (intersection_activation - no_activation_union) > 0
    else:
        insignificant_cells = np.zeros(shape=(xs.shape[0], len(y_train)), dtype="bool")

    # Calculation of the No-rule prediction.
    no_rule_cell = np.ones(len(y_train)) - significant_union_train
    no_rule_prediction = conditional_mean(no_rule_cell, y_train)

    # Calculation of the conditional expectation in each cell
    cells = insignificant_cells ^ significant_cells
    prediction_vector = Parallel(n_jobs=nb_jobs, backend="multiprocessing")(
        delayed(eval_cell)(act, y_train) for act in cells
    )
    prediction_vector = np.array(prediction_vector)
    prediction_vector[no_prediction_points] = np.nan
    prediction_vector[prediction_vector == 0] = no_rule_prediction

    return np.array(prediction_vector), no_prediction_points
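
The max_func = np.vectorize(max) used above computes an element-wise maximum against a scalar; NumPy's built-in np.maximum does the same thing as a single ufunc call:

import numpy as np

arr = np.array([-1, 0, 2])
print(np.vectorize(max)(arr, 0))  # [0 0 2]
print(np.maximum(arr, 0))         # same result, one ufunc call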
    def initialize_d_r_d_enu(self,
                             isotope,
                             mode="mueller",
                             filename=None,
                             th1_name=None,
                             scale=1.0):
        '''
        Initialize a reactor antineutrino spectrum

        Args:
            isotope: String specifying isotopes ("u235", "u238",
                "pu239", "pu241", or "other")
            mode: String specifying how the methods should be
                initialized. Options:
                  - "mueller" - Use the fit functions from 
                      arXiv:1101.2663v3. Set the spectrum below
                      2 MeV to the value at 2 MeV
                  - "zero" - Return 0 for all energies
                  - "txt" - Read in a spectrum from a text file,
                      with energy (MeV) in the first column and
                      neutrinos/MeV/fission in the second
                  - "root" - Read in a spectrum from a TH1 object
                      in a root file. A th1_name must be given, 
                      the x axis must be MeV, and it must be
                      normalized to the number of neutrinos/fission
        '''
        isotope_map = {
            "u235": 0,
            "u238": 1,
            "pu239": 2,
            "pu241": 3,
            "other": 4
        }
        if isotope not in isotope_map:
            print("Invalid isotope selected in initialize_d_r_d_enu")
            print(
                '\tPlease select "u235", "u238", "pu239", "pu241", or "other"')
            return

        if (mode == "txt" or mode == "root"):
            # Create splines
            self._flux_use_functions[isotope_map[isotope]] = False
            enu, spec = list(), list()
            if (mode == "txt"):
                enu, spec = np.loadtxt(filename, usecols=(0, 1), unpack=True)
            elif (mode == "root"):
                rootfile = ROOT.TFile(filename)
                th1 = rootfile.Get(th1_name)
                nxbins = th1.GetNbinsX()
                xaxis = th1.GetXaxis()
                for ibin in range(1, nxbins + 1):
                    enu.append(xaxis.GetBinCenter(ibin))
                    spec.append(th1.GetBinContent(ibin))
            self._flux_spline_graphs[isotope_map[isotope]] = \
                ROOT.TGraph(len(enu), np.ascontiguousarray(enu),
                            scale*np.ascontiguousarray(spec))

            def spl_eval(enu):
                # Graph has energies in MeV, we want keV
                return 1e-3 * self._flux_spline_graphs[
                    isotope_map[isotope]].Eval(enu * 1e-3)

            spl_eval = np.vectorize(spl_eval)
            self._flux_spline_evals[isotope_map[isotope]] = spl_eval
        elif (mode == "zero"):
            self._flux_use_functions[isotope_map[isotope]] = True

            def flux_zero(enu):
                if (type(enu) == float):
                    enu = np.asarray([enu])
                else:
                    enu = np.asarray(enu)
                return 0.0 * np.array(enu)

            self._flux_functions[isotope_map[isotope]] = flux_zero
        if (mode == "mueller" or self._mueller_partial):
            if (isotope == "u235"):

                def flux_u235(enu):
                    if (type(enu) == float):
                        enu = np.asarray([enu])
                    else:
                        enu = np.asarray(enu)
                    enu_mev = enu / 1.e3
                    return scale * 1e-3 * np.exp(3.217 - 3.111*enu_mev + 1.395*(enu_mev**2.0) - \
                                      (3.690e-1)*(enu_mev**3.0) + (4.445e-2)*(enu_mev**4.0) - (2.053e-3)*(enu_mev**5.0))

                self._flux_functions[0] = flux_u235
            elif (isotope == "u238"):

                def flux_u238(enu):
                    if (type(enu) == float):
                        enu = np.asarray([enu])
                    else:
                        enu = np.asarray(enu)
                    enu_mev = enu / 1.e3
                    return scale * 1e-3 * np.exp((4.833e-1) + (1.927e-1)*enu_mev - (1.283e-1)*enu_mev**2.0 - \
          (6.762e-3)*enu_mev**3.0 + (2.233e-3)*enu_mev**4.0 - (1.536e-4)*enu_mev**5.0)

                self._flux_functions[1] = flux_u238
            elif (isotope == "pu239"):

                def flux_pu239(enu):
                    if (type(enu) == float):
                        enu = np.asarray([enu])
                    else:
                        enu = np.asarray(enu)
                    enu_mev = enu / 1.e3
                    return scale * 1e-3 * np.exp(6.413 - 7.432*enu_mev + 3.535*enu_mev**2.0 - \
          (8.82e-1)*enu_mev**3.0 + (1.025e-1)*enu_mev**4.0 - (4.550e-3)*enu_mev**5.0)

                self._flux_functions[2] = flux_pu239
            elif (isotope == "pu241"):

                def flux_pu241(enu):
                    if (type(enu) == float):
                        enu = np.asarray([enu])
                    else:
                        enu = np.asarray(enu)
                    enu_mev = enu / 1.e3
                    return scale * 1e-3 * np.exp(3.251 - 3.204*enu_mev + 1.428*enu_mev**2.0 - \
          (3.675e-1)*enu_mev**3.0 + (4.254e-2)*enu_mev**4.0 - (1.896e-3)*enu_mev**5.0)

                self._flux_functions[3] = flux_pu241
            elif (isotope == "other"):

                def flux_other(enu):
                    if (type(enu) == float):
                        enu = np.asarray([enu])
                    else:
                        enu = np.asarray(enu)
                    return 0.0 * np.array(enu)

                self._flux_functions[4] = flux_other
torch.manual_seed(args.seed)
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from scipy.optimize import linear_sum_assignment

from torch.nn.utils import weight_norm
device = torch.device('cuda:0')

key = {'Orange':0,'Green':1,'Black':2,'Purple':3,'White':4,'LBlue':5,'Blue':6}

extraction_orders = np.genfromtxt('../data/extraction_order.txt',delimiter=',',dtype=str)

images = np.load('../data/cube_ims.npy')
print (images.shape)

actions = np.vectorize(key.get)(extraction_orders)
print(actions.shape)

K = 7
a_one_hot = np.zeros((actions.shape[0],K,K))
for i,a in enumerate(actions):
    oh = np.zeros((K,K))
    oh[np.arange(a.shape[0]),a] = 1
    a_one_hot[i,:,:] = oh

class Sampler(Dataset):
    
    def __init__(self, ims, actions, K=6):
        
        self.ims = torch.FloatTensor(ims.astype('float'))
        self.actions = torch.FloatTensor(actions.astype('float')).long()
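
The np.vectorize(key.get) line above maps a dict lookup over a whole array of strings, preserving its shape. A small self-contained sketch:

import numpy as np

key = {'Orange': 0, 'Green': 1}
orders = np.array([['Green', 'Orange'], ['Orange', 'Green']])
print(np.vectorize(key.get)(orders))  # [[1 0]
                                      #  [0 1]]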
Example #37
0
def bound(prob):
    return 1 if prob > 0.5 else 0


if __name__ == '__main__':
    print(weights)
    W, cost_log, yhat = train(X,
                              y,
                              weights,
                              lr=0.5,
                              n_iters=40000,
                              batch_size=n_train)
    print "Weights"
    print W
    yhat = predict(X_test, W)
    print "Cost of test: "
    print calc_cost(y_test, yhat)
    make_prob = np.vectorize(bound)  # Vectorizing the function
    y_pred = make_prob(yhat)  # Bound probs to zero and 1
    compute_accuracy(y_test, y_pred)

    # Plot iteration vs cost plot
    import matplotlib.pyplot as plt
    import seaborn as sns
    plt.plot(range(40000), cost_log)
    plt.title("Iterations vs Cost")
    plt.xlabel("n_iterations")
    plt.ylabel("cost")
    plt.savefig('cost_convergence.png')
    plt.show()
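
The make_prob = np.vectorize(bound) step thresholds the predicted probabilities element-wise; a boolean comparison achieves the same without the per-element Python call:

import numpy as np

def bound(prob):  # same scalar rule as above
    return 1 if prob > 0.5 else 0

yhat = np.array([0.2, 0.7, 0.5])
print(np.vectorize(bound)(yhat))  # [0 1 0]
print((yhat > 0.5).astype(int))   # equivalent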
import cv2  # needed for the Scharr gradients below
import numpy as np
from gdpc import interface, lookup
from gdpc.toolbox import loop2d
from gdpc.worldLoader import WorldSlice

if __name__ == '__main__':
    # see if a different build area was defined ingame
    x1, _, z1, x2, _, z2 = interface.requestBuildArea()

    # load the world data and extract the heightmap(s)
    slice = WorldSlice(x1, z1, x2, z2)

    heightmap = np.array(slice.heightmaps["OCEAN_FLOOR"], dtype=int)

    # calculate the gradient (steepness)
    decrementor = np.vectorize(lambda a: a - 1)
    cvheightmap = np.clip(decrementor(heightmap), 0, 255).astype(np.uint8)
    gradientX = cv2.Scharr(cvheightmap, cv2.CV_16S, 1, 0)
    gradientY = cv2.Scharr(cvheightmap, cv2.CV_16S, 0, 1)

    # create a dictionary mapping block ids ("minecraft:...") to colors
    palette = lookup.PALETTELOOKUP

    # create a 2d map containing the surface block colors
    topcolor = np.zeros((x2 - x1 + 1, z2 - z1 + 1), dtype='int')
    unknownBlocks = set()

    for x, z in loop2d(x1, z1, x2, z2):
        # check up to 5 blocks below the heightmap
        for dy in range(5):
            # calculate absolute coordinates
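
The decrementor above is np.vectorize applied to simple arithmetic, which plain array operations already broadcast; a quick equivalence check:

import numpy as np

heightmap = np.array([[64, 70], [0, 255]])
a = np.clip(np.vectorize(lambda h: h - 1)(heightmap), 0, 255)
b = np.clip(heightmap - 1, 0, 255)  # ordinary array arithmetic
print(np.array_equal(a, b))  # True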
Example #39
0
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter

# Prepare meshgrid
X = np.arange(0, 1, 0.025)
Y = np.arange(0, 2 * math.pi, 0.025)
X, Y = np.meshgrid(X, Y)


# Function to strip off alpha and get optimal distance only
def f(distance, beta):
    return catmouse.maxDiffTimeCatMouse(distance, beta)[1]


# Calculate distance difference. Positive means safe zone
Z = np.vectorize(f)(X, Y)

# Create a 3D surface plot
fig = plt.figure()
ax = fig.gca(projection='3d')

# Plot the surface.
surf = ax.plot_surface(X,
                       Y,
                       Z,
                       cmap=cm.coolwarm,
                       linewidth=0,
                       antialiased=False)

# Customize the z axis.
ax.set_zlim(-7, 7)
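
Applying np.vectorize(f) to the meshgrid arrays evaluates the scalar function at every (distance, beta) grid point and returns an array of the grid's shape. A tiny sketch with a stand-in function:

import numpy as np

X, Y = np.meshgrid(np.array([0.0, 0.5]), np.array([0.0, 1.0]))
g = np.vectorize(lambda x, y: x + 2 * y)  # stand-in for f(distance, beta)
print(g(X, Y))  # shape (2, 2): evaluated at every grid point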
Example #40
0
 def __init__(self, tokenizer):
     self.tokenizer = np.vectorize(
         lambda x: np.array(tokenizer(x), dtype='U'), signature='()->(n)')
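
The signature='()->(n)' argument turns np.vectorize into a generalized ufunc: each scalar input yields a length-n vector, provided every call returns the same n. A sketch with a whitespace tokenizer:

import numpy as np

tok = np.vectorize(lambda s: np.array(s.split(), dtype='U'),
                   signature='()->(n)')
print(tok(np.array(['a b', 'c d'])))  # shape (2, 2) array of tokens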
Example #41
0
def f_density2(size, bounds):
    vfun = np.vectorize(__f2__)  # <-- Vectorising the function
    t = np.linspace(bounds[0], bounds[1], size)
    f = vfun(t)
    return f
Example #42
0
def gen_density2(size, bounds):
    vfun = np.vectorize(__f2__)
    return rs.rejection_sampling(vfun, size, bounds)
Example #43
0
def deriv_sigmoid(num):
    A = np.vectorize(deriv_sigmoid_help)
    return A(num)
Example #44
0
def error(y, a):
    A = np.vectorize(err)
    return A(y, a)
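
Examples #43 and #44 rebuild the np.vectorize wrapper on every call; hoisting it to module level pays the setup cost once. A sketch of the cheaper form, with a hypothetical err helper:

import numpy as np

def err(y, a):  # hypothetical scalar helper standing in for the one above
    return (y - a) ** 2

_err_v = np.vectorize(err)  # built once, at import time

def error(y, a):
    return _err_v(y, a)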
Example #45
0
def padding(tensor):
    maxlen = 48
    return sequence.pad_sequences(tensor, maxlen=maxlen)


def import_model():
    global model
    model = load_model(config['MODEL_FILE'])


def infer(names):
    global model

    if model is None:
        import_model()

    tensor = [name_to_tensor(name.lower()) for name in names]
    X = padding(tensor)
    return model.predict(X)


def change_proba(proba):
    if proba < .5:
        proba = 1 - proba
    return round(proba * 100, 2)


change_proba = np.vectorize(change_proba)

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
Example #46
0
def sigmoid(num):
    A = np.vectorize(help_s)
    return A(num)
Example #47
0
def getratio(df, var1, var2):
    x = np.vectorize(ratio)(df[var1], df[var2])
    return x
Example #48
0
pop = toolbox.population(n=n_pop)
hof = tools.HallOfFame(
    10)  # only record the best 10 individuals ever found in all generations

# start evolution
start = time.time()
pop, log = multigep.gep_multi(pop,
                              toolbox,
                              n_generations=n_gen,
                              n_elites=3,
                              stats=stats,
                              hall_of_fame=hof,
                              verbose=True)
end = time.time()
print("time spent: {} s".format(round(end - start, 2)))

print('\nSymplified best individual: ')
symplified_best_list = []
result_list = []
for i in range(len(hof)):
    symplified_best = gep.simplify(hof[i], sym_map)
    if symplified_best not in symplified_best_list:
        symplified_best_list.append(symplified_best)
        result = mpse.capture_test(func=np.vectorize(toolbox.compile(hof[i])),
                                   loop=loop)
        print(result, '  ', symplified_best)
        result_list.append(result)

print('\n', len(symplified_best_list), 'different items')
for i in range(len(symplified_best_list)):
    print("\'" + str(symplified_best_list[i]) + "\', #" + str(result_list[i]))
Example #49
0
    >>> def eigenvalue(f, df, ddf):
    ...     r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f
    ...     return -r.mean(), r.std()
    >>> s = np.linspace(0.1, 10, 200)
    >>> k, h, n, p = 8.0, 2.2, 3, 2
    >>> E = ellip_harm(h**2, k**2, n, p, s)
    >>> E_spl = UnivariateSpline(s, E)
    >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2))
    >>> a, a_err
    (583.44366156701483, 6.4580890640310646e-11)

    """
    return _ellip_harm(h2, k2, n, p, s, signm, signn)


_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d')
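
otypes='d' pins the output dtype to float64; among other things this lets the vectorized function accept empty inputs, since np.vectorize otherwise makes a trial call on the first element to infer the output type. A small illustration:

import numpy as np

halve = np.vectorize(lambda x: x / 2.0, otypes='d')
halve(np.array([]))      # array([], dtype=float64); without otypes this raises
halve(np.array([1, 3]))  # array([0.5, 1.5])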


def ellip_harm_2(h2, k2, n, p, s):
    r"""
    Ellipsoidal harmonic functions F^p_n(l)

    These are also known as Lame functions of the second kind, and are
    solutions to the Lame equation:

    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
Example #50
import numpy as np

sign = np.vectorize(lambda x: np.where(x >= 0, 1, -1))  # unlike np.sign, maps 0 to +1
# sign = np.sign

class Hopfield:
    def train(self, samples, delete_diagonal=False):
        width = samples.shape[1]
        self.W = np.zeros((width, width))
        for x in samples:
            self.W += np.outer(x, x)

        # Optionally zero out self-connections
        if delete_diagonal:
            np.fill_diagonal(self.W, 0)

        self.W = self.W / len(samples)

    def predict_sync(self, x, max_iter=200):

        self.past_energy = []
        x_cur = np.copy(x.T)

        for _ in range(max_iter):
            self.past_energy.append(self.energy(x_cur))
            x_next = sign(self.W @ x_cur)
            if np.all(x_next == x_cur):
                break
            x_cur = x_next
        return x_cur.T.astype(int)
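
A minimal demo sketch. predict_sync references self.energy, which is not shown in the snippet; the standard Hopfield energy -0.5 * x.T W x is assumed here:

import numpy as np

Hopfield.energy = lambda self, x: -0.5 * x @ self.W @ x  # assumed, not in the original

patterns = np.array([[1, -1, 1, -1],
                     [1, 1, -1, -1]])
net = Hopfield()
net.train(patterns, delete_diagonal=True)
print(net.predict_sync(patterns[0]))  # a stored pattern is a fixed point: [ 1 -1  1 -1]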
Example #51
import pickle

import numpy as np

str_num = 512
col_num = 512
kdr_num = 16  # was commented out in the source but is used below
n_num = 512
# files_num and pix_bytes are defined earlier in the source file
all_kdr_num = files_num * kdr_num
all_Gbytes = pix_bytes * str_num * col_num * all_kdr_num / 1024**3


def gen_video(n):
    with open('video/video_v_' + str(n) + '.pickle', 'wb') as file:
        pickle.dump(
            np.random.randint(65536, size=(kdr_num, str_num,
                                           col_num)).astype(np.uint16), file)


gen_video_V = np.vectorize(gen_video, [bool])
iter_files = range(files_num)
"""
with Timer() as t:
    gen_video_V(iter_files)
time = t.secs
# print(res.__sizeof__())
print("gen video", time, time / all_Gbytes)
# input()
# """
Sum_kdr = np.zeros((kdr_num, str_num, col_num), dtype=np.uint32)


def sum_video(n):
    global Sum_kdr
    with open("video/video_v_" + str(n) + ".pickle", 'rb') as file:
Example #52
File: replace.py  Project: tnir/pandas
def compare_or_regex_search(a: ArrayLike, b: Scalar | Pattern, regex: bool,
                            mask: npt.NDArray[np.bool_]) -> ArrayLike | bool:
    """
    Compare two array-like inputs of the same shape or two scalar values

    Calls operator.eq or re.search, depending on regex argument. If regex is
    True, perform an element-wise regex matching.

    Parameters
    ----------
    a : array-like
    b : scalar or regex pattern
    regex : bool
    mask : np.ndarray[bool]

    Returns
    -------
    mask : array-like of bool
    """
    if isna(b):
        return ~mask

    def _check_comparison_types(result: ArrayLike | bool, a: ArrayLike,
                                b: Scalar | Pattern):
        """
        Raises an error if the two arrays (a,b) cannot be compared.
        Otherwise, returns the comparison result as expected.
        """
        if is_scalar(result) and isinstance(a, np.ndarray):
            type_names = [type(a).__name__, type(b).__name__]

            type_names[0] = f"ndarray(dtype={a.dtype})"

            raise TypeError(
                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
            )

    if not regex or not should_use_regex(regex, b):
        # TODO: should use missing.mask_missing?
        op = lambda x: operator.eq(x, b)
    else:
        op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(
            x, str) and isinstance(b, (str, Pattern)) else False)

    # GH#32621 use mask to avoid comparing to NAs
    if isinstance(a, np.ndarray):
        a = a[mask]

    if is_numeric_v_string_like(a, b):
        # GH#29553 avoid deprecation warnings from numpy
        return np.zeros(a.shape, dtype=bool)

    elif is_datetimelike_v_numeric(a, b):
        # GH#29553 avoid deprecation warnings from numpy
        _check_comparison_types(False, a, b)
        return False

    result = op(a)

    if isinstance(result, np.ndarray) and mask is not None:
        # The shape of the mask can differ to that of the result
        # since we may compare only a subset of a's or b's elements
        tmp = np.zeros(mask.shape, dtype=np.bool_)
        np.place(tmp, mask, result)
        result = tmp

    _check_comparison_types(result, a, b)
    return result
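
The regex branch above is the core np.vectorize use here: a compiled-pattern search lifted over an object array, with non-string elements falling through to False. The same idea in isolation (a sketch, independent of pandas internals):

import re
import numpy as np

pattern = re.compile(r"ba.")
arr = np.array(["bat", "cow", 7, "bar"], dtype=object)
matches = np.vectorize(
    lambda x: bool(pattern.search(x)) if isinstance(x, str) else False)(arr)
print(matches)  # [ True False False  True]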
Example #53
    def calculate_mass(self):
        # input array: f_con at zi, deltaMh at zi
        # return an array of M*

        ## kwargs
        kwargs = self.kwargs
        f0 = self.kwargs["f0"]

        z_array = self.z_array
        delta_mh_array = self.delta_mh_array

        # At each step, Mh and M* are scalars, not arrays.

        # Reverse the arrays so time runs from high z to low z

        z_array_inverse = z_array[::-1]
        delta_mh_array_inverse = delta_mh_array[::-1]
        Mh_all_reverse = self.Mh_all[::-1]

        #### Now we need to input Mh and M* for f_con

        # initial values

        # Time starts at large z (small t, small a)

        M_stellar_interval = []

        # initial value follows the f_con prescription in Jeremy's Eq. (2)
        Ms_now = Mh_all_reverse[0] * 0.1 * 0.0045

        for i in range(0, len(z_array_inverse)):
            Mh = Mh_all_reverse[i]

            M_stellar_interval_i = self.stellar_mass_per_interval(
                f_con=self.f_con_z(z=z_array_inverse[i], Mh=Mh, Ms=Ms_now),
                delta_Mh=delta_mh_array_inverse[i])

            Ms_now += M_stellar_interval_i

            M_stellar_interval.append(M_stellar_interval_i)

        M_stellar_interval = np.array(M_stellar_interval)
        #### Add f_q

        cal_fq = np.vectorize(self.f_q)

        f_q_array_temp = np.array(cal_fq(z=z_array_inverse))
        # print(z_array)
        # print(f_q_array_temp)
        # print(f_q_array_temp.shape,M_stellar_interval.shape)

        # multiply them (f_q)

        M_stellar_interval = M_stellar_interval * f_q_array_temp

        # calculate

        M_stellar = []

        # Note: M_stellar_interval is still in reversed (high-z first) order

        for j in range(0, len(M_stellar_interval)):
            # Careful: small t and small a correspond to large z, and the
            # arrays here are still in reversed (high-z first) order.

            M_stellar.append(np.sum(M_stellar_interval[0:j]))

        self.M_stellar = np.array(M_stellar)
        # print(M_stellar_interval)
        # print(M_stellar)
        return M_stellar
Example #54
    def handle_dataset(self, in_arr):

        out_dims = [1, 1, 1]
        rank = len(in_arr.shape)
        num_ones = 3 - rank
        in_dims = [x for x in in_arr.shape] + [1] * num_ones

        if np.iscomplexobj(in_arr):
            in_arr_re = np.real(in_arr)
            in_arr_im = np.imag(in_arr)
        else:
            in_arr_re = in_arr
            in_arr_im = None

        if self.verbose:
            fmt = "Input data is rank {}, size {}x{}x{}."
            print(fmt.format(rank, in_dims[0], in_dims[1], in_dims[2]))

        if self.resolution > 0:
            out_dims[0] = math.floor(self.Rout.c1.norm() * self.resolution + 0.5)
            out_dims[1] = math.floor(self.Rout.c2.norm() * self.resolution + 0.5)
            out_dims[2] = math.floor(self.Rout.c3.norm() * self.resolution + 0.5)
        else:
            for i in range(3):
                out_dims[i] = in_dims[i] * self.multiply_size[i]

        for i in range(rank, 3):
            out_dims[i] = 1

        N = 1
        for i in range(3):
            out_dims[i] = int(max(out_dims[i], 1))
            N *= out_dims[i]

        if self.verbose:
            print("Output data {}x{}x{}".format(out_dims[0], out_dims[1], out_dims[2]))

        out_arr_re = np.zeros(int(N))

        if isinstance(in_arr_im, np.ndarray):
            out_arr_im = np.zeros(int(N))
        else:
            out_arr_im = np.array([])

        flat_in_arr_re = in_arr_re.ravel()
        flat_in_arr_im = in_arr_im.ravel() if isinstance(in_arr_im, np.ndarray) else np.array([])

        if self.kpoint:
            kvector = [self.kpoint.x, self.kpoint.y, self.kpoint.z]
        else:
            kvector = []

        map_data(flat_in_arr_re, flat_in_arr_im, np.array(in_dims, dtype=np.intc),
                 out_arr_re, out_arr_im, np.array(out_dims, dtype=np.intc), self.coord_map,
                 kvector, self.pick_nearest, self.verbose, False)

        if np.iscomplexobj(in_arr):
            # multiply * scaleby for complex data
            complex_out = np.vectorize(complex)(out_arr_re, out_arr_im)
            complex_out *= self.scaleby

            return np.reshape(complex_out, out_dims[:rank])

        return np.reshape(out_arr_re, out_dims[:rank])
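
np.vectorize(complex) here simply zips two real arrays into one complex array; a cheaper equivalent, for reference:

import numpy as np

re_part = np.array([1.0, 2.0])
im_part = np.array([0.5, -0.5])
assert np.array_equal(np.vectorize(complex)(re_part, im_part),
                      re_part + 1j * im_part)  # same values, no Python-level loop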
Example #55
def run(name, dataset, config, all_users, all_movies, tests, initial_v, sep):
    config_name = config['name']
    number_hidden = config['number_hidden']
    epochs = config['epochs']
    ks = config['ks']
    momentums = config['momentums']
    l_w = config['l_w']
    l_v = config['l_v']
    l_h = config['l_h']
    decay = config['decay']

    config_result = config.copy()
    config_result['results'] = []

    vis = T.matrix()
    vmasks = T.matrix()

    rbm = CFRBM(len(all_users) * 5, number_hidden)

    profiles = defaultdict(list)

    with open(dataset, 'rt') as data:
        for i, line in enumerate(data):
            uid, mid, rat, timestamp = line.strip().split(sep)
            profiles[mid].append((uid, float(rat)))

    print("Users and ratings loaded")

    for j in range(epochs):

        def get_index(col):
            # integer division so the result can index the schedule lists
            if j // (epochs // len(col)) < len(col):
                return j // (epochs // len(col))
            else:
                return -1

        index = get_index(ks)
        mindex = get_index(momentums)
        icurrent_l_w = get_index(l_w)
        icurrent_l_v = get_index(l_v)
        icurrent_l_h = get_index(l_h)

        k = ks[index]
        momentum = momentums[mindex]
        current_l_w = l_w[icurrent_l_w]
        current_l_v = l_v[icurrent_l_v]
        current_l_h = l_h[icurrent_l_h]

        train = rbm.cdk_fun(vis,
                            vmasks,
                            k=k,
                            w_lr=current_l_w,
                            v_lr=current_l_v,
                            h_lr=current_l_h,
                            decay=decay,
                            momentum=momentum)
        predict = rbm.predict(vis)

        batch_size = 10
        for batch_i, batch in enumerate(
                utils.chunker(profiles.keys(), batch_size)):
            size = min(len(batch), batch_size)

            # create needed binary vectors
            bin_profiles = {}
            masks = {}
            for movieid in batch:
                movie_profile = [0.] * len(all_users)
                mask = [0] * (len(all_users) * 5)

                for user_id, rat in profiles[movieid]:
                    movie_profile[all_users.index(user_id)] = rat
                    for _i in range(5):
                        mask[5 * all_users.index(user_id) + _i] = 1

                example = expand(np.array([movie_profile])).astype('float32')
                bin_profiles[movieid] = example
                masks[movieid] = mask

            movies_batch = [bin_profiles[id] for id in batch]
            masks_batch = [masks[id] for id in batch]
            train_batch = np.array(movies_batch).reshape(
                size,
                len(all_users) * 5)
            train_masks = np.array(masks_batch).reshape(
                size,
                len(all_users) * 5)
            train_masks = train_masks.astype('float32')
            train(train_batch, train_masks)
            sys.stdout.write('.')
            sys.stdout.flush()

        batch_size = 10
        ratings = []
        predictions = []

        for batch in utils.chunker(tests.keys(), batch_size):
            size = min(len(batch), batch_size)

            # create needed binary vectors
            bin_profiles = {}
            masks = {}
            for movieid in batch:
                movie_profile = [0.] * len(all_users)
                mask = [0] * (len(all_users) * 5)

                for userid, rat in profiles[movieid]:
                    movie_profile[all_users.index(userid)] = rat
                    for _i in range(5):
                        mask[5 * all_users.index(userid) + _i] = 1

                example = expand(np.array([movie_profile])).astype('float32')
                bin_profiles[movieid] = example
                masks[movieid] = mask

            positions = {movie_id: pos for pos, movie_id in enumerate(batch)}
            movies_batch = [bin_profiles[el] for el in batch]
            test_batch = np.array(movies_batch).reshape(
                size,
                len(all_users) * 5)
            movie_predictions = revert_expected_value(predict(test_batch))
            for movie_id in batch:
                test_users = tests[movie_id]
                try:
                    for user, rating in test_users:
                        current_movie = movie_predictions[positions[movie_id]]
                        predicted = current_movie[all_users.index(user)]
                        rating = float(rating)
                        ratings.append(rating)
                        predictions.append(predicted)
                except Exception:
                    pass

        vabs = np.vectorize(abs)
        distances = np.array(ratings) - np.array(predictions)

        mae = vabs(distances).mean()
        rmse = sqrt((distances**2).mean())

        iteration_result = {
            'iteration': j,
            'k': k,
            'momentum': momentum,
            'mae': mae,
            'rmse': rmse,
            'lrate': current_l_w
        }

        config_result['results'].append(iteration_result)

        print(iteration_str.format(j, k, current_l_w, momentum, mae, rmse))

        with open('experiments/{}_{}.json'.format(config_name, name),
                  'wt') as res_output:
            res_output.write(json.dumps(config_result, indent=4))
Example #56
    def recall(self, patterns, steps=2):
        sgn = np.vectorize(lambda x: -1 if x < 0 else +1)
        for _ in trange(steps):
            patterns.patterns = sgn(np.dot(patterns.patterns, self.W))
        return patterns
Example #57
import math

import numpy as np


def log10(x):
    if x > 0:
        return math.log10(x)
    else:
        return -np.inf


"""

    if x>0:
        return math.log10(x)
    else:
        return float("nan")

"""

log10 = np.vectorize(log10)
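
With the wrapper vectorized, nonpositive entries map to -inf instead of raising a ValueError; for example:

import numpy as np

print(log10(np.array([100.0, 0.0, -3.0, 10.0])))  # [  2. -inf -inf   1.]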


def exp(x):
    try:
        return math.exp(x)
    except OverflowError:
        if x > 0:
            return np.inf
        else:
            return 0


"""

    try:
Example #58
    def handle_cvector_dataset(self, in_arr, multiply_bloch_phase):
        in_x_re = np.real(in_arr[:, :, :, 0]).ravel()
        in_x_im = np.imag(in_arr[:, :, :, 0]).ravel()
        in_y_re = np.real(in_arr[:, :, :, 1]).ravel()
        in_y_im = np.imag(in_arr[:, :, :, 1]).ravel()
        in_z_re = np.real(in_arr[:, :, :, 2]).ravel()
        in_z_im = np.imag(in_arr[:, :, :, 2]).ravel()

        d_in = [[in_x_re, in_x_im], [in_y_re, in_y_im], [in_z_re, in_z_im]]
        in_dims = [in_arr.shape[0], in_arr.shape[1], 1]
        rank = 2

        if self.verbose:
            print("Found complex vector dataset...")

        if self.verbose:
            fmt = "Input data is rank {}, size {}x{}x{}."
            print(fmt.format(rank, in_dims[0], in_dims[1], in_dims[2]))

        # rotate vector field according to cart_map
        if self.verbose:
            fmt1 = "Rotating vectors by matrix [ {:.10g}, {:.10g}, {:.10g}"
            fmt2 = "                             {:.10g}, {:.10g}, {:.10g}"
            fmt3 = "                             {:.10g}, {:.10g}, {:.10g} ]"
            print(fmt1.format(self.cart_map.c1.x, self.cart_map.c2.x, self.cart_map.c3.x))
            print(fmt2.format(self.cart_map.c1.y, self.cart_map.c2.y, self.cart_map.c3.y))
            print(fmt3.format(self.cart_map.c1.z, self.cart_map.c2.z, self.cart_map.c3.z))

        N = in_dims[0] * in_dims[1]
        for ri in range(2):
            for i in range(N):
                v = mp.Vector3(d_in[0][ri][i], d_in[1][ri][i], d_in[2][ri][i])
                v = self.cart_map * v
                d_in[0][ri][i] = v.x
                d_in[1][ri][i] = v.y
                d_in[2][ri][i] = v.z

        out_dims = [1, 1, 1]

        if self.resolution > 0:
            out_dims[0] = self.Rout.c1.norm() * self.resolution + 0.5
            out_dims[1] = self.Rout.c2.norm() * self.resolution + 0.5
            out_dims[2] = self.Rout.c3.norm() * self.resolution + 0.5
        else:
            for i in range(3):
                out_dims[i] = in_dims[i] * self.multiply_size[i]

        out_dims[2] = 1

        N = 1
        for i in range(3):
            out_dims[i] = int(max(out_dims[i], 1))
            N *= out_dims[i]

        if self.verbose:
            fmt = "Output data {}x{}x{}."
            print(fmt.format(out_dims[0], out_dims[1], out_dims[2]))

        if self.kpoint:
            kvector = [self.kpoint.x, self.kpoint.y, self.kpoint.z]
        else:
            kvector = []

        converted = []
        for dim in range(3):
            out_arr_re = np.zeros(int(N))
            out_arr_im = np.zeros(int(N))

            map_data(d_in[dim][0].ravel(), d_in[dim][1].ravel(), np.array(in_dims, dtype=np.intc),
                     out_arr_re, out_arr_im, np.array(out_dims, dtype=np.intc), self.coord_map,
                     kvector, self.pick_nearest, self.verbose, multiply_bloch_phase)

            # multiply * scaleby
            complex_out = np.vectorize(complex)(out_arr_re, out_arr_im)
            complex_out *= self.scaleby
            converted.append(complex_out)

        result = np.zeros(np.prod(out_dims) * 3, np.complex128)
        result[0::3] = converted[0]
        result[1::3] = converted[1]
        result[2::3] = converted[2]

        return np.reshape(result, (out_dims[0], out_dims[1], 3))
Example #59
    rois = (
        generate_rois_rotated(roi_counts, im_dims)
        if rotated
        else generate_rois(roi_counts, im_dims)
    )
    box_dim = 5 if rotated else 4
    deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
    im_info = np.zeros((batch_size, 3)).astype(np.float32)
    im_info[:, 0] = im_dims
    im_info[:, 1] = im_dims
    im_info[:, 2] = 1.0
    return rois, deltas, im_info


# Eigen/Python round 0.5 away from 0, Numpy rounds to even
round_to_nearest = np.vectorize(round)
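
The comment reflects Python 2 semantics: Python 2's round() goes half away from zero, while NumPy rounds half to even (in Python 3 the builtin also rounds half to even, so this shim only differs under Python 2). A quick comparison:

import numpy as np

vals = np.array([0.5, 1.5, 2.5])
print(np.round(vals))             # [0. 2. 2.]  -- half to even
print(np.vectorize(round)(vals))  # [1. 2. 3.] on Python 2; [0 2 2] on Python 3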


def bytes_to_floats(byte_matrix):
    floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float32)
    for i, byte_values in enumerate(byte_matrix):
        floats[i], = struct.unpack('f', bytearray(byte_values))
    return floats


def floats_to_bytes(floats):
    byte_matrix = np.empty([np.shape(floats)[0], 4], dtype=np.uint8)
    for i, value in enumerate(floats):
        assert isinstance(value, np.float32), (value, floats)
        as_bytes = struct.pack('f', value)
        # In Python3 bytes will be a list of int, in Python2 a list of string
Example #60
def xEigFun(r, theta, l_star):
    return np.cos(l_star * ((width / 2) + (r * np.sin(theta))))


def yEigFun(r, theta, m_star):
    return np.sin(m_star * (L0 - (r * np.cos(theta))))


def zEigFun(zeta, n):
    if n == 0:
        return 1
    else:
        return np.sqrt(2) * np.cos(n * math.pi * zeta)


# Eigenfunctions that can take vector inputs
xfunc = np.vectorize(xEigFun, otypes=[float])  # np.float was removed in NumPy 1.24
yfunc = np.vectorize(yEigFun, otypes=[float])
zfunc = np.vectorize(zEigFun, otypes=[float])


# Function to calculate the double integral for the respective 'l' and 'm' values.
# Optional output: plot of first integral vs. u
def double_integral(ll, mm):
    integral_xy = np.zeros(sampleRate)
    l_star = 2 * math.pi * ll / width
    m_star = (mm + 1 / 2) * math.pi / length
    for i in range(0, sampleRate):
        integral_xy[i] = (8 / (width * length)) * np.trapz(
            np.exp(-2 * np.square(r[i]) / np.square(w0)) *
            xEigFun(r[i], theta, l_star) * yEigFun(r[i], theta, m_star) * r[i],
            x=theta)  # Calculation of the first integral