Example no. 1
def plot_original_signal_from_intrinsic_mode_functions(sample_frequency,
                                                       imfs,
                                                       residue,
                                                       channel,
                                                       plotter=plt):
    '''
    Reconstruct the original signal by summing all intrinsic mode functions
    (IMFs) and the residue, then plot it against time.

    :param sample_frequency: sampling frequency of the signal in Hz
    :param imfs: sequence of intrinsic mode functions, one array per IMF
    :param residue: residue left after the decomposition
    :param channel: channel name used in the figure title
    :param plotter: plotting backend, defaults to matplotlib.pyplot
    :return: None; the reconstructed signal is drawn on a new figure
    '''
    n_rows = len(imfs)
    data_length = len(imfs[0])
    final_signal = scipy.zeros(data_length)
    time_axis = scipy.linspace(start=0,
                               stop=data_length / sample_frequency,
                               num=data_length)

    # sum every IMF; indexing with i (not i + 1) stays inside the imfs array
    for i in range(n_rows):
        final_signal = scipy.add(final_signal, imfs[i])

    final_signal = scipy.add(final_signal, residue)
    f, axis = plotter.subplots(1, 1)
    sup_title = "Channel " + channel
    f.suptitle(sup_title, fontsize=18)
    axis.plot(time_axis, final_signal)
    axis.grid()
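
A minimal, self-contained sketch of the same reconstruction idea with synthetic data; the IMFs, residue and channel name below are assumed, and plain numpy/matplotlib stand in for the scipy aliases used above:

import numpy as np
import matplotlib.pyplot as plt

sample_frequency = 250.0                              # Hz (assumed)
t = np.linspace(0, 4, int(4 * sample_frequency), endpoint=False)

# two fake intrinsic mode functions plus a slow residue (assumed data)
imfs = [np.sin(2 * np.pi * 10 * t), 0.5 * np.sin(2 * np.pi * 2 * t)]
residue = 0.1 * t

# the reconstruction is simply the sum of all IMFs and the residue
final_signal = np.sum(imfs, axis=0) + residue

fig, axis = plt.subplots(1, 1)
fig.suptitle("Channel C3", fontsize=18)               # channel name is assumed
axis.plot(t, final_signal)
axis.grid()
plt.show()
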
Example no. 2
 def __init__(self, fc, c_vel, alp_g, mu_los, mu_nlos, a, b, noise_var, hUAV, xUAV, yUAV, xUE, yUE):
     dist = sp.sqrt( sp.add(sp.square(sp.subtract(yUAV, yUE)), sp.square(sp.subtract(xUAV, xUE))) )
     R_dist = sp.sqrt( sp.add(sp.square(dist), sp.square(hUAV)) )
     temp1 = sp.multiply(10, sp.log10(sp.power(fc*4*sp.pi*R_dist/c_vel, alp_g)))
     temp2 = sp.multiply(sp.subtract(mu_los, mu_nlos), sp.divide(1, (1+a*sp.exp(-b*sp.arctan(hUAV/dist)-a))))
     temp3 = sp.add(sp.add(temp1, temp2), mu_nlos)
     self.pathloss = sp.divide(sp.real(sp.power(10, -sp.divide(temp3, 10))), noise_var)
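
The same air-to-ground expression can be evaluated with plain numpy; every parameter value below is assumed purely for illustration:

import numpy as np

# assumed constants, roughly in the range used for air-to-ground channel models
fc, c_vel = 2e9, 3e8                        # carrier frequency [Hz], speed of light [m/s]
alp_g, mu_los, mu_nlos = 2.0, 1.0, 20.0     # path-loss exponent and excess losses [dB]
a, b = 9.61, 0.16                           # environment constants of the LOS-probability fit
noise_var = 1e-13
hUAV, xUAV, yUAV, xUE, yUE = 100.0, 0.0, 0.0, 200.0, 150.0

dist = np.hypot(yUAV - yUE, xUAV - xUE)             # horizontal UAV-to-UE distance
R_dist = np.hypot(dist, hUAV)                       # 3-D distance
fspl_dB = 10 * np.log10((fc * 4 * np.pi * R_dist / c_vel) ** alp_g)
los_term = (mu_los - mu_nlos) / (1 + a * np.exp(-b * np.arctan(hUAV / dist) - a))
total_dB = fspl_dB + los_term + mu_nlos
pathloss = 10 ** (-total_dB / 10) / noise_var       # linear channel gain over noise power
print(pathloss)
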
Example no. 3
 def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.5):
     tx_d = sp.add(sp.multiply(sp.subtract(coverage_r, 200), sp.random.uniform(low=low_tx)), 50)
     tx_angle = sp.multiply(2, sp.multiply(sp.pi, sp.random.rand(1, 1)))
     self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.cos(tx_angle)))
     self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.sin(tx_angle)))
     plt.scatter(self.tx_x, self.tx_y, s=20, c='blue', marker='o')
     plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))
 def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.1):
     tx_d = sp.multiply(coverage_r, sp.random.uniform(low=low_tx))
     tx_angle = sp.multiply(0.5, sp.multiply(sp.pi, (sp.random.rand())))
     self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.sin(tx_angle)))
     self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.cos(tx_angle)))
     plt.scatter(self.tx_x, self.tx_y, s=20, c='red')
     plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))
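
A compact numpy sketch of the placement idea both constructors implement (a random distance and angle around an origin); the radius, seed and label are assumed:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
coverage_r, origin_x, origin_y = 500.0, 0.0, 0.0    # assumed cell radius and centre

tx_d = coverage_r * rng.uniform(0.1, 1.0)           # radial distance from the origin
tx_angle = 2 * np.pi * rng.random()                 # angle in [0, 2*pi)
tx_x = origin_x + tx_d * np.cos(tx_angle)
tx_y = origin_y + tx_d * np.sin(tx_angle)

plt.scatter(tx_x, tx_y, s=20, c='blue', marker='o')
plt.annotate("TX0", (tx_x + 10, tx_y + 10))
plt.show()
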
Example no. 5
 def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.2):
     # uav_d = sp.add(sp.multiply(sp.subtract(coverage_r, 100), sp.random.rand(1, 1)), 50)
     uav_d = sp.add(sp.multiply(sp.subtract(coverage_r, 100), sp.random.uniform(low=low_tx)), 50)
     uav_angle = sp.multiply(2, sp.multiply(sp.pi, sp.random.rand(1, 1)))
     self.uav_x = sp.add(origin_x, sp.multiply(uav_d, sp.cos(uav_angle)))
     self.uav_y = sp.add(origin_y, sp.multiply(uav_d, sp.sin(uav_angle)))
     plt.scatter(self.uav_x, self.uav_y, s=40, c='red', marker='D')
     plt.annotate(id, (self.uav_x + 10, self.uav_y + 10))
Example no. 6
	def european_option_rho(self):
		"Price of the call option"
		"the vectorized method can compute price of multiple options in array"
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price,
				)
			),
			sp.multiply(
				(
					self.interest_rate - self.dividend_yield +
					0.5*sp.power(self.sigma,2)
				),
				self.time_to_maturity)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		d2 = sp.add(
			d1,
			-sp.multiply(
				self.sigma,
				sp.sqrt(self.time_to_maturity)
			)
		)


		j = sp.product(
			[
				self.strike_price,  # rho differentiates w.r.t. r, so the discounted strike enters here
				self.time_to_maturity,
				sp.exp(
					sp.multiply(
						-self.interest_rate,
						self.time_to_maturity
					)
				),
			],
			axis=0
		)

		c_rho = j * self.bls_erf_value(d2)
		p_rho = -j * self.bls_erf_value(-d2)

		return c_rho, p_rho
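
For comparison, a self-contained sketch of the textbook Black-Scholes rho (the sensitivity to the interest rate, in which the discounted strike appears), using scipy.stats.norm; the class attributes and the bls_erf_value helper above are not used here:

import numpy as np
from scipy.stats import norm

def bs_rho(spot, strike, r, q, sigma, T):
    """Textbook European rho: K*T*exp(-r*T)*N(d2) for a call, -K*T*exp(-r*T)*N(-d2) for a put."""
    d1 = (np.log(spot / strike) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    disc_strike = strike * T * np.exp(-r * T)
    return disc_strike * norm.cdf(d2), -disc_strike * norm.cdf(-d2)

call_rho, put_rho = bs_rho(spot=100.0, strike=95.0, r=0.05, q=0.0, sigma=0.2, T=1.0)
print(call_rho, put_rho)
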
def oht_alg(d2d_to_d2d_gains_diag, uav_to_d2d_gains, d2d_to_d2d_gains_diff, eta, power_UAV, power_cir_UAV):
    theta_ini = Parameter(value=1 / 0.5)
    iter = 0
    epsilon = 1
    theta_sol = 0
    iter_phi = []
    while epsilon >= 1e-2 and iter <= 20:
        iter += 1
        if iter == 1:
            theta_ref = theta_ini.value
        else:
            theta_ref = theta_sol

        term_x = sp.divide(1,
                           sp.multiply(sp.subtract(theta_ref, 1), sp.matmul(d2d_to_d2d_gains_diag, uav_to_d2d_gains)))
        term_y = sp.add(
            sp.multiply(sp.subtract(theta_ref, 1), sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), uav_to_d2d_gains)),
            sp.divide(1, eta * power_UAV))

        a_1 = sp.add(sp.divide(sp.multiply(2, sp.log(sp.add(1, sp.divide(1, sp.multiply(term_x, term_y))))), theta_ref),
                     sp.divide(2, sp.multiply(theta_ref, sp.add(sp.multiply(term_x, term_y), 1))))
        b_1 = sp.divide(1, sp.multiply(theta_ref, sp.multiply(term_x, sp.add(sp.multiply(term_x, term_y), 1))))
        c_1 = sp.divide(1, sp.multiply(theta_ref, sp.multiply(term_y, sp.add(sp.multiply(term_x, term_y), 1))))
        d_1 = sp.divide(sp.log(sp.add(1, sp.divide(1, sp.multiply(term_x, term_y)))), sp.square(theta_ref))

        theta = NonNegative(1)
        t_max = NonNegative(1)
        obj_opt = Maximize(t_max)

        constraints = [theta >= 1]
        constraints.append(
            t_max <= a_1 - sp.divide(b_1, sp.matmul(d2d_to_d2d_gains_diag, uav_to_d2d_gains)) * inv_pos(theta - 1)
            - mul_elemwise(c_1,
                           sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), uav_to_d2d_gains) * (theta - 1)
                           + sp.divide(1, eta * power_UAV))
            - d_1 * theta)

        t1 = time.time()

        prob = Problem(obj_opt, constraints)
        prob.solve(solver=ECOS_BB)
        theta_sol = theta.value
        phi_n_sol = sp.multiply((theta_sol - 1) * eta * power_UAV, uav_to_d2d_gains)
        x_rate = sp.matmul(d2d_to_d2d_gains_diag, phi_n_sol)
        term_rate = sp.matmul(sp.transpose(d2d_to_d2d_gains_diff), phi_n_sol) + 1
        rate_sol_ue = sp.divide(sp.log(sp.add(1, sp.divide(x_rate, term_rate))), theta_sol)
        iter_maximin_rate = min(rate_sol_ue)
        term_pow_iter = sp.subtract(1, sp.divide(1, theta_sol)) * eta * power_UAV * sp.add(1, sp.sum(
            uav_to_d2d_gains)) + power_cir_UAV
        iter_phi.append(t_max.value)
        if iter >= 2:
            epsilon = sp.divide(sp.absolute(sp.subtract(iter_phi[iter - 1], iter_phi[iter - 2])),
                                sp.absolute(iter_phi[iter - 2]))
        iter_EE = sp.divide(sp.multiply(1e3, sp.divide(sp.sum(rate_sol_ue), term_pow_iter)), sp.log(2))

    return iter_EE, theta_sol, iter_maximin_rate
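
A numpy-only sketch of the rate and energy-efficiency bookkeeping performed after each solve, with the cvxpy step omitted; theta, the channel gains and the power constants below are assumed stand-ins:

import numpy as np

rng = np.random.default_rng(1)
num_pairs = 4                                    # assumed number of D2D pairs
eta, power_UAV, power_cir_UAV = 0.5, 2.0, 0.1    # assumed harvesting efficiency and powers

uav_to_d2d_gains = rng.exponential(1.0, size=num_pairs)
d2d_to_d2d_gains = rng.exponential(1.0, size=(num_pairs, num_pairs))
d2d_to_d2d_gains_diag = np.diag(np.diag(d2d_to_d2d_gains))
d2d_to_d2d_gains_diff = d2d_to_d2d_gains - d2d_to_d2d_gains_diag
theta_sol = 2.0                                  # pretend this came out of the solver

# harvested transmit powers, per-link signal and interference terms, time-shared rates
phi_n_sol = (theta_sol - 1) * eta * power_UAV * uav_to_d2d_gains
x_rate = d2d_to_d2d_gains_diag @ phi_n_sol
term_rate = d2d_to_d2d_gains_diff.T @ phi_n_sol + 1
rate_sol_ue = np.log(1 + x_rate / term_rate) / theta_sol

term_pow = (1 - 1 / theta_sol) * eta * power_UAV * (1 + uav_to_d2d_gains.sum()) + power_cir_UAV
iter_EE = 1e3 * rate_sol_ue.sum() / term_pow / np.log(2)
print(rate_sol_ue.min(), iter_EE)
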
Example no. 8
def plot_original_signal_from_intrinsic_mode_functions(sample_frequency, imfs, residue, channel, plotter=plt):
    n_rows = len(imfs)
    data_length = len(imfs[0])
    final_signal = scipy.zeros(data_length)
    time_axis = scipy.linspace(start=0, stop=data_length / sample_frequency, num=data_length)

    # sum every IMF; indexing with i (not i + 1) stays inside the imfs array
    for i in range(n_rows):
        final_signal = scipy.add(final_signal, imfs[i])

    final_signal = scipy.add(final_signal, residue)
    f, axis = plotter.subplots(1, 1)
    sup_title = "Channel " + channel
    f.suptitle(sup_title, fontsize=18)
    axis.plot(time_axis, final_signal)
    axis.grid()
Example no. 9
def reflect1(v, u, c):
    print("Reflect by vector math variant 1:")
    c = 0
    center_ = eT(center(len(v)))
    print("center_:", center_)
    print("v:", v)
    v = scipy.subtract(v, center_)
    print("v:", v)
    print("u:", u)
    print("c:", c)
    v_dot_u = scipy.dot(v, u)
    print("v_dot_u:", v_dot_u)
    v_dot_u_minus_c = scipy.subtract(v_dot_u, c)
    print("v_dot_u_minus_c:", v_dot_u_minus_c)
    u_dot_u = scipy.dot(u, u)
    print("u_dot_u:", u_dot_u)
    quotient = scipy.divide(v_dot_u_minus_c, u_dot_u)
    print("quotient:", quotient)
    subtrahend = scipy.multiply((2 * quotient), u)
    print("subtrahend:", subtrahend)
    reflection = scipy.subtract(v, subtrahend)
    print("reflection:", reflection)
    reflection = scipy.add(reflection, center_)
    print("reflection:", reflection)
    return reflection
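
The variant above is the standard reflection across the hyperplane {x : u.(x - center) = c}; a tiny 2-D numpy check with assumed inputs:

import numpy as np

def reflect(v, u, c=0.0, center=None):
    """Reflect v across the hyperplane {x : u.(x - center) = c}."""
    if center is None:
        center = np.zeros_like(v)
    w = v - center
    w = w - 2 * ((w @ u - c) / (u @ u)) * u
    return w + center

v = np.array([3.0, 1.0])
u = np.array([0.0, 1.0])          # normal of the x-axis
print(reflect(v, u))              # -> [ 3. -1.], mirrored across the x-axis
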
Example no. 10
	def european_option_delta(self):
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price
				)
			),
			sp.multiply(
				( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
				self.time_to_maturity
			)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		call_delta = self.bls_erf_value(d1)
		put_delta = call_delta - 1 
		
		return call_delta, put_delta
    def __init__(self,
                 id,
                 coverage_r,
                 max_dist,
                 origin_x=0,
                 origin_y=0,
                 low_tx=0.2,
                 low_rx=0.5):
        # def __init__(self, id, coverage_r, max_dist, origin_x=0, origin_y=0, low_tx=0.3, low_rx=0.2):
        tx_d = sp.multiply(coverage_r, sp.random.uniform(low=low_tx))
        tx_angle = sp.multiply(
            2, sp.multiply(sp.pi, sp.subtract(sp.random.rand(), 1)))
        self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.sin(tx_angle)))
        self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.cos(tx_angle)))
        plt.scatter(self.tx_x, self.tx_y, s=20, c='red')
        plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))


# plt.show()
Example no. 12
 def kValues(self, tn, yn, h):
     #Initialise an empty vector k of the same length as b and init tnew
     A = self.A
     b = self.b
     c = self.c
     k = [ [0. * i *j for j in range(len(yn))]  for i in range(len(b))]
     tnew = 0
     ynew = scipy.zeros(len(yn))
     lincombinatie = ynew
     for i in range(len(b)):
         tnew = tn + c[i]*h
         ynew = scipy.zeros(len(yn))
         lincombinatie = scipy.zeros(len(yn))
         for j in range(i):
             prod = scipy.multiply(A[i][j]*h,k[j])
             lincombinatie = scipy.add(lincombinatie,prod)
         ynew = scipy.add(yn,lincombinatie)
         k[i] = scipy.multiply(1,self.ode.f(tnew,ynew))
         #k[i] = scipy.multiply(h,self.ode.f(tnew,ynew))
     return k
Example no. 13
    def __init__(self,
                 id,
                 coverage_r,
                 max_dist,
                 origin_x=0,
                 origin_y=0,
                 low_tx=0.2,
                 low_rx=0.5):
        # def __init__(self, id, coverage_r, max_dist, origin_x=0, origin_y=0, low_tx=0.3, low_rx=0.2):
        tx_d = sp.multiply(coverage_r, sp.random.uniform(low=low_tx))
        tx_angle = sp.multiply(
            2, sp.multiply(sp.pi, sp.subtract(sp.random.rand(), 1)))
        self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.sin(tx_angle)))
        self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.cos(tx_angle)))

        d2d_d = sp.multiply(max_dist, sp.random.uniform(low=low_rx))
        d2d_angle = sp.multiply(
            2, sp.multiply(sp.pi, sp.subtract(sp.random.rand(), 1)))
        self.rx_x = sp.add(self.tx_x, sp.multiply(d2d_d, sp.sin(d2d_angle)))
        self.rx_y = sp.add(self.tx_y, sp.multiply(d2d_d, sp.cos(d2d_angle)))
    def loss_to_pair(self, pair, atg_a, atg_b, pl_exp=4, gamma=1e2):
        dist = sp.sqrt(
            sp.add(sp.square(pair.tx_x),
                   sp.add(sp.square(pair.tx_y), sp.square(self.h))))
        phi = sp.multiply(sp.divide(180, sp.pi),
                          sp.arcsin(sp.divide(self.h, dist)))
        pr_LOS = sp.divide(
            1,
            sp.add(
                1,
                sp.multiply(
                    atg_a, sp.exp(sp.multiply(-atg_b, sp.subtract(phi,
                                                                  atg_a))))))
        pr_NLOS = sp.subtract(1, pr_LOS)

        total_loss = sp.add(
            sp.multiply(pr_LOS, sp.power(dist, -pl_exp)),
            sp.multiply(sp.multiply(pr_NLOS, gamma), sp.power(dist, -pl_exp)))

        return total_loss
    def loss_to_pair(self,
                     pair,
                     gain=1e-3,
                     exp_factor=None,
                     pl_exp=3):
        # draw a fresh fading realization on every call; a default argument of
        # sp.random.exponential(1) would be evaluated only once, at definition time
        if exp_factor is None:
            exp_factor = sp.random.exponential(1)
        dist = sp.sqrt(
            sp.add(sp.square(sp.subtract(self.tx_x, pair.rx_x)),
                   sp.square(sp.subtract(self.tx_y, pair.rx_y))))
        loss = sp.multiply(
            gain, sp.multiply(sp.square(exp_factor), sp.power(dist, -pl_exp)))

        return loss
Example no. 16
 def step(self,tn,yn,h):
     """
     takes a single time step using the runge-kutta method
         y_(n+1) = y_n + sum(b_i*k_i)
     Input:
         tn -- current time 
         yn -- state at time tn
         h -- size of the time step
     Output:
         y -- state at time tn + h
     """
     k = self.kValues(tn,yn,h)
     lincombinatie = scipy.zeros(len(yn))
     for i in range(len(self.b)):
         lincombinatie = scipy.add(scipy.multiply(k[i],self.b[i]*h), lincombinatie)
     return yn + lincombinatie
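
A self-contained numpy sketch of the same explicit Runge-Kutta stepping for an assumed classic RK4 tableau, applied to the test equation y' = -y (the class above keeps A, b, c and the ODE as attributes instead):

import numpy as np

# classic RK4 Butcher tableau (assumed for this sketch)
A = [[0, 0, 0, 0],
     [0.5, 0, 0, 0],
     [0, 0.5, 0, 0],
     [0, 0, 1, 0]]
b = [1 / 6, 1 / 3, 1 / 3, 1 / 6]
c = [0, 0.5, 0.5, 1]

def f(t, y):
    return -y                              # test ODE: y' = -y

def rk_step(tn, yn, h):
    # k[i] holds f evaluated at the i-th stage; h enters through A and b, as above
    k = []
    for i in range(len(b)):
        ynew = yn + h * sum(A[i][j] * k[j] for j in range(i))
        k.append(f(tn + c[i] * h, ynew))
    return yn + h * sum(bi * ki for bi, ki in zip(b, k))

y = np.array([1.0])
for n in range(10):
    y = rk_step(0.1 * n, y, 0.1)
print(y, np.exp(-1.0))                     # numerical vs exact solution at t = 1
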
Example no. 17
	def european_option_vega(self):
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price
				)
			),
			sp.multiply(
				( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
				self.time_to_maturity
			)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		
		val = sp.multiply(
			sp.multiply(
				self.spot_price,
				sp.exp(
					-sp.multiply(
						self.dividend_yield,
						self.time_to_maturity
					)
				)
			),
			sp.exp(-sp.square(d1)*0.5)
		)
		val = sp.multiply(
			val,
			sp.sqrt(self.time_to_maturity)
		)
		vega = (1 / sp.sqrt(2 * sp.pi)) * val
		
		return vega
Example no. 18
def reflect_by_householder(v, u, c):
    print("Reflect by Householder:")
    print("chord:", v)
    print("Unit normal vector:", u)
    center_ = center(len(v))
    tensor_ = scipy.outer(u, u)
    print("tensor_:\n", tensor_)
    product_ = scipy.multiply(tensor_, 2)
    print("product_:", product_)
    identity_ = scipy.eye(len(v))
    print("identity_:\n", identity_)
    householder = scipy.subtract(identity_, product_)
    print("householder:\n", householder)
    reflected_voices = scipy.matmul(householder, v)
    print("reflected_voices:", reflected_voices)
    translated_voices = scipy.subtract(v, center_)
    print("moved to origin:", translated_voices)
    reflected_translated_voices = scipy.matmul(householder, translated_voices)
    print("reflected_translated_voices:", reflected_translated_voices)
    reflection = scipy.add(reflected_translated_voices, center_)
    print("moved from origin:", reflection)
    print("reflection by householder:", reflection)
    return reflection
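
A short numpy check of the Householder construction used above: for a unit normal u, H = I - 2*outer(u, u) is orthogonal and maps u to -u (the example vectors are assumed):

import numpy as np

u = np.array([1.0, 2.0, 2.0])
u = u / np.linalg.norm(u)              # the construction assumes a unit normal

householder = np.eye(len(u)) - 2 * np.outer(u, u)

print(np.allclose(householder @ u, -u))                       # u is flipped
print(np.allclose(householder @ householder.T, np.eye(3)))    # H is orthogonal
v = np.array([3.0, 0.0, 1.0])
print(householder @ v)                 # reflection of an arbitrary vector
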
Example no. 19
def RMSLE(act, pred):
    loss = sum((sp.log(sp.add(pred, 1)) - sp.log(sp.add(act, 1))) ** 2)
    loss = sp.sqrt(loss * 1.0 / len(pred))
    return loss
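
A quick usage sketch with assumed arrays; importing numpy under the alias sp makes the snippet's sp.log/sp.add/sp.sqrt calls resolve (older SciPy re-exported these NumPy functions):

import numpy as np
import numpy as sp

act = np.array([10.0, 20.0, 30.0])
pred = np.array([12.0, 18.0, 33.0])

print(RMSLE(act, pred))                 # root mean squared logarithmic error
# equivalent one-liner for comparison
print(np.sqrt(np.mean((np.log1p(pred) - np.log1p(act)) ** 2)))
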
Example no. 20
            while epsilon >= 1e-2 and iter <= 20:
                iter += 1
                if iter == 1:
                    theta_ref = theta_ini.value
                else:
                    theta_ref = theta_sol

                term_x = sp.divide(
                    1,
                    sp.multiply(
                        sp.subtract(theta_ref, 1),
                        sp.matmul(d2d_to_d2d_gains_diag, uav_to_d2d_gains)))
                term_y = sp.add(
                    sp.multiply(
                        sp.subtract(theta_ref, 1),
                        sp.matmul(sp.transpose(d2d_to_d2d_gains_diff),
                                  uav_to_d2d_gains)),
                    sp.divide(1, eta * power_UAV))

                a_1 = sp.add(
                    sp.divide(
                        sp.multiply(
                            2,
                            sp.log(
                                sp.add(
                                    1, sp.divide(1,
                                                 sp.multiply(term_x,
                                                             term_y))))),
                        theta_ref),
                    sp.divide(
                        2,
Example no. 21
    def __init__(self, path, datatype, container=0, sizeTD1=0, process=False,
                 lb=0, phase=0, ls=0, zf=0, ft_only=[], debug=False,
                 hiper_skip_footer=0, hiper_skip_header=3, endianess="<",
                 maxLoad=0):
        """ This reads the data """
        #plt.close()
        if datatype == '':
            print("No Datatype - Setting it to ntnmr")
            datatype = "ntnmr"

        self.carrier = 0
        self.allFid = []
        self.allFid.append([])
        self.sizeTD1 = sizeTD1
        self.title = ['no title']
        self.vdList = []
        self.files = [] #here we store open files so we can close them later

        self.parDictionary = {}

        self.debug = debug

        if self.debug: print("The datatype is {0}".format(datatype))

        if datatype == "Hiper":
            """Hiper EPR Data import for EPR experiments at St Andrews, UK.

            Note you will have to adjust the skip_footer parameter,
            depending on the length of the pulse programme that is appended to the data file."""

            data = np.genfromtxt(path, skip_header = hiper_skip_header, skip_footer = hiper_skip_footer, delimiter = ",")
            self.sizeTD1 = 1

            timeList = data[:, 0]
            iData = data[:, 1]
            qData = data[:, 2]
            self.sizeTD2 = len(qData)
            if self.debug: print("sizeTD2: ", self.sizeTD2)

            dwellTime = (timeList[1] - timeList[0])*1e-9
            self.sweepWidthTD2 = int(1. /dwellTime)
            if self.debug: print("SweepWidthTD2: ", self.sweepWidthTD2)

            self.allFid[0].append(iData + 1j*qData)
            self.fidTime = np.linspace(0, (self.sizeTD2-1)*dwellTime, num = self.sizeTD2)


        if datatype == "Magritek":

            #get sweep WidthTD2 based on dweelTime in acqu.par
            if os.path.isfile(path + "/acqu.par"):

                f_acqu = open(path + "/acqu.par", "r")

                count = 0
                while True:
                    count += 1
                    line = f_acqu.readline().strip()
                    if "=" in line:
                        line = line.split("=")
                    elif len(line) == 0 or count > 1000:
                        if self.debug: print("Ended dreading acqus file at line ", count)
                        break
                    else:
                        continue

                    if len(line[0]) > 1:
                        self.parDictionary[line[0].strip()] = line[1].strip()


                self.sweepWidthTD2 = int(1./(float(self.parDictionary["dwellTime"])*1e-6))
                if self.debug: print("SweepWidthTD2: ", self.sweepWidthTD2)


            if os.path.isfile(path + "/data.1d"):
                f = open(path + "/data.1d", "rb")

                f.seek(12)
                print("Format: ", struct.unpack('<i', f.read(4))[0])

                #get this information out of the acqu file.
                f.seek(16)
                self.sizeTD2 = struct.unpack('<i', f.read(4))[0]

                print("Size TD2: ", self.sizeTD2)

                f.seek(32)
                dwellTime = 1./self.sweepWidthTD2
                self.fidTime = np.linspace(0, (self.sizeTD2-1)*dwellTime, num = self.sizeTD2)

                #t just contains floats with the time
                t = struct.unpack('<' + 'f'*self.sizeTD2, f.read(4*self.sizeTD2))

                data1 = struct.unpack('<' + 'f'*self.sizeTD2*2, f.read(4*self.sizeTD2*2))

                realPart = np.array(data1[::2])
                imagPart = np.array(data1[1::2])

                self.allFid[0].append(realPart + 1j*imagPart)

                self.frequency = np.linspace(-self.sweepWidthTD2/2,self.sweepWidthTD2/2, 2048)

            elif os.path.isfile(path + "/data.2d"):
                f = open(path + "/data.2d", "rb")

                f.seek(12)
                print("Format: ", struct.unpack('<i', f.read(4))[0])

                #get this information out of the acqu file.
                f.seek(16)
                self.sizeTD2 = struct.unpack('<i', f.read(4))[0]
                self.sizeTD1 = struct.unpack('<i', f.read(4))[0]

                print("Size TD2: ", self.sizeTD2)
                print("Size TD1: ", self.sizeTD1)

                f.seek(32)
                dwellTime = 1./self.sweepWidthTD2

                self.data1 = struct.unpack('<' + 'f'*(self.sizeTD2*self.sizeTD1*2), f.read(4*self.sizeTD2*self.sizeTD1*2))

                self.realStuff = np.array(self.data1[::2])
                self.imagStuff = np.array(self.data1[1::2])

                self.complexData = self.realStuff + 1j*self.imagStuff

                self.allFid[0] = np.split(self.complexData, [self.sizeTD2*(i+1) for i in range(self.sizeTD1-1)])

                self.fidTime = np.linspace(0, (self.sizeTD2-1)*dwellTime, num = self.sizeTD2)

            else:
                print("No 1D file found.")

        if datatype == "varian":

            # read the header file
            if os.path.isfile(path + "/procpar"):
                parFile = open(path + "/procpar", 'r')
                rows = parFile.readlines()
                lineCounter = 0
                for line in rows:
                    if line.find("np ") > -1:
                        nextLine = rows[lineCounter+1]
                        params = nextLine.split(" ")
                        totalComplexPoints = int(params[1])  # np counts real + imaginary points
                        self.sizeTD2 = totalComplexPoints // 2
                    elif line.find("acqcycles") > -1:
                        nextLine = rows[lineCounter+1]
                        params = nextLine.split(" ")
                        self.sizeTD1 = int(params[1])
                        if self.sizeTD1 > 1:
                            self.is2D = True
                        else:
                            self.is2D = False
                    lineCounter = lineCounter+1
            else:
                print("No procpar file found.")

            # read the binary data file
            if os.path.isfile(path + "/fid"):
                specpoints = self.sizeTD2
                headerskip_init = 8
                headerskip = 7
                f = open(path + "/fid", 'rb');
                data_array = np.fromfile(f, '>f', -1)
                if not self.is2D:
                    self.allFid[0] = data_array[(headerskip_init + headerskip)::2] + 1j*data_array[(headerskip_init + headerskip + 1)::2]
                else:
                    Nacq = (len(data_array) - headerskip_init) // (2 * specpoints + headerskip)
                    if Nacq != self.sizeTD1:
                        print("warning: inconsistent sizes")
                    for n in range(0, Nacq):
                        skipn = headerskip_init + (n+1)*headerskip + n*2*specpoints
                        realPart = data_array[skipn+0:skipn+2*specpoints:2]
                        imagPart = 1j*data_array[skipn+1:skipn+2*specpoints:2]
                        self.allFid[0].append(sp.add(realPart, imagPart))
            else:
                print("No fid file found.")

        if datatype == 'TopSpinOld':
            self.f = open(path, mode='rb')
            self.sizeTD2 = 1
            self.sizeTD1 = int(os.stat(path).st_size) // 8
            self.data = struct.unpack('>' + 'i'*(self.sizeTD2*2*self.sizeTD1), self.f.read(self.sizeTD2*2*self.sizeTD1*4))
            for i in range(0,  self.sizeTD1):
                #print i
                realPart = self.data[i*self.sizeTD2*2:(i+1)*self.sizeTD2*2:2]
                imagPart = sp.multiply(self.data[i*self.sizeTD2*2+1:(i+1)*self.sizeTD2*2+1:2], 1j)
                self.allFid[0].append(sp.add(realPart, imagPart))

        if datatype == 'TopSpin':
            if self.debug:
                print("hi, this is self.debug for the TopSpin datatype")
            #The acqus file contains the spectral width SW_h and 2*SizeTD2 as ##$TD
            #The acqu2s file contains TD1 as ##$TD
            directory = os.path.dirname(path)
            acqusFile = open(directory + "/acqus", mode='r')
            self.files.append(acqusFile)

            if self.debug:
                print("Importing TopSpin data")

            #check if acqu2sfile exists, if yes, experiment is 2D!
            if os.path.isfile(directory + "/acqu2s"):
                acqu2sFile = open(directory + "/acqu2s", mode='r')
                self.files.append(acqu2sFile)
                acqu2File = open(directory + "/acqu2", mode='r')
                self.files.append(acqu2File)

                self.is2D = True
            else:
                self.is2D = False
                self.sizeTD1 = 1

            if self.debug:
                print("2D: ", self.is2D)

            #this could be crafted into a common routine which gives names of parameters
            #parameters and works the same for e.g., spinsight and topspin
            if self.debug:
                print("reading acqus file")
            count = 0
            while True:
                if self.debug:
                    print("count = ", count)
                #try:
                count += 1
                line = acqusFile.readline().strip()
                if self.debug:
                    print(line)
                if "=" in line:
                    line = line.split("=")
                elif len(line) > 0:
                    line = line.split(" ")
                elif len(line) == 0 or count > 1000:
                    if self.debug: print("Ended reading acqus file at line ", count)
                    break
                else:
                    continue

                    #print line[0]
                if line[0] == "##$SW_h":
                    #this line might be chopping the last digit off....
                    #self.sweepWidthTD2 = int(float(line[1][:-1]))
                    self.sweepWidthTD2 = int(float(line[1]))
                    if self.debug: print("SweepWidthTD2: ", self.sweepWidthTD2)
                elif line[0] == "##$TD":
                    self.sizeTD2 = int(int(line[1])/2)
                    if self.debug: print("sizeTD2: ", self.sizeTD2)
                elif line[0] == "##$SFO1":
                    self.carrier = float(line[1])*1e6
                    if self.debug: print("SFO1:", self.carrier)

                elif len(line) == 0:
                    break


                if len(line[0]) > 1:
                    if "@" in line[-1]:
                        #this line contains date, time, some unknown fields and the user; it does not work with all Bruker files, hence the try/except
                        try:
                            self.parDictionary["date"] = line[1].strip()
                            self.parDictionary["time"] = line[2].strip()
                        except:
                            pass
                    elif line[0] == "##$D":
                        delays1 = acqusFile.readline().strip()
                        delays2 = acqusFile.readline().strip()
                        self.parDictionary["d"] = [float(d) for d in delays1.strip().split(" ")] + [float(d) for d in delays2.strip().split(" ")]

                    elif line[0] == "##$L":
                        loopCounters = acqusFile.readline().strip()
                        self.parDictionary["l"] = [float(l) for l in loopCounters.strip().split(" ")]

                    else:
                        if self.debug:
                            print("the catch all else")
                        if len(line) > 1:
                            self.parDictionary[line[0][2:].strip()] = line[1].strip()
                        else:
                            if self.debug:
                                print("skipped too short line")


            if self.is2D == True:
                if self.debug:
                    print("reading acqu2s file")
                count = 0
                while True:
                    if self.debug:
                        print("count = ", count)
                    #try:
                    count += 1
                    line = acqu2sFile.readline().strip()
                    if "=" in line:
                        line = line.split("=")
                    elif len(line) == 0 or count > 1000:
                        if self.debug: print("Ended reading acqu2s file at line ", count)
                        break
                    else:
                        continue

                    #print line[0]
                    if line[0] == "##$TD" and self.sizeTD1 == 0:
                        self.sizeTD1 = int(line[1])
                        if self.debug: print("sizeTD1: ", self.sizeTD1)
                    elif len(line) == 0:
                        break

                if os.path.isfile(directory + "/vdlist"):
                    if self.debug: print("VD File exists!")
                    self.vdList = np.loadtxt(directory + "/vdlist")

            if self.debug:
                print("TD2: ", self.sizeTD2)
                print("TD1: ", self.sizeTD1)
                print("Carrier:", self.carrier)

            # if self.is2D:
            #     self.f = open(path + "/ser", mode='rb')
            # else:
            #     self.f = open(path + "fid", mode='rb')#
            #           dataString = self.f.read(self.sizeTD2*2*self.sizeTD1*4)
            #          if self.debug: print "len(dataString): ", len(dataString)
            #         self.f.close()
            #
            #           # here we read the FID data from fid/ser file
            #          # first convert the datasting into a list of numbers:
            # if self.debug: print "Endianess: ", endianess
            #     self.data = struct.unpack(endianess + 'i'*(self.sizeTD2*2*self.sizeTD1), dataString)


            if self.is2D:
                self.f = open(path + "/ser", mode='rb')
            else:
                self.f = open(path + "fid", mode='rb')

            self.files.append(self.f)

            dataString = np.frombuffer(self.f.read(), dtype = endianess + "i4")
            if self.debug: print("len(dataString) new: ", len(dataString))

            self.data = dataString
            #this is not how it should be done.
            if self.sizeTD2 == 0:
                self.sizeTD2 = int(len(self.data) / self.sizeTD1 / 2)

            dwellTime = 1./self.sweepWidthTD2
            self.fidTime = np.linspace(0, (self.sizeTD2-1)*dwellTime, num = self.sizeTD2)

            # here we create one array of complex numbers for each of the FIDs
            # i runs over all FIDs in a ser file; in case of a fid file, i = 0
            # TD1 is number of FIDs, TD2 is number of datapoints in each FID
            if maxLoad > 0:
               self.sizeTD1 = maxLoad

            for i in range(0,  self.sizeTD1):
                #print "sizeTD2: ", self.sizeTD2
                #print i
                realPart = self.data[i*self.sizeTD2*2:(i+1)*self.sizeTD2*2:2]
                imagPart = sp.multiply(self.data[i*self.sizeTD2*2+1:(i+1)*self.sizeTD2*2+1:2], 1j)
                self.allFid[0].append(sp.add(realPart, imagPart))

            # here we read the experiment title (only the one stored in pdata/1):
            # could be made to read titles from all pdata folders (if needed)
            try:
                pathToTitle = directory + '/pdata/1/title'
                titleFile = open(pathToTitle, mode='r')
                self.files.append(titleFile)
                title = list(titleFile)
                self.title = [line.strip() for line in title]
            except:
                if self.debug:
                    print("No title file.")
                else:
                    pass
            #close the files we opened:
            for item in self.files:
                item.close()

            # delete all file handles so that nmrdata objects can be pickled.
            self.files = []
            del self.f

        if datatype == 'spinsight':
            chStr = "" # first read the channel, if succesful read the carrier frequency.
            carrierStr = "carrier string not assigned"
            self.is2D = False
            dataFile = path + "/data"
            print("dataFile is: ", dataFile)
            self.f = open(dataFile, mode='rb')
            self.f.seek(0)

            acqFile = path + "/acq"
            self.fACQ = open(acqFile)
            count = 0
            while True:
                count += 1
                try:
                    line = self.fACQ.readline().strip().split("=")
                    print(line)
                    if line[0] == "ch1":
                        chStr = line[1]
                        carrierStr = "sf" + chStr
                    elif line[0] == carrierStr:
                        self.carrier = float(line[1][:-1])*1e6
                    if line[0] == "dw":
                        print("Hellop: ", line[1])
                        self.sweepWidthTD2 = int(1/float(line[1][:-1]))
                        #print "sweepWidthTD2: ", self.sweepWidthTD2
                    elif line[0] == "array_num_values_pd" or line[0] == "array_num_values_tau" or line[0] == "array_num_values_pw90X" or line[0] == "array_num_values_tau1":
                        self.sizeTD1 = int(line[1])
                        self.is2D = True
                    elif len(line) < 2 and count > 10:
                        break

                except:
                    break

            #check if file named apnd exists
            if self.is2D == True:
                self.sizeTD2 = int(os.stat(dataFile).st_size) // 8 // self.sizeTD1
            else:
                self.sizeTD2 = int(os.stat(dataFile).st_size) // 8
                print("sizeTD2 is: ", self.sizeTD2)
                self.sizeTD1 = 1
                print("sizeTD1 is: ", self.sizeTD1)

            #print "Length 1: ", self.sizeTD1*self.sizeTD2*2
            #print "Length 2: ", len(self.f.read(self.sizeTD1*self.sizeTD2*2*4))
            self.f.seek(0)
            self.data = struct.unpack('>' + 'i'*self.sizeTD1*self.sizeTD2*2, self.f.read(self.sizeTD1*self.sizeTD2*2*4))
            print("Length of data: ", len(self.data))

            for i in range(self.sizeTD1):
                print(i)
                realPart = np.array(self.data[i*self.sizeTD2:(i+1)*self.sizeTD2])
                imagPart = np.array(self.data[self.sizeTD1*self.sizeTD2+i*self.sizeTD2:self.sizeTD1*self.sizeTD2+(i+1)*self.sizeTD2])
                print("Length realPart: ", len(realPart))
                print("Length imagPart: ", len(imagPart))
                self.allFid[0].append(realPart + 1j*imagPart)

            print("sizeTD1: ", self.sizeTD1)
            self.fidTime = np.linspace(0, (self.sizeTD2-1)/float(self.sweepWidthTD2), self.sizeTD2)


        if datatype == 'ntnmr':
            """ File information taken from J. van Becks matNMR, thanks! """
            self.f = open(path, mode='rb')

            """ Length """
            self.f.seek(16)
            self.structureSize = struct.unpack('<I', self.f.read(4))[0]

            #SizeTD2
            self.f.seek(20)
            self.sizeTD2 = struct.unpack('<I', self.f.read(4))[0]

            #SizeTD1
            if sizeTD1 > 0:
                self.sizeTD1 = sizeTD1
            else:
                self.f.seek(24)
                self.sizeTD1 = struct.unpack('<I', self.f.read(4))[0]

            #SpectralFrequencyTD2/Anregungsfrequenz
            self.f.seek(104)
            self.spectralFrequencyTD2 = struct.unpack('<d', self.f.read(8))[0]*1e6
            print('spec TD2 is', self.spectralFrequencyTD2)

            #SpectralFrequencyTD1
            self.f.seek(112)
            self.spectralFrequencyTD1 = struct.unpack('<d', self.f.read(8))[0]
            print("TD1 is", self.sizeTD1)

            #SweepWidthTD2, SampleRate?
            self.f.seek(260)
            self.sweepWidthTD2 = struct.unpack('<d', self.f.read(8))[0]

            #SweepWidthTD1
            self.f.seek(268)
            self.sweepWidthTD1 = struct.unpack('<d', self.f.read(8))[0]


            #a, Daten im Float32, f und 4
            self.f.seek(1056)
            #                self.sizeTD1 = 581
            #print "The length is", len(self.f.read(self.sizeTD2*2*self.sizeTD1*4))
            self.data = struct.unpack('<' + 'f'*(self.sizeTD2*2*self.sizeTD1), self.f.read(self.sizeTD2*2*self.sizeTD1*4))
            self.f.close()

            for i in range(0,  self.sizeTD1):
                #print i
                realPart = self.data[i*self.sizeTD2*2:(i+1)*self.sizeTD2*2:2]
                imagPart = sp.multiply(self.data[i*self.sizeTD2*2+1:(i+1)*self.sizeTD2*2+1:2], 1j)
                self.allFid[0].append(sp.add(realPart, imagPart))
            print("Data imported, Number of Experiments: ", self.sizeTD1)

        if process == True:
            self.process(ls = ls, zf = zf, lb = lb, phase = phase, ft_only = ft_only)

        if datatype == 'TopSpin4':
            if self.debug:
                print("hi, this is self.debug for the TopSpin4 datatype")
            #The acqus file contains the spectral width SW_h and 2*SizeTD2 as ##$TD
            #The acqu2s file contains TD1 as ##$TD
            directory = os.path.dirname(path)
            acqusFile = open(directory + "/acqus", mode='r')
            self.files.append(acqusFile)

            if self.debug:
                print("Importing TopSpin4 data")

            #check if acqu2sfile exists, if yes, experiment is 2D!
            if os.path.isfile(directory + "/acqu2s"):
                acqu2sFile = open(directory + "/acqu2s", mode='r')
                self.files.append(acqu2sFile)
                acqu2File = open(directory + "/acqu2", mode='r')
                self.files.append(acqu2File)

                self.is2D = True
            else:
                self.is2D = False
                self.sizeTD1 = 1

            if self.debug:
                print("2D: ", self.is2D)

            #this could be crafted into a common routine which gives names of parameters
            #parameters and works the same for e.g., spinsight and topspin
            if self.debug:
                print("reading acqus file")
            count = 0
            while True:
                if self.debug:
                    print("count = ", count)
                #try:
                count += 1
                line = acqusFile.readline().strip()
                if self.debug:
                    print(line)
                if "=" in line:
                    line = line.split("=")
                elif len(line) > 0:
                    line = line.split(" ")
                elif len(line) == 0 or count > 1000:
                    if self.debug: print("Ended reading acqus file at line ", count)
                    break
                else:
                    continue

                    #print line[0]
                if line[0] == "##$SW_h":
                    #this line might be chopping the last digit off....
                    #self.sweepWidthTD2 = int(float(line[1][:-1]))
                    self.sweepWidthTD2 = int(float(line[1]))
                    if self.debug: print("SweepWidthTD2: ", self.sweepWidthTD2)
                elif line[0] == "##$TD":
                    self.sizeTD2 = int(int(line[1])/2)
                    if self.debug: print("sizeTD2: ", self.sizeTD2)
                elif line[0] == "##$SFO1":
                    self.carrier = float(line[1])*1e6
                    if self.debug: print("SFO1:", self.carrier)

                elif len(line) == 0:
                    break


                if len(line[0]) > 1:
                    if "@" in line[-1]:
                        #this line contains date, time, some unknown fields and the user; it does not work with all Bruker files, hence the try/except
                        try:
                            self.parDictionary["date"] = line[1].strip()
                            self.parDictionary["time"] = line[2].strip()
                        except:
                            pass
                    elif line[0] == "##$D":
                        delays1 = acqusFile.readline().strip()
                        delays2 = acqusFile.readline().strip()
                        self.parDictionary["d"] = [float(d) for d in delays1.strip().split(" ")] + [float(d) for d in delays2.strip().split(" ")]

                    elif line[0] == "##$L":
                        loopCounters = acqusFile.readline().strip()
                        self.parDictionary["l"] = [float(l) for l in loopCounters.strip().split(" ")]

                    else:
                        if self.debug:
                            print("the catch all else")
                        if len(line) > 1:
                            self.parDictionary[line[0][2:].strip()] = line[1].strip()
                        else:
                            if self.debug:
                                print("skipped too short line")


            if self.is2D == True:
                if self.debug:
                    print("reading acqu2s file")
                count = 0
                while True:
                    if self.debug:
                        print("count = ", count)
                    #try:
                    count += 1
                    line = acqu2sFile.readline().strip()
                    if "=" in line:
                        line = line.split("=")
                    elif len(line) == 0 or count > 1000:
                        if self.debug: print("Ended reading acqu2s file at line ", count)
                        break
                    else:
                        continue

                    #print line[0]
                    if line[0] == "##$TD" and self.sizeTD1 == 0:
                        self.sizeTD1 = int(line[1])
                        if self.debug: print("sizeTD1: ", self.sizeTD1)
                    elif len(line) == 0:
                        break

                if os.path.isfile(directory + "/vdlist"):
                    if self.debug: print("VD File exists!")
                    self.vdList = np.loadtxt(directory + "/vdlist")

            if self.debug:
                print("TD2: ", self.sizeTD2)
                print("TD1: ", self.sizeTD1)
                print("Carrier:", self.carrier)

            # if self.is2D:
            #     self.f = open(path + "/ser", mode='rb')
            # else:
            #     self.f = open(path + "fid", mode='rb')#
            #           dataString = self.f.read(self.sizeTD2*2*self.sizeTD1*4)
            #          if self.debug: print "len(dataString): ", len(dataString)
            #         self.f.close()
            #
            #           # here we read the FID data from fid/ser file
            #          # first convert the datasting into a list of numbers:
            # if self.debug: print "Endianess: ", endianess
            #     self.data = struct.unpack(endianess + 'i'*(self.sizeTD2*2*self.sizeTD1), dataString)


            if self.is2D:
                self.f = open(path + "/ser", mode='rb')
            else:
                self.f = open(path + "fid", mode='rb')

            self.files.append(self.f)

            dataString = np.frombuffer(self.f.read(), dtype = endianess + "f8")
            if self.debug: print("len(dataString) new: ", len(dataString))

            self.data = dataString
            #this is not how it should be done.
            if self.sizeTD2 == 0:
                self.sizeTD2 = int(len(self.data) / self.sizeTD1 / 2)

            dwellTime = 1./self.sweepWidthTD2
            self.fidTime = np.linspace(0, (self.sizeTD2-1)*dwellTime, num = self.sizeTD2)

            # here we create one array of complex numbers for each of the FIDs
            # i runs over all FIDs in a ser file; in case of a fid file, i = 0
            # TD1 is number of FIDs, TD2 is number of datapoints in each FID
            if maxLoad > 0:
               self.sizeTD1 = maxLoad

            for i in range(0,  self.sizeTD1):
                #print "sizeTD2: ", self.sizeTD2
                #print i
                realPart = self.data[i*self.sizeTD2*2:(i+1)*self.sizeTD2*2:2]
                imagPart = sp.multiply(self.data[i*self.sizeTD2*2+1:(i+1)*self.sizeTD2*2+1:2], 1j)
                self.allFid[0].append(sp.add(realPart, imagPart))

            # here we read the experiment title (only the one stored in pdata/1):
            # could be made to read titles from all pdata folders (if needed)
            try:
                pathToTitle = directory + '/pdata/1/title'
                titleFile = open(pathToTitle, mode='r')
                self.files.append(titleFile)
                title = list(titleFile)
                self.title = [line.strip() for line in title]
            except:
                if self.debug:
                    print("No title file.")
                else:
                    pass
            #close the files we opened:
            for item in self.files:
                item.close()

            # delete all file handles so that nmrdata objects can be pickled.
            self.files = []
            del self.f
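
The per-FID loops above all follow the same pattern: slice interleaved real/imaginary samples out of the unpacked buffer and combine them into complex FIDs. A compact numpy sketch of that step with fake data (the sizes are assumed):

import numpy as np

sizeTD1, sizeTD2 = 4, 8                                  # assumed FIDs and points per FID
data = np.arange(sizeTD1 * sizeTD2 * 2, dtype=float)     # stand-in for the unpacked file data

allFid = []
for i in range(sizeTD1):
    block = data[i * sizeTD2 * 2:(i + 1) * sizeTD2 * 2]
    allFid.append(block[0::2] + 1j * block[1::2])        # real + 1j*imag, as sp.add does above

print(allFid[0])
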
Example no. 22
def get_sphere_vectors(features):

    fshape = features.shape
    features.shape = fshape[0], -1
    npoints, ndims = features.shape

    if npoints < MEAN_MAX_NPOINTS:
        fmean = features.mean(0)
    else:
        # - try to optimize memory usage...
        sel = features[:MEAN_MAX_NPOINTS]
        fmean = sp.empty_like(sel[0,:])

        sp.add.reduce(sel, axis=0, dtype="float32", out=fmean)

        curr = sp.empty_like(fmean)
        npoints_done = MEAN_MAX_NPOINTS
        while npoints_done < npoints:

            # check if we can overwrite (other process)
            if path.exists(output_fname) and not overwrite:
                warnings.warn("not allowed to overwrite %s"  % output_fname)
                return

            sel = features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]
            sp.add.reduce(sel, axis=0, dtype="float32", out=curr)
            sp.add(fmean, curr, fmean)
            npoints_done += MEAN_MAX_NPOINTS                

        #fmean = features[:MEAN_MAX_NPOINTS].sum(0)
        #npoints_done = MEAN_MAX_NPOINTS
        #while npoints_done < npoints:
        #    fmean += features[npoints_done:npoints_done+MEAN_MAX_NPOINTS].sum(0)
        #    npoints_done += MEAN_MAX_NPOINTS

        fmean /= npoints

    if npoints < STD_MAX_NPOINTS:
        fstd = features.std(0)
    else:
        # - try to optimize memory usage...

        sel = features[:MEAN_MAX_NPOINTS]

        mem = sp.empty_like(sel)
        curr = sp.empty_like(mem[0,:])

        seln = sel.shape[0]
        sp.subtract(sel, fmean, mem[:seln])
        sp.multiply(mem[:seln], mem[:seln], mem[:seln])
        fstd = sp.add.reduce(mem[:seln], axis=0, dtype="float32")

        npoints_done = MEAN_MAX_NPOINTS
        while npoints_done < npoints:

            # check if we can overwrite (other process)
            if path.exists(output_fname) and not overwrite:
                warnings.warn("not allowed to overwrite %s"  % output_fname)
                return

            sel = features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]
            seln = sel.shape[0]
            sp.subtract(sel, fmean, mem[:seln])
            sp.multiply(mem[:seln], mem[:seln], mem[:seln])
            sp.add.reduce(mem[:seln], axis=0, dtype="float32", out=curr)
            sp.add(fstd, curr, fstd)

            npoints_done += MEAN_MAX_NPOINTS

        # slow version:
        #fstd = ((features[:MEAN_MAX_NPOINTS]-fmean)**2.).sum(0)
        #npoints_done = MEAN_MAX_NPOINTS
        #while npoints_done < npoints:
        #    fstd += ((features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]-fmean)**2.).sum(0)
        #    npoints_done += MEAN_MAX_NPOINTS

        fstd = sp.sqrt(fstd/npoints)

    fstd[fstd==0] = 1
    sphere_vectors = (fmean, fstd)
    features.shape = fshape

    return sphere_vectors
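
A small numpy sketch of the chunked mean/std ("sphering") computation, checked against the direct versions; MEAN_MAX_NPOINTS is assumed to be the chunk size and the data is random:

import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=(1000, 16)).astype("float32")
MEAN_MAX_NPOINTS = 256                        # assumed chunk size

fsum = np.zeros(features.shape[1], dtype="float32")
for start in range(0, len(features), MEAN_MAX_NPOINTS):
    fsum += features[start:start + MEAN_MAX_NPOINTS].sum(axis=0)
fmean = fsum / len(features)

ssum = np.zeros_like(fsum)
for start in range(0, len(features), MEAN_MAX_NPOINTS):
    chunk = features[start:start + MEAN_MAX_NPOINTS] - fmean
    ssum += (chunk * chunk).sum(axis=0)
fstd = np.sqrt(ssum / len(features))
fstd[fstd == 0] = 1

print(np.allclose(fmean, features.mean(0), atol=1e-4),
      np.allclose(fstd, features.std(0), atol=1e-4))
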
Example no. 23
    #obtain genotype & phenotype for samples with complete phenotype
    MY = SP.array(Y).transpose()
    RMY = MY[MY[:, 0] > 0]
    RY = RMY[:, 0]
    RY = (RY - RY.mean()) / RY.std()
    RX = RMY[:, 1:]

    #train null model for these samples
    COR = 1. / nf * SP.dot(RX, RX.transpose())
    res = lmm_lasso.train_nullmodel(RY, COR)
    delta = SP.exp(res[2])
    print(delta)

    #get fake phenotype
    FX = MY[:, 1:]
    FCOR = 1. / nf * SP.dot(FX, FX.transpose())
    D = SP.diag(SP.array([delta] * len(Y[1])))
    FY = SP.random.multivariate_normal(SP.array([0] * len(Y[1])),
                                       SP.add(FCOR, D))
    FY = (FY - FY.mean()) / FY.std()
    FY = SP.array([FY])
    with open("phenotype75.csv", "wb") as f:
        writer = csv.writer(f)
        writer.writerows(FY.transpose())

    #validate fake phenotype, that it has similar delta as we start with
    res = lmm_lasso.train_nullmodel(FY.transpose(), FCOR)
    delta = SP.exp(res[2])
    print(delta)
Example no. 24
    def __init__(self, path, endianess=">", debug=False, maxLoad=0):
        """This class only takes the following arguments:

        - path: path to an NMR experiment

        All other arguments are optional Keyword Arguments:
        - endianess = ">"
        - debug = False: set to True to output additional debugging information.
        - maxLoad = 0: set to a positive integer to limit the number of FIDs
          that are loaded and processed."""
        super().__init__(debug=debug)

        if self.debug:
            print("hi, this is self.debug for the RS2D datatype")

        #The acqus file contains the spectral width SW_h and 2*SizeTD2 as ##$TD
        #The acqu2s file contains TD1 as ##$TD
        directory = os.path.dirname(path)
        acqusFile = directory + "/header.xml"

        if self.debug:
            print("Importing RS2D data")

        with open(acqusFile) as fp:
            soup = BeautifulSoup(fp, 'lxml-xml')

        allEntries = soup.find_all("entry")

        for e in allEntries:
            keyName = e.key.string
            keyValue = e.value.value.string
            res = e.find_all("value", {"xsi:type": "numberParam"})

            if len(res) > 0:
                r = float(e.value.value.string)
                if r.is_integer():
                    self.parDictionary[e.key.string] = int(r)
                else:
                    self.parDictionary[e.key.string] = r

        self.sizeTD1 = self.parDictionary["ACQUISITION_MATRIX_DIMENSION_2D"]
        self.sizeTD2 = self.parDictionary["ACQUISITION_MATRIX_DIMENSION_1D"]
        self.sweepWidthTD2 = self.parDictionary["SPECTRAL_WIDTH"]

        if self.sizeTD1 > 1:
            self.is2D = True
        else:
            self.is2D = False

        if self.debug: print("2D: ", self.is2D)

        if self.debug:
            print("TD2: ", self.sizeTD2)
            print("TD1: ", self.sizeTD1)
            print("Carrier:", self.carrier)

        with open(path + "/data.dat", mode='rb') as f:
            dataString = struct.unpack(
                ">" + "f" * (self.sizeTD2 * self.sizeTD1) * 2, f.read())

        if self.debug: print("len(dataString) new: ", len(dataString))

        self.data = dataString

        if self.sizeTD2 == 0:
            self.sizeTD2 = int(len(self.data) / self.sizeTD1 / 2)

        self.dwellTime = 1. / self.sweepWidthTD2
        self.fidTime = np.linspace(0, (self.sizeTD2 - 1) * self.dwellTime,
                                   num=self.sizeTD2)

        # here we create one array of complex numbers for each of the FIDs
        # i runs over all fids in a ser file, in case of a fid file i = 0
        # TD1 is number of FIDs, TD2 is number of datapoints in each FID
        if maxLoad > 0:
            self.sizeTD1 = maxLoad

        for i in range(0, self.sizeTD1):
            #print "sizeTD2: ", self.sizeTD2
            #print i
            realPart = self.data[i * self.sizeTD2 * 2:(i + 1) * self.sizeTD2 *
                                 2:2]
            imagPart = sp.multiply(
                self.data[i * self.sizeTD2 * 2 + 1:(i + 1) * self.sizeTD2 * 2 +
                          1:2], 1j)
            self.allFid[0].append(sp.add(realPart, imagPart))

        # here we read the experiment title (only the one stored in pdata/1):
        # could be made to read titles from all pdata folders (if needed)
        try:
            pathToTitle = directory + '/pdata/1/title'
            titleFile = open(pathToTitle, mode='r')
            title = list(titleFile)
            self.title = [line.strip() for line in title]
        except:
            if self.debug:
                print("No title file.")
            else:
                pass

        self.shiftPoints = self.parDictionary["DIGITAL_FILTER_SHIFT"]
Example no. 25
# This loop runs over the channel realizations (Monte Carlo)
# ############################################################
for Mon in range(max_chan_realizaion):
    d2d_pairs = []
    uav = UAV(height)
    for p in range(max_num_d2d_pairs):
        # d2d_pairs.append(D2DPair(p, coverage_r, d2d_max, low_rx=0.8))
        spread = False
        while not spread:
            pair = D2DPair(p, coverage_r, d2d_max, low_rx=0.0, low_tx=0.0)
            if len(d2d_pairs) == 0:
                break
            for existing_pair in d2d_pairs:
                d1 = sp.sqrt(
                    sp.add(
                        sp.square(sp.subtract(pair.tx_x, existing_pair.tx_x)),
                        sp.square(sp.subtract(pair.tx_y, existing_pair.tx_y))))
                if d1 < 150:
                    spread = False
                    break
                spread = True
        d2d_pairs.append(pair)

    for i in range(max_num_d2d_pairs):
        for j in range(max_num_d2d_pairs):
            max_d2d_to_d2d_gains[i, j, Mon] = sp.divide(
                d2d_pairs[i].loss_to_pair(d2d_pairs[j]), noise_variance)
        max_uav_to_d2d_gains[i, Mon] = sp.divide(
            uav.loss_to_pair(d2d_pairs[i], atg_a, atg_b), noise_variance)

# print max_uav_to_d2d_gains
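
The placement loop above is rejection sampling: a candidate pair is redrawn until its transmitter is at least 150 m from every transmitter already placed. A condensed numpy sketch of that logic with assumed geometry:

import numpy as np

rng = np.random.default_rng(2)
coverage_r, min_sep, num_pairs = 500.0, 150.0, 5     # assumed radius, spacing, pair count

def draw_tx():
    d = coverage_r * rng.random()
    ang = 2 * np.pi * rng.random()
    return np.array([d * np.cos(ang), d * np.sin(ang)])

placed = []
while len(placed) < num_pairs:
    cand = draw_tx()
    if all(np.hypot(*(cand - p)) >= min_sep for p in placed):
        placed.append(cand)

print(np.round(placed, 1))
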
Example no. 26
    test_chan[0:num_d2d_pairs] = uav_to_d2d_gains
    test_chan[num_d2d_pairs:dimen_input] = d2d_to_d2d_gains.ravel()

    vec_chan_test = sp.array([test_chan])
    X_test = vec_chan_test

    test_tau_result = nn_model.predict(X_test, verbose=0)

    test_theta_dnn = 1 / (1 - test_tau_result)

    phi_n_sol = sp.multiply((test_theta_dnn - 1) * eta * power_UAV,
                            uav_to_d2d_gains)
    x_rate = sp.matmul(d2d_to_d2d_gains_diag, sp.transpose(phi_n_sol))
    term_rate = sp.matmul(sp.transpose(d2d_to_d2d_gains_diff),
                          sp.transpose(phi_n_sol)) + 1
    rate_sol_ue = sp.divide(sp.log(sp.add(1, sp.divide(x_rate, term_rate))),
                            test_theta_dnn)
    maximin_rate_test = min(rate_sol_ue)
    term_pow_iter = sp.subtract(1, sp.divide(
        1, test_theta_dnn)) * eta * power_UAV * sp.add(
            1, sp.sum(uav_to_d2d_gains)) + power_cir_UAV

    iter_EE_test = sp.divide(
        sp.multiply(1e3, sp.divide(sp.sum(rate_sol_ue), term_pow_iter)),
        sp.log(2))
    EE_sol_test.append(iter_EE_test)
    tau_sol_test.append(test_tau_result)
    Maxmin_rate_test.append(maximin_rate_test)

# Calculate the total time of solving
time_sol = (time.time() - t0)
Example no. 27
            fmean = sp.empty_like(sel[0,:])

            sp.add.reduce(sel, axis=0, dtype="float32", out=fmean)

            curr = sp.empty_like(fmean)
            npoints_done = MEAN_MAX_NPOINTS
            while npoints_done < npoints:

                # check if we can overwrite (other process)
                if path.exists(output_fname) and not overwrite:
                    warnings.warn("not allowed to overwrite %s"  % output_fname)
                    return
                
                sel = train_features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]
                sp.add.reduce(sel, axis=0, dtype="float32", out=curr)
                sp.add(fmean, curr, fmean)
                npoints_done += MEAN_MAX_NPOINTS                
     
            #fmean = train_features[:MEAN_MAX_NPOINTS].sum(0)
            #npoints_done = MEAN_MAX_NPOINTS
            #while npoints_done < npoints:
            #    fmean += train_features[npoints_done:npoints_done+MEAN_MAX_NPOINTS].sum(0)
            #    npoints_done += MEAN_MAX_NPOINTS

            fmean /= npoints

        if npoints < STD_MAX_NPOINTS:
            fstd = train_features.std(0)
        else:
            # - try to optimize memory usage...
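# ############################################################
# Exemplo n.º 27 and Exemplo n.º 31 accumulate the feature mean (and std) in
# fixed-size chunks with sp.add.reduce so the full array is never reduced in
# one pass. A compact NumPy restatement of that streaming computation
# follows; the helper name and chunk size are illustrative.
import numpy as np

def chunked_mean_std(features, chunk=10000):
    # streaming mean and std over axis 0, one chunk at a time
    n = features.shape[0]
    acc = np.zeros(features.shape[1:], dtype="float64")
    for start in range(0, n, chunk):
        acc += features[start:start + chunk].sum(axis=0, dtype="float64")
    mean = acc / n
    sq = np.zeros_like(acc)
    for start in range(0, n, chunk):
        diff = features[start:start + chunk] - mean
        sq += (diff * diff).sum(axis=0, dtype="float64")
    std = np.sqrt(sq / n)
    std[std == 0] = 1      # avoid division by zero when whitening
    return mean, std
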
Exemplo n.º 28
0
    def __init__(self, path, endianess="<", debug=False, maxLoad=0):
        """This class only takes the following arguments:

        - path: path to an NMR experiment

        All other arguments are optional Keyword Arguments:
        - endianess = "<"
        - debug = False: set to True to output additional debuging information.
        - maxLoad = 0: set to a positive integer to limit the number of FIDs
          that are loaded and processed"""
        super().__init__(debug=debug)

        self.files = []

        if self.debug:
            print("hi, this is self.debug for the TopSpin datatype")

        #The acqus file contains the spectral width SW_h and 2*SizeTD2 as ##$TD
        #The acqu2s file contains TD1 as ##$TD
        directory = os.path.dirname(path)
        acqusFile = open(directory + "/acqus", mode='r')
        self.files.append(acqusFile)

        if self.debug:
            print("Importing TopSpin data")

        #check if acqu2sfile exists, if yes, experiment is 2D.
        if os.path.isfile(directory + "/acqu2s"):
            acqu2sFile = open(directory + "/acqu2s", mode='r')
            self.files.append(acqu2sFile)
            acqu2File = open(directory + "/acqu2", mode='r')
            self.files.append(acqu2File)

            self.is2D = True
            self.sizeTD1 = 0  # will be read from the acqu2s file below
        else:
            self.is2D = False
            self.sizeTD1 = 1

        if self.debug: print("2D: ", self.is2D)

        #this could be crafted into a common routine which gives names of
        #parameters and works the same for e.g., spinsight and topspin

        if self.debug: print("reading acqus file")
        count = 0

        # read the acqusFile
        while True:
            if self.debug: print("count = ", count)

            count += 1
            line = acqusFile.readline().strip()
            if self.debug: print(line)
            if "=" in line:
                line = line.split("=")
            elif len(line) > 0:
                line = line.split(" ")
            elif len(line) == 0 or count > 1000:
                if self.debug:
                    print("Ended reading acqus file at line ", count)
                break
            else:
                continue

            if line[0] == "##$SW_h":
                #this line might be chopping the last digit off....
                #self.sweepWidthTD2 = int(float(line[1][:-1]))
                self.sweepWidthTD2 = int(float(line[1]))
                if self.debug: print("SweepWidthTD2: ", self.sweepWidthTD2)
            elif line[0] == "##$TD":
                self.sizeTD2 = int(int(line[1]) / 2)
                if self.debug: print("sizeTD2: ", self.sizeTD2)
            elif line[0] == "##$SFO1":
                self.carrier = float(line[1]) * 1e6
                if self.debug: print("SFO1:", self.carrier)
            elif len(line) == 0:
                break

            if len(line[0]) > 1:
                if "@" in line[-1]:
                    #this line contains date, time, some unknown stuff and the user; it does not work with all Bruker files, hence the try block
                    try:
                        self.parDictionary["date"] = line[1].strip()
                        self.parDictionary["time"] = line[2].strip()
                    except:
                        pass
                elif line[0] == "##$D":
                    delays1 = acqusFile.readline().strip()
                    delays2 = acqusFile.readline().strip()
                    self.parDictionary["d"] = [
                        float(d) for d in delays1.strip().split(" ")
                    ] + [float(d) for d in delays2.strip().split(" ")]
                elif line[0] == "##$L":
                    loopCounters = acqusFile.readline().strip()
                    self.parDictionary["l"] = [
                        float(l) for l in loopCounters.strip().split(" ")
                    ]
                else:
                    if self.debug: print("the catch all else")
                    if len(line) > 1:
                        self.parDictionary[line[0]
                                           [2:].strip()] = line[1].strip()
                    else:
                        if self.debug: print("skipped too short line")

        if self.is2D:
            if self.debug:
                print("reading acqu2s file")
            count = 0
            while True:
                if self.debug:
                    print("count = ", count)
                #try:
                count += 1
                line = acqu2sFile.readline().strip()
                if "=" in line:
                    line = line.split("=")
                elif len(line) == 0 or count > 1000:
                    if self.debug:
                        print("Ended reading acqu2s file at line ", count)
                    break
                else:
                    continue

                #print line[0]
                if line[0] == "##$TD" and self.sizeTD1 == 0:
                    self.sizeTD1 = int(line[1])
                    if self.debug: print("sizeTD1: ", self.sizeTD1)
                elif len(line) == 0:
                    break

            if os.path.isfile(directory + "/vdlist"):
                if self.debug: print("VD File exists!")
                self.vdList = np.loadtxt(directory + "/vdlist")

        if self.debug:
            print("TD2: ", self.sizeTD2)
            print("TD1: ", self.sizeTD1)
            print("Carrier:", self.carrier)

        if self.is2D:
            self.f = open(path + "/ser", mode='rb')
        else:
            self.f = open(path + "fid", mode='rb')

        self.files.append(self.f)

        dataString = np.frombuffer(self.f.read(), dtype=endianess + "i4")
        if self.debug: print("len(dataString) new: ", len(dataString))

        self.data = dataString

        if self.sizeTD2 == 0:
            self.sizeTD2 = int(len(self.data) / self.sizeTD1 / 2)

        dwellTime = 1. / self.sweepWidthTD2
        self.fidTime = np.linspace(0, (self.sizeTD2 - 1) * dwellTime,
                                   num=self.sizeTD2)

        # here we create one array of complex numbers for each of the FIDs
        # i runs over all FIDs in a ser file; in case of a fid file, i = 0
        # TD1 is number of FIDs, TD2 is number of datapoints in each FID
        if maxLoad > 0:
            self.sizeTD1 = maxLoad

        for i in range(0, self.sizeTD1):
            #print "sizeTD2: ", self.sizeTD2
            #print i
            realPart = self.data[i * self.sizeTD2 * 2:(i + 1) * self.sizeTD2 *
                                 2:2]
            imagPart = sp.multiply(
                self.data[i * self.sizeTD2 * 2 + 1:(i + 1) * self.sizeTD2 * 2 +
                          1:2], 1j)
            self.allFid[0].append(sp.add(realPart, imagPart))

        # here we read the experiment title (only the one stored in pdata/1):
        # could be made to read titles from all pdata folders (if needed)
        try:
            pathToTitle = directory + '/pdata/1/title'
            titleFile = open(pathToTitle, mode='r')
            self.files.append(titleFile)
            title = list(titleFile)
            self.title = [line.strip() for line in title]
        except:
            if self.debug:
                print("No title file.")
            else:
                pass
        #close the files we opened:
        for item in self.files:
            item.close()

        # delete all file handles so that nmrdata objects can be pickled.
        self.files = []
        del self.f
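# ############################################################
# The slicing loop near the end of the constructor above builds each complex
# FID from interleaved (re, im, re, im, ...) int32 samples. A compact,
# hedged restatement using reshape; the helper name and arguments are
# illustrative.
import numpy as np

def interleaved_to_complex(raw_bytes, size_td2, n_fids, endianess="<"):
    data = np.frombuffer(raw_bytes, dtype=endianess + "i4")
    data = data[:n_fids * size_td2 * 2].astype("float64")
    data = data.reshape(n_fids, size_td2, 2)        # (fid, point, re/im)
    return [fid[:, 0] + 1j * fid[:, 1] for fid in data]
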
    coor_ue.append(UE_loc(p, coverage_r, low_tx=0.2))
    x_ue.append(UE_loc.coordinate_ue(coor_ue[p])[0])
    y_ue.append(UE_loc.coordinate_ue(coor_ue[p])[1])

val_pl = []
chan_array = []
for j in xrange(num_uav):
    pl_value = []
    chan_val = []
    for i in xrange(num_ue):
        xUAV, yUAV, xUE, yUE = x_uav[j], y_uav[j], x_ue[i], y_ue[i]
        val_pl_tem = Path_loss(fc, c_vel, alp_g, mu_los, mu_nlos, a, b,
                               noise_var, hUAV, xUAV, yUAV, xUE, yUE)
        pl_value.append(Path_loss.pl_uav_ue(val_pl_tem))

        chan_temp = sp.divide(sp.add(sp.randn(1), 1j * sp.randn(1)),
                              sp.sqrt(2))
        chan_val.append(sp.multiply(Path_loss.pl_uav_ue(val_pl_tem),
                                    chan_temp))
    val_pl.append(pl_value)
    chan_array.append(chan_val)
# ## the size of val_pl is (num_uav, num_ue)
# ## the size of chan_array is (num_uav, num_ue)

Pf = 200
Pr = 100
P_cm = 200
P_0m = 200
P_cir = 9000
Pow_cir = P_cir + num_uav * (P_cm + P_0m)
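# ############################################################
# The chan_temp / chan_val lines above draw a unit-variance circularly
# symmetric complex Gaussian (Rayleigh) fading sample and scale it by the
# path-loss factor. A short sketch; the helper name is illustrative.
import numpy as np

def rayleigh_channel(pathloss_lin):
    h = (np.random.randn() + 1j * np.random.randn()) / np.sqrt(2)   # CN(0, 1)
    return pathloss_lin * h
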
            d2d_to_d2d_gains_diag = sp.subtract(d2d_to_d2d_gains,
                                                d2d_to_d2d_gains_diff)

            # ############################################################
            # This code is used to find the initial point for EEmax algorithm
            # ############################################################

            theta_ini = Parameter(value=1 / (1 - 0.5))
            phi_n_ini = sp.multiply((theta_ini.value - 1) * eta *
                                    sp.divide(power_UAV, num_d2d_pairs),
                                    uav_to_d2d_gains)
            x_rate = sp.matmul(d2d_to_d2d_gains_diag, phi_n_ini)
            term_rate = sp.matmul(sp.transpose(d2d_to_d2d_gains_diff),
                                  phi_n_ini) + 1
            rate_sol_ue = sp.divide(
                sp.log(sp.add(1, sp.divide(x_rate, term_rate))),
                theta_ini.value)
            # print rate_sol_ue
            rmin_ref = min(rate_sol_ue)
            if rmin_ref <= 0.2 * sp.log(2):
                rmin = rmin_ref
            else:
                rmin = 0.2 * sp.log(2)

            pow_ = NonNegative(num_d2d_pairs)
            objective = Minimize(sum_entries(pow_) / theta_ini)

            constraints = []
            c1 = d2d_to_d2d_gains_diag * pow_ >= (
                exp(rmin * theta_ini) - 1) * (d2d_to_d2d_gains_diff * pow_ + 1)
            c2 = 1 / theta_ini * pow_ <= (
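# ############################################################
# The rmin selection above caps the per-user rate target at 0.2*ln(2) nats
# (0.2 bit/s/Hz) unless the initial point already falls below that value.
# A one-line restatement; the helper name is illustrative.
import numpy as np

def rate_target(rate_init, cap_bits=0.2):
    return min(np.min(rate_init), cap_bits * np.log(2))
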
Exemplo n.º 31
0
def kernel_generate_fromcsv(input_csv_fname,
                            input_suffix,
                            output_fname,
                            corrcoef_type = DEFAULT_CORRCOEF_TYPE,
                            nowhiten = DEFAULT_NOWHITEN,
                            variable_name = DEFAULT_VARIABLE_NAME,
                            input_path = DEFAULT_INPUT_PATH,
                            overwrite = DEFAULT_OVERWRITE,
                            ):

    # add matlab's extension to the output filename if needed
    if path.splitext(output_fname)[-1] != ".mat":
        output_fname += ".mat"        

    # can we overwrite ?
    if path.exists(output_fname) and not overwrite:
        warnings.warn("not allowed to overwrite %s"  % output_fname)
        return
        
    # --------------------------------------------------------------------------
    # -- get training and testing filenames from csv 
    print "Processing %s ..." % input_csv_fname
    csvr = csv.reader(open(input_csv_fname))
    rows = [ row for row in csvr ]
    ori_train_fnames = [ row[0] for row in rows if row[2] == "train" ][:LIMIT]
    train_fnames = sp.array([ path.join(input_path, fname+input_suffix) 
                     for fname in ori_train_fnames ][:LIMIT])
    train_labels = sp.array([ row[1] for row in rows if row[2] == "train" ][:LIMIT])
    
    ori_test_fnames = [ row[0] for row in rows if row[2] == "test" ][:LIMIT]
    test_fnames = sp.array([ path.join(input_path, fname+input_suffix) 
                    for fname in ori_test_fnames ][:LIMIT])
    test_labels = sp.array([ row[1] for row in rows if row[2] == "test" ][:LIMIT])

    ntrain = len(train_fnames)
    ntest = len(test_fnames)

    # --------------------------------------------------------------------------
    # -- load features from train filenames
    # set up progress bar
    print "Loading training data ..."
    pbar = ProgressBar(widgets=widgets, maxval=ntrain)
    pbar.start()

    fvector0 = io.loadmat(train_fnames[0])[variable_name].ravel()
    featshape = fvector0.shape
    featsize = fvector0.size

    # go
    train_features = sp.empty((ntrain,) + featshape, dtype='float32')
    error = False    
    for i, fname in enumerate(train_fnames):
        try:
            fvector = io.loadmat(fname)[variable_name].ravel()

        except TypeError:
            print "[ERROR] couldn't open", fname, "deleting it!"
            os.unlink(fname)
            error = True

        except:
            print "[ERROR] unknown error with", fname
            raise
        
        assert(not sp.isnan(fvector).any())
        assert(not sp.isinf(fvector).any())
        train_features[i] = fvector.reshape(fvector0.shape)
        
        pbar.update(i+1)

    pbar.finish()
    print "-"*80

    if error:
        raise RuntimeError("An error occurred (load train). Exiting.")        
        
    # -- preprocess train
    print "Preprocessing train features ..."
    if nowhiten:
        whiten_vectors = None
    else:
        fshape = train_features.shape
        train_features.shape = fshape[0], -1
        npoints, ndims = train_features.shape

        if npoints < MEAN_MAX_NPOINTS:
            fmean = train_features.mean(0)
        else:
            # - try to optimize memory usage...
            sel = train_features[:MEAN_MAX_NPOINTS]
            fmean = sp.empty_like(sel[0,:])

            sp.add.reduce(sel, axis=0, dtype="float32", out=fmean)

            curr = sp.empty_like(fmean)
            npoints_done = MEAN_MAX_NPOINTS
            while npoints_done < npoints:
                sel = train_features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]
                sp.add.reduce(sel, axis=0, dtype="float32", out=curr)
                sp.add(fmean, curr, fmean)
                npoints_done += MEAN_MAX_NPOINTS                
     
            fmean /= npoints

        if npoints < STD_MAX_NPOINTS:
            fstd = train_features.std(0)
        else:
            # - try to optimize memory usage...

            sel = train_features[:MEAN_MAX_NPOINTS]

            mem = sp.empty_like(sel)
            curr = sp.empty_like(mem[0,:])

            seln = sel.shape[0]
            sp.subtract(sel, fmean, mem[:seln])
            sp.multiply(mem[:seln], mem[:seln], mem[:seln])
            fstd = sp.add.reduce(mem[:seln], axis=0, dtype="float32")

            npoints_done = MEAN_MAX_NPOINTS
            while npoints_done < npoints:
                sel = train_features[npoints_done:npoints_done+MEAN_MAX_NPOINTS]
                seln = sel.shape[0]
                sp.subtract(sel, fmean, mem[:seln])
                sp.multiply(mem[:seln], mem[:seln], mem[:seln])
                sp.add.reduce(mem[:seln], axis=0, dtype="float32", out=curr)
                sp.add(fstd, curr, fstd)

                npoints_done += MEAN_MAX_NPOINTS

            fstd = sp.sqrt(fstd/npoints)

        fstd[fstd==0] = 1
        whiten_vectors = (fmean, fstd)
        train_features.shape = fshape
    train_features = preprocess_features(train_features, 
                                         whiten_vectors = whiten_vectors)
    assert(not sp.isnan(sp.ravel(train_features)).any())
    assert(not sp.isinf(sp.ravel(train_features)).any())

    # -- train
    categories = sp.unique(train_labels)
    #if categories.size == 2:
    #    categories = [categories[0]]
    #else:
    #    raise NotImplementedError("not sure if it works with ncats > 2")

    corrcoef_kernels = {}
    cat_index = {}
    for icat, cat in enumerate(categories):

        if corrcoef_type == 'pos_neg_mean':
            #print train_features.shape
            #print train_features[train_labels == cat].shape
            #print train_features[train_labels != cat].shape
            corrcoef_ker = train_features[train_labels == cat].sum(0) \
                           - train_features[train_labels != cat].sum(0)
            corrcoef_ker /= ntrain 
        elif corrcoef_type == 'pos_mean_neg_mean':
            corrcoef_ker = train_features[train_labels == cat].mean(0) \
                           - train_features[train_labels != cat].mean(0)
        elif corrcoef_type == 'pos_mean':
            corrcoef_ker = train_features[train_labels == cat].mean(0)
        else:
            raise ValueError("corrcoef_type '%s' not understood"
                             % corrcoef_type)

        corrcoef_ker -= corrcoef_ker.mean()
        corrcoef_ker_mag = sp.linalg.norm(corrcoef_ker)
        assert corrcoef_ker_mag > 0
        corrcoef_ker /= corrcoef_ker_mag

        assert(not sp.isnan(corrcoef_ker).any())
        assert(not sp.isinf(corrcoef_ker).any())
        
        corrcoef_kernels[cat] = corrcoef_ker
        cat_index[cat] = icat 

    # --------------------------------------------------------------------------
    # -- load features from test filenames
    # set up progress bar
    print "Testing (on the fly) ..."
    pbar = ProgressBar(widgets=widgets, maxval=ntest)
    pbar.start()

    # -- test
    # XXX: code adapted from beta svm_ova_fromfilenames (to review!)
    pred = sp.zeros((ntest))
    distances = sp.zeros((ntest, len(categories)))

    for itest, fname in enumerate(test_fnames):

        try:
            fvector = io.loadmat(fname)[variable_name].ravel()
        except TypeError:
            print "[ERROR] couldn't open", fname, "deleting it"
            os.unlink(fname)
            error = True
        except:
            print "[ERROR] unknown error with", fname
            raise
            
        assert(not sp.isnan(fvector).any())
        assert(not sp.isinf(fvector).any())

        # whiten if needed
        if whiten_vectors is not None:
            fmean, fstd = whiten_vectors
            fvector -= fmean        
            assert((fstd!=0).all())
            fvector /= fstd            
        
        assert(not sp.isnan(fvector).any())
        assert(not sp.isinf(fvector).any())

        # corrcoef
        testv = fvector
        testv -= testv.mean()
        testv_mag = sp.linalg.norm(testv)
        assert testv_mag > 0
        testv /= testv_mag

        for icat, cat in enumerate(categories):

            corrcoef_ker = corrcoef_kernels[cat]
            resp = sp.dot(testv, corrcoef_ker)

            distances[itest, icat] = resp        
        
        pbar.update(itest+1)

    pbar.finish()
    print "-"*80

    if error:
        raise RuntimeError("An error occurred (load test). Exiting.")        

    if len(categories) > 1:
        pred = distances.argmax(1)
        #print sp.array([cat_index[e] for e in test_labels]).astype('int')
        gt = sp.array([cat_index[e] for e in test_labels]).astype("int")
        perf = (pred == gt)

        accuracy = 100.*perf.sum() / ntest
    else:
        pred = sp.sign(distances).ravel()
        gt = sp.array(test_labels)
        cat = categories[0]
        gt[gt != cat] = -1
        gt[gt == cat] = +1
        gt = gt.astype("int")
        perf = (pred == gt)
        accuracy = 100.*perf.sum() / ntest        
        
    print distances.shape
    print "Classification accuracy on test data (%):", accuracy


    svm_labels = gt

    # -- average precision
    # XXX: redo this part to handle other labels than +1 / -1
    ap = 0
    if distances.shape[1] == 1:
        distances = distances.ravel()
        assert test_labels.ndim == 1            
        assert svm_labels.ndim == 1
    
        # -- inverse predictions if needed
        # (that is when svm was trained with flipped +1/-1 labels)
        # -- convert test_labels to +1/-1 (int)
        try:
            test_labels = sp.array([int(elt) for elt in test_labels])
            if (test_labels != svm_labels).any():
                distances = -distances

            #if not ((test_labels==-1).any() and (test_labels==1).any()):
            #    test_labels[test_labels!=test_labels[0]] = +1
            #    test_labels[test_labels==test_labels[0]] = -1

            #print test_labels

            # -- convert test_labels to +1/-1 (int)
            test_labels = sp.array([int(elt) for elt in test_labels])

            # -- get average precision
            c = distances
            #print c
            si = sp.argsort(-c)
            tp = sp.cumsum(sp.single(test_labels[si]>0))
            fp = sp.cumsum(sp.single(test_labels[si]<0))
            rec  = tp/sp.sum(test_labels>0)
            prec = tp/(fp+tp)

            #print prec, rec
            #from pylab import *
            #plot(prec, rec)
            #show()

            ap = 0
            rng = sp.arange(0,1.1,.1)
            for th in rng:
                p_at_th = prec[rec >= th]
                p = p_at_th.max() if p_at_th.size > 0 else 0
                ap += p / rng.size

            print "Average Precision:", ap

        except ValueError:
            ap = 0


    # XXX: clean this
    test_y = sp.array([svm_labels.ravel()==lab
                       for lab in sp.unique(svm_labels.ravel())]
                      )*2-1
    test_y = test_y.T

    print distances
    
    # --------------------------------------------------------------------------
    # -- write output file
    if output_fname is not None:
        print "Writing %s ..." % (output_fname)
        # TODO: save more stuff (alphas, etc.)
        data = {"accuracy": accuracy,
                "average_precision":ap,
                "test_distances": distances,
                "test_labels": test_labels,
                "test_y": test_y,
                "svm_labels": svm_labels,
                }

        io.savemat(output_fname, data, format='4')

    return accuracy
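# ############################################################
# A minimal sketch of the correlation-kernel classifier that
# kernel_generate_fromcsv implements: build a mean-centred, unit-norm class
# template from the (whitened) training features, then score a test feature
# by a plain dot product after the same centring and normalisation. The
# whitening step is omitted here and the helper names are illustrative.
import numpy as np

def correlation_template(train_features, train_labels, cat):
    # 'pos_mean_neg_mean' variant of the template built above
    ker = (train_features[train_labels == cat].mean(0)
           - train_features[train_labels != cat].mean(0))
    ker -= ker.mean()
    return ker / np.linalg.norm(ker)

def correlation_score(test_feature, template):
    # same scoring as the test loop: centre, normalise, dot product
    v = test_feature - test_feature.mean()
    return np.dot(v / np.linalg.norm(v), template)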