Example #1
from scipy import float64  # re-exported from numpy by the old scipy releases these examples target

def read_control(filename, atoms):
    f = open(filename)
    for line in f:
        if "$grad          cartesian gradients" in line:
            break
    en = 0
    grads = []
    first_step = True
    while True:
        line = next(f)
        # After the first step, '$last' or '$actual' marks the end of the gradient block.
        if not first_step and ('$last' in line or '$actual' in line):
            f.close()
            return grads, en
        if '$end' in line:
            f.close()
            return grads, en
        if 'maximum norm' in line:
            f.close()
            return grads, en
        en = float64(line.split('=')[2].split()[0])
        # skip the coordinate block, one line per atom
        # (the atom labels at[3] could be checked against line.split()[3] here)
        for at, line in zip(atoms, f):
            pass
        grads = []
        for at, line in zip(atoms, f):
            # rewrite Fortran exponents ('D'/'d') before parsing
            aux = line.replace('D', 'e').replace('d', 'e').split()[0:3]
            grads += [[float64(g) for g in aux]]
        first_step = False
Example #2
def wr_abs_cal_gui_file_to_scipy_array(filename):
    """
  Retrieve the White Rabbit GUI info which is written during Absolute Calibration
   
  filename -- source file from which to retrieve data.
    
  returns: <type 'numpy.ndarray'> measurements
  """

    data_file = open(filename, "r")

    # create an empty gui_data dictionary
    gui_data = {}

    # create empty lists for the t1 and t4 components
    lst_t1_sec = []
    lst_t1 = []
    lst_t4_sec = []
    lst_t4 = []

    while True:
        line = data_file.readline()
        line_lst = line.split(" ")
        if len(line_lst) < 6:
            break

        # Values on one line are:
        # t1 sec, t1[ns], t1_phase[ps], t4 sec, t4[ns], t4_phase[ps]
        # To compute proper t1 and t4, ns must be scaled by 1e-9
        # and phase by 1e-12.
        lst_t1_sec.append(line_lst[0])
        lst_t1.append(1e-9 * scipy.float64(line_lst[1]) +
                      1e-12 * scipy.float64(line_lst[2]))
        lst_t4_sec.append(line_lst[3])
        lst_t4.append(1e-9 * scipy.float64(line_lst[4]) +
                      1e-12 * scipy.float64(line_lst[5]))

    data_file.close()

    t1_sec = scipy.array(lst_t1_sec)
    t1 = scipy.array(lst_t1)
    t4_sec = scipy.array(lst_t4_sec)
    t4 = scipy.array(lst_t4)

    gui_data["t1 seconds"] = (t1_sec)
    gui_data["t1"] = (t1)
    gui_data["t4 seconds"] = (t4_sec)
    gui_data["t4"] = (t4)

    return gui_data
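A minimal usage sketch (the file name below is hypothetical; "t1" and "t4" are the float arrays filled above):

gui_data = wr_abs_cal_gui_file_to_scipy_array("abs_cal_gui.txt")  # hypothetical file
delay = gui_data["t4"] - gui_data["t1"]  # element-wise difference, in seconds
print(delay.mean())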
Example #3
def glmnetControl(pars=None):
    import scipy

    # default options
    ivals = dict()
    ivals["fdev"] = scipy.float64(1e-5)
    ivals["devmax"] = scipy.float64(0.999)
    ivals["eps"] = scipy.float64(1e-6)
    ivals["big"] = scipy.float64(9.9e35)
    ivals["mnlam"] = scipy.float64(5)
    ivals["pmin"] = scipy.float64(1e-5)
    ivals["exmx"] = scipy.float64(250)
    ivals["prec"] = scipy.float64(1e-10)
    ivals["mxit"] = scipy.float64(100)

    # quick return if no user opts
    if pars is None:
        return ivals

    # if options are passed in by user, update options with values from pars
    parsInIvals = set(pars.keys()) - set(ivals.keys())
    if len(parsInIvals) > 0:  # assert 'pars' keys are a subset of 'ivals' keys
        raise ValueError(
            'attempting to set glmnet controls that are not known to glmnetControl'
        )
    else:
        ivals = {**ivals, **pars}  # update values

    return ivals
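A hedged usage sketch for glmnetControl: overriding one known control and showing that unknown keys raise, with key names taken from the defaults above.

import scipy
ctrl = glmnetControl({'fdev': scipy.float64(1e-6)})  # override a known control
try:
    glmnetControl({'not_a_control': 1})              # hypothetical bad key
except ValueError as err:
    print(err)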
Example #4
def lassotrans(beta, w, y, omega, lam, eta, offset):
    # NOTE: p (the problem dimension) is taken from the enclosing scope.
    n = y.shape[0]
    expterm = np.exp(np.matmul(w, beta) - offset
                     - np.matmul(np.matmul(np.transpose(beta), omega), beta) / 2)
    omegabeta = np.matmul(omega, beta)
    omegabeta = omegabeta[np.newaxis, :]
    dLbeta = -(np.matmul(np.transpose(y), w)
               - np.matmul(np.transpose(expterm), (w - omegabeta))) / n
    Y = p * np.sqrt(eta / 2) * (beta - dLbeta / eta)
    X = p * np.eye(p) * np.sqrt(eta / 2)
    fit = glmnet(x=scipy.float64(X), y=scipy.float64(Y),
                 lambdau=scipy.float64([lam]), intr=False)
    beta = np.array(glmnetCoef(fit, s=scipy.float64([0])))[1:, 0]
    return beta
Example #5
def fixed_point(t, M, I, a2):
    l = 7
    I = sci.float64(I)
    M = sci.float64(M)
    a2 = sci.float64(a2)
    f = 2 * sci.pi**(2 * l) * sci.sum(I**l * a2 * sci.exp(-I * sci.pi**2 * t))
    for s in range(l, 1, -1):
        K0 = sci.prod(range(1, 2 * s, 2)) / sci.sqrt(2 * sci.pi)
        const = (1 + (1 / 2)**(s + 1 / 2)) / 3
        time = (2 * const * K0 / M / f)**(2 / (3 + 2 * s))
        f = 2 * sci.pi**(2 * s) * sci.sum(I**s * a2 * sci.exp(-I * sci.pi**2 * time))
    return t - (2 * M * sci.sqrt(sci.pi) * f)**(-2 / 5)
Example #6
def fixed_point(t, M, I, a2):
    l = 7
    I = scipy.float64(I)
    M = scipy.float64(M)
    a2 = scipy.float64(a2)
    f = 2 * scipy.pi**(2 * l) * scipy.sum(I**l * a2 * scipy.exp(-I * scipy.pi**2 * t))
    for s in range(l, 1, -1):
        K0 = scipy.prod(range(1, 2 * s, 2)) / scipy.sqrt(2 * scipy.pi)
        const = (1 + (1 / 2)**(s + 1 / 2)) / 3
        time = (2 * const * K0 / M / f)**(2 / (3 + 2 * s))
        f = 2 * scipy.pi**(2 * s) * scipy.sum(I**s * a2 * scipy.exp(-I * scipy.pi**2 * time))
    return t - (2 * M * scipy.sqrt(scipy.pi) * f)**(-2 / 5)
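Examples #5 and #6 are the same fixed-point map under different scipy aliases; it has the shape of the bandwidth fixed point used in Botev-style kernel density estimation, where I holds squared frequencies and a2 squared transform coefficients. A toy invocation with made-up inputs, only to show the calling convention:

import scipy
I = scipy.arange(1, 51, dtype=scipy.float64) ** 2  # toy squared frequencies (assumption)
a2 = scipy.rand(50) * 1e-3                         # toy squared coefficients (assumption)
print(fixed_point(0.01, scipy.float64(100), I, a2))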
Example #7
def repeat_EN_csv(data_csv, reps=50, shuffle_mark=False):
    #    repeated_cross_validate(clf_in,X,y,cv_fold=10,rep=50)
    csv_name, ext = os.path.splitext(data_csv)
    all_data = pd.read_csv(data_csv)
    y = np.array((all_data['y']))
    fs = np.array(all_data.drop(columns=['y']))
    results = repeated_cross_validate(glmnet_wrapper(),
                                      scipy.float64(fs),
                                      scipy.float64(y),
                                      cv_fold=10,
                                      rep=reps,
                                      shuffle_y=shuffle_mark)
    return results
Example #8
    def drawMap(self):
        self.g = geoplotter.GeoPlotter()
        self.g.clear()
        self.g.drawWorld()
        edge_list = []
        # draw the network; node names are "lon lat" strings, so parse them
        # back into coordinate tuples
        for edge in self.gd.edges():
            start = tuple([scipy.float64(j) for j in edge[0].split(" ")])
            end = tuple([scipy.float64(j) for j in edge[1].split(" ")])
            edge_list.append([start, end])
        self.g.drawLines(edge_list, color='cornflowerblue')
Example #9
def readMultipleColumnsCSV(delimiter=',', fileName='.csv', nHeaderLines=1):
    data = dict()
    data['name'] = fileName
    with open(fileName, newline='', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=delimiter)
        dataList = list()
        nRows = 0
        nCols = 0
        # Skip the first nHeaderLines
        for _ in range(nHeaderLines):
            next(reader, None)

        for row in reader:
            dataList.append(row)
            nCols = sc.maximum(nCols, len(row))
            nRows = nRows + 1

        dataList = sc.array(dataList).transpose()
        dd = list()
        for c in sc.arange(nCols):
            # count the non-empty entries in this column
            rr = 0
            for r in sc.arange(nRows):
                xx = dataList[c][r]
                if len(xx) > 0:
                    rr = rr + 1
            # parse the non-empty entries as float64
            dd.append(sc.zeros(rr))
            for r in sc.arange(rr):
                xx = dataList[c][r]
                if len(xx) > 0:
                    dd[c][r] = sc.float64(xx)

        data['values'] = dd
        data['nRows'] = nRows
        data['nCols'] = nCols
    return data
Example #10
    def readTestResourceData(self, fileDataPath, testInstanceDict):

        with open(fileDataPath, "r") as bufferFile:
            currentData = bufferFile.readline()
            index = 0
            while len(currentData) != 0:
                currentData = str(currentData).strip()
                decoyList = currentData.split(sep="\t")
                if len(decoyList) == 3:
                    instanceKey = int(decoyList[0])
                    vectorInstance = []
                    for valueItem in str(decoyList[2]).split(sep=","):
                        vectorInstance.append(float64(valueItem))

                    self.x_TestInstance.update({index: vectorInstance})
                    self.y_TestInstance.update({index: instanceKey})
                    tier1BufferDict = {}
                    if instanceKey in testInstanceDict:
                        tier1BufferDict = testInstanceDict.get(instanceKey)
                    tier2BufferDict = {}
                    if decoyList[1] in tier1BufferDict:
                        tier2BufferDict.update(
                            {decoyList[1]: tier1BufferDict.get(decoyList[1])})
                    if len(tier2BufferDict) > 0:
                        self.testInstanceId.update({index: tier2BufferDict})
                    else:
                        print("resource error in test instance ", decoyList[1])
                        sys.exit(0)
                    #print(currentData,"\t",index)
                    index = index + 1
                currentData = bufferFile.readline()

        return ()
Example #11
    def get_keff(self, bg2):
        """Find the eigenvalue in a homogeneous material

        Parameter:
        ----------
        bg2:        float; the geometric buckling

        Returns:
        --------
        keff:       float; the multiplication factor/eigenvalue
        """
        if not self.is_fissionable:
            return 0.0
        elif self.ngroups == 1:
            return scipy.float64(self.nu_sigma_f /
                                 (self.sigma_a + self.d * bg2))
        elif self.ngroups == 2:
            ratio = self.flux_ratio(bg2)
            r1 = self.sigma_a[0] + self.sigma_s12
            return (self.nu_sigma_f[0] + self.nu_sigma_f[1]/ratio)/ \
                   (self.d[0]*bg2 + r1)
        else:
            errstr = "k_inf calculation is only available for 1 or 2 groups."
            raise NotImplementedError(errstr)
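For the one-group branch above, keff = nu_sigma_f / (sigma_a + d * bg2); a quick numeric check with made-up one-group constants:

nu_sigma_f, sigma_a, d, bg2 = 0.0085, 0.0070, 1.3, 0.0002  # hypothetical values
keff = nu_sigma_f / (sigma_a + d * bg2)
print(keff)  # 0.0085 / 0.00726 ≈ 1.171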
Example #12
def full_weighted_cv(X, y, Ds, lambda_gtv=np.linspace(.1, 1, 10), lambda_lasso=None, t=50, auto_cv=True, alpha=.9, k=5):
    errors = []
    X_train, X_test, y_train, y_test = temporal_split(X, y, t)
    if alpha<1:
        n = X_train.shape[0]
        weights = np.array([alpha**(n-t) for t in np.arange(1, n+1)])
        X_train = X_train * np.sqrt(weights.reshape(-1,1))
        y_train = y_train * np.sqrt(weights)
    n,p = X_train.shape
    # test errors
    for l1 in lambda_gtv:
        for m in Ds:
            D = Ds[m]
            if auto_cv:
                XD, bigY, invD = augmented_system_lasso(X_train, y_train, D, l1, 0, l1_only=True)
                fit = cvglmnet(x = XD, y = bigY, family = 'gaussian', ptype = 'mse', nfolds = 5)
                b = cvglmnetCoef(fit, s = 'lambda_min')
                l3 = fit['lambda_min'][0]
                beta = invD @ b.reshape(b.shape[0])[1:]
                mset, r2t = compute_errors(y_train, X_train@beta)
                mse, r2 = compute_errors(y_test, X_test@beta)
                errors.append([m, l1, l3, mset, r2t, mse, r2])
            else:
                for l3 in lambda_lasso:
                    XD, bigY, invD = augmented_system_lasso(X_train, y_train, D, l1/l3, 0, l1_only=True)
                    #XD, bigY, invD = epsilon_system_lasso(X_train, y_train, D, l1)
                    fit = glmnet(x = XD, y = bigY)
                    b = glmnetCoef(fit, s = scipy.float64([l3]), exact = False)
                    beta = invD @ b.reshape(b.shape[0])[1:]
                    mset, r2t = compute_errors(y_train, X_train@beta)
                    mse, r2 = compute_errors(y_test, X_test@beta)
                    errors.append([m, l1, l3, mset, r2t, mse, r2])
    df = pd.DataFrame(errors, columns=['method', 'lambda_tv', 'lambda_1', 'train_mse', 'train_r2', 'test_mse', 'test_r2'])
    return df
Example #13
    def _get_spacing(self):
        # Find Network spacing
        P12 = self["throat.conns"]
        C12 = self["pore.coords"][P12]
        mag = np.linalg.norm(np.diff(C12, axis=1), axis=2)
        unit_vec = np.around(np.squeeze(np.diff(C12, axis=1)) / mag,
                             decimals=14)
        spacing = [0, 0, 0]
        dims = topotools.dimensionality(self)
        # Ensure vectors point in n-dims unique directions
        c = {tuple(row): 1 for row in unit_vec}
        if len(c.keys()) > sum(dims):
            raise Exception("Spacing is undefined when throats point in " +
                            "more directions than network has dimensions")
        mag = sp.float64(mag.squeeze())
        for ax in [0, 1, 2]:
            if dims[ax]:
                inds = np.where(unit_vec[:, ax] == unit_vec[:, ax].max())[0]
                temp = np.unique(mag[inds])
                if not np.allclose(temp, temp[0]):
                    raise Exception(
                        "A unique value of spacing could not be found")
                else:
                    spacing[ax] = temp[0]
        return np.array(spacing)
Example #14
    def fit_with_lmfit(self, method='lbfgsb', conf='covar', report=True):
        try:
            import lmfit_wrapper as lw
        except ImportError:
            print('Could not import LMfit')
            return
        x, data, errs = self.set_fit_data()
        result = lw.fit_it(self.params,
                           args=(self.x[self.fitrange],
                                 self.indata[self.fitrange],
                                 self.errs[self.fitrange]),
                           method=method)
        if report:
            lw.lf.report_fit(result)
        output = lw.params_to_grism(result, output_format='df')
        output['Identifier'] = self.tofit['Identifier']
        # set_value and .ix are legacy pandas APIs (removed in pandas 1.0)
        output.set_value('Contin', 'Identifier', sp.float64('nan'))
        output['Pos'] -= self.tofit['Line center']
        outdict = {}
        for i in output.index:
            row = output.ix[i]
            if i == 'Contin':
                outdict[i] = [row['Ampl'], row['Ampl_stddev'], row['RedChi2']]
            else:
                outdict[i] = [
                    row['Pos'], row['Sigma'], row['Ampl'], row['Identifier'],
                    row['Pos_stddev'], row['Sigma_stddev'], row['Ampl_stddev']
                ]
        self.Components = outdict
        self.import_model()
        self.result = result
        self.output = output
Example #15
    def read_csv_col(self, phenotype_filepath, colnr, colprefix="", sep='\t'):
        log.info("reading phenotypes: {}, colnr: {}".format(
            phenotype_filepath, colnr))

        with open(phenotype_filepath, 'r') as phenofile:
            hcols = phenofile.readline().strip().split(sep)

            ids = []
            vals = []
            for l in phenofile:
                dcols = l.strip().split(sep)
                try:
                    vals.append(sp.float64(dcols[colnr]))
                    ids.append(dcols[0])
                except ValueError:
                    log.warning(
                        "excluding accession {} because of trait value: {}".
                        format(dcols[0], dcols[colnr]))
                    continue

            tmp_data = pd.DataFrame(vals)
            tmp_data.index = ids
            tmp_data.columns = ["_".join([colprefix, hcols[colnr]])]

            if self.data.size == 0:
                self.data = tmp_data
            else:
                self.data = pd.concat([self.data, tmp_data],
                                      join='inner',
                                      axis=1)
                log.info("phenotype intersection is {} accessions.".format(
                    self.data.shape[0]))
        # self.data.sort_index(inplace=True, key=float);
        return
Example #16
def create_image_and_mask(imagefilename, maskfilename):

    # import a clean input to be corrupted with the mask
    input = mpimg.imread(imagefilename)

    if input.ndim == 3:
        M, N, C = input.shape
    else:
        M, N = input.shape
        C = 1

    # import the mask of the inpainting domain
    # mask = 1 intact part
    # mask = 0 missing domain
    mask = scipy.float64(mpimg.imread(maskfilename) == 1)

    if (input.ndim == 3) and (mask.ndim < 3):
        mask = np.repeat(mask[:, :, np.newaxis], C, axis=2)

    if C == 1:
        input = scipy.expand_dims(input, axis=2)
        mask = scipy.expand_dims(mask, axis=2)

    # create the image with the missing domain:
    noise = scipy.rand(M, N, C)
    u = mask * input + (1 - mask) * noise

    return (u, mask)
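A hedged usage sketch (both file names are hypothetical): u blends the intact image with uniform noise over the missing domain.

import matplotlib.pyplot as plt
u, mask = create_image_and_mask("photo.png", "mask.png")  # hypothetical files
plt.imshow(u.squeeze(), cmap="gray")
plt.show()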
Example #17
    def readTrainingResourceData(self, fileDataPath):

        with open(fileDataPath, "r") as bufferFile:
            currentData = bufferFile.readline()
            index = 0
            while len(currentData) != 0:
                currentData = str(currentData).strip()
                decoyList = currentData.split(sep="\t", maxsplit=1)
                if len(decoyList) == 2:
                    instanceKey = int(decoyList[0])
                    vectorInstance = []
                    for valueItem in str(decoyList[1]).split(sep=","):
                        vectorInstance.append(float64(valueItem))
                    self.x_instance.update({index: vectorInstance})
                    self.y_instance.update({index: instanceKey})
                    if (index == 0):
                        self.instanceSpan = len(vectorInstance)
                    if (len(vectorInstance) < self.instanceSpan):
                        print(index)
                        sys.exit()
                    #print(currentData,"\t",index)
                    index = index + 1
                currentData = bufferFile.readline()

        self.crossValidationSplit()
        return ()
Example #18
    def readTrainingResourceData(self, fileDataPath):

        with open(fileDataPath, "r") as bufferFile:
            currentData = bufferFile.readline()
            index = 0
            while len(currentData) != 0:
                currentData = str(currentData).strip()
                decoyList = currentData.split(sep="\t")
                if len(decoyList) == 3:
                    instanceKey = int(decoyList[0])
                    vectorList = list(
                        map(lambda valueItem: float64(valueItem),
                            list(str(decoyList[2]).split(sep=","))))
                    vectorInstance = np.array(vectorList)
                    self.x_instance.update({index: vectorInstance})
                    self.y_instance.update({index: instanceKey})
                    if (index == 0):
                        self.instanceSpan = vectorInstance.shape[0]
                        self.featureDimension = self.windowSize
                    if (vectorInstance.shape[0] < self.instanceSpan):
                        print(index)
                        sys.exit()
                    #print(currentData,"\t",index)
                    index = index + 1
                currentData = bufferFile.readline()

        self.crossValidationSplit()
        return ()
Example #19
    def plot_curr(self):
        """Plots the 3D current distribution for the entered row number."""
        ######## plot a 3D Current ########

        fig = plt.figure(figsize=plt.figaspect(0.5))
        canvas = FigureCanvasTkAgg(fig, master=page3)
        canvas.get_tk_widget().pack(side='top', fill='both')
        canvas._tkcanvas.pack(side='top', fill='both', expand=15)
        # NavigationToolbar2TkAgg was renamed NavigationToolbar2Tk in matplotlib 2.2
        toolbar = NavigationToolbar2TkAgg(canvas, page3)
        canvas.get_tk_widget().grid(row=1, column=1)
        toolbar.grid(row=50, column=1)

        ax = fig.add_subplot(1, 2, 1, projection='3d')

        X = sp.linspace(0, 10, 10)
        Y = sp.linspace(0, 10, 10)
        X, Y = sp.meshgrid(X, Y)
        X, Y = X.ravel(), Y.ravel()

        width = depth = 1
        bottom = X * 0.

        getrow = int(entryrowplot.get())
        # each 'current' entry holds 100 semicolon-separated values
        current_floats = sp.float64(
            self.data_str['current'][getrow].split(";")).reshape(10, 10)
        maxcurr = np.max(current_floats)
        top = current_floats.ravel()
        top_scaled = (top - sp.amin(top)) / (sp.amax(top) - sp.amin(top))
        mycolors = cm.jet(top_scaled)
        ax.bar3d(X, Y, bottom, width, depth, top, color=mycolors,
                 alpha=float(entryTrans.get()))
        ax.set_title('Partial Currents @ ' + self.data_str['sumcurr'][getrow] +
                     ' A' + '//   @ ' + self.data['voltage'][getrow] + ' V')

        ax.set_zlim(0, maxcurr)
        ax.set_zlabel('Current in A', linespacing=10.4)
        ax.view_init(elev=int(entryCurrEval.get()),
                     azim=int(entryCurrAngle.get()))

        fig = plt.figure()
        plt.show()
        plt.gcf().canvas.draw()

        currplot = ts.strftime("%Y.%m.%d. %H:%M:%S :") + "Current plotted"
        self.displaystate.insert(tk.END, currplot + '\n')
Example #20
    def getSPNetworkx(self, startnode, destnode, find=True):
        if find:
            startnode = self.findClosestNode(*startnode)
            destnode = self.findClosestNode(*destnode)

        path = networkx.shortest_path(self.gd, startnode, destnode, 'time')
        path = [tuple([scipy.float64(j) for j in x.split(" ")]) for x in path]
        return zip(path, path[1:])
Example #21
    def __init__(self, kdim, depth, algo, seed, codes):
        assert kdim == 2
        self.kdim = kdim
        self.depth = depth
        self.algo = algo
        self.seed = seed
        self.codes = codes
        self.indptr = sp.cumsum(sp.bincount(codes + 1, minlength=(self.nr_codes + 1)),
                                dtype=sp.uint64)
        # sort element ids by code while keeping the original order within each code
        self.indices = sp.argsort(codes * sp.float64(self.nr_elements) + sp.arange(self.nr_elements))
Example #22
    def findClosestNode(self, lon, lat):
        min_dist = scipy.inf
        min_point = []
        for i_point in self.gd.nodes():
            start = tuple([scipy.float64(j) for j in i_point.split(" ")])
            dist = (start[0] - lon)**2 + (start[1] - lat)**2
            if dist < min_dist:
                min_dist = dist
                min_point = i_point
        return min_point
Example #23
def once_csv_pred_test(data_csv):
    csv_name, ext = os.path.splitext(data_csv)
    start_time = time.time()
    all_data = pd.read_csv(data_csv)
    y = np.array(all_data['y'])
    fs = np.array(all_data.drop(columns=['y']))
    clf = glmnet_wrapper()
    y_pred = cross_val_predict(clf,
                               scipy.float64(fs),
                               scipy.float64(y),
                               cv=10,
                               n_jobs=-1)
    r_value, p_value = scipy.stats.pearsonr(y_pred, y)
    e = int(time.time() - start_time)
    e_time = '{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60), e % 60)
    with open('appending_results.txt', 'a+') as fo:
        fo.write('\r\n filename---{} r---: {} p---: {} time--: {} \r\n'.format(
            csv_name, r_value, p_value, e_time))
    return r_value, p_value, e_time
Example #24
def file_to_scipy_array(filename):
    """
  Retrieve the Keysight 53230A Universal Frequency Counter/Timer measurements from file.
   
  filename -- source file from which to retrieve data.
    
  returns: <type 'numpy.ndarray'> measurements
  """

    data_file = open(filename, "r")

    line = data_file.readline()
    if line.strip() != "#MeasurementData:Keysight 53230A":
        #print("Exception: file_to_scipy_array: Not a Keysight 53230A Measurement Data file.")
        Exception(
            "file_to_scipy_array: Not a Keysight 53230A Measurement Data file."
        )
        data_file.close()
        return

    line = data_file.readline()
    version = line.strip().split(":")
    if not (version[0] == "#version" and version[1] == "0.3"):
        Exception(
            "file_to_scipy_array: Keysight 53230A wrong version easurement Data file."
        )
        data_file.close()
        return

    lst_measurements = []

    while True:
        line = data_file.readline()
        if len(line) == 0:
            break
        if line[:len("#date:")] == "#date:":
            date_in_file = line.split(":")[1].strip()
        if line[:len("#time:")] == "#time:":
            time_lst = line.split(":")
            time_in_file = time_lst[1].strip() + ":" + time_lst[2].strip(
            ) + ":" + time_lst[3].strip()

        # All lines not starting with "#" contain measurements
        if line[:len("#")] != "#":
            value = scipy.float64(line.strip())
            lst_measurements.append(value)

    data_file.close()

    x_data = scipy.arange(0, len(lst_measurements), 1, dtype=scipy.float64)
    y_data = scipy.array(lst_measurements)
    measurement_data = scipy.array([x_data, y_data])

    return measurement_data
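A hedged usage sketch (file name hypothetical): row 0 of the result is the sample index, row 1 the measured values.

data = file_to_scipy_array("53230A_log.txt")  # hypothetical file name
x_data, y_data = data[0], data[1]
print(len(x_data), y_data.mean())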
Example #25
def read_fcon1000_data(csv_fname,
                       data_dir,
                       reg_var_name='Verbal IQ',
                       num_sub=5,
                       reg_var_positive=1,
                       gord=1):
    """ reads fcon1000 csv and data"""

    count1 = 0
    sub_ids = []
    reg_var = []
    pbar = tqdm(total=num_sub)

    with open(csv_fname, newline='') as csvfile:
        creader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
        for row in creader:

            # read the regression variable
            rvar = row[reg_var_name]

            if gord == 1:
                # Read the filtered data by default
                fname = os.path.join(
                    data_dir, row['ScanDir ID'] + '_rest_bold.32k.GOrd.mat')
            else:
                fname = os.path.join(
                    data_dir, row['ScanDir ID'] + '_rest_bold.BOrd.mat')

            # If the data does not exist for this subject then skip it
            if not os.path.isfile(fname) or int(row['QC_Rest_1']) != 1:
                continue

            if reg_var_positive == 1 and sp.float64(rvar) < 0:
                continue

            if count1 == 0:
                sub_data_files = []

            # Truncate the data at a given number of time samples. This is
            # needed because BrainSync needs the same number of time samples.
            sub_data_files.append(fname)
            sub_ids.append(row['ScanDir ID'])
            reg_var.append(float(rvar))

            count1 += 1
            pbar.update(1)  # update the progress bar
            #print('%d,' % count1, end='')
            if count1 == num_sub:
                break

    pbar.close()
    print('CSV file and the data have been read\nThere are %d subjects' %
          (len(sub_ids)))

    return sub_ids, sp.array(reg_var), sub_data_files
Example #26
def weighted_gtv(X, y, D, l1, l3, alpha=.9):
    if alpha<1:
        n = X.shape[0]
        weights = np.array([alpha**(n-t) for t in np.arange(1, n+1)])
        X = X * np.sqrt(weights.reshape(-1,1))
        y = y * np.sqrt(weights)
    XD, bigY, invD = augmented_system_lasso(X, y, D, l1/l3, 0, l1_only=True)
    fit = glmnet(x = XD, y = bigY)
    b = glmnetCoef(fit, s = scipy.float64([l3]), exact = False)
    beta = invD @ b.reshape(b.shape[0])[1:]
    return beta
Example #27
    def backward_step(self, a, t):
        """Return r error value for this output layer

            a - output of the layer (float), equals to a_(k)
            t - actual class (+1 or -1)
        """
        a = s.float64(a)
        t = int(t)
        assert t in [-1, 1], "Invalid class"
        t_new = (1 + t) / 2
        return s.array([func.sig(a) - t_new])
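The mapping t_new = (1 + t) / 2 sends labels {-1, +1} to {0, 1}, so the returned value is the familiar sigmoid-minus-target gradient of the logistic loss. A toy check with a stand-in for func.sig (an assumption; the real module is not shown):

import scipy as s
sig = lambda a: 1.0 / (1.0 + s.exp(-a))  # stand-in for func.sig (assumption)
a, t = s.float64(0.3), 1
print(sig(a) - (1 + t) / 2)              # ≈ -0.4256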
Example #28
def glmnetSet(opts = None):
    import scipy
    
    # default options
    options = {
        "weights"             : scipy.empty([0]),
        "offset"              : scipy.empty([0]),
        "alpha"               : scipy.float64(1.0),
        "nlambda"             : scipy.int32(100),
        "lambda_min"          : scipy.empty([0]),
        "lambdau"             : scipy.empty([0]),
        "standardize"         : True,
        "intr"                : True,
        "thresh"              : scipy.float64(1e-7),
        "dfmax"               : scipy.empty([0]),
        "pmax"                : scipy.empty([0]),
        "exclude"             : scipy.empty([0], dtype = scipy.integer),
        "penalty_factor"      : scipy.empty([0]),
        "cl"                  : scipy.array([[scipy.float64(-scipy.inf)], [scipy.float64(scipy.inf)]]), 
        "maxit"               : scipy.int32(1e5),
        "gtype"               : [],
        "ltype"               : 'Newton',
        "standardize_resp"    : False,
        "mtype"               : 'ungrouped'
    }

    # quick return if no user opts
    if opts is None:
        print('pdco default options:')
        print(options)
        return options

    # if options are passed in by user, update options with values from opts
    optsInOptions = set(opts.keys()) - set(options.keys())
    if len(optsInOptions) > 0:          # assert 'opts' keys are subsets of 'options' keys
        print(optsInOptions, ' : unknown option for glmnetSet')
        raise ValueError('attempting to set glmnet options that are not known to glmnetSet')
    else:
        options.update(opts)            # update values
    
    return options
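A short hedged sketch mirroring the dict-update pattern above:

import scipy
opts = glmnetSet({'alpha': scipy.float64(0.5), 'nlambda': scipy.int32(50)})
print(opts['alpha'], opts['nlambda'])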
Example #29
def coo_to_csr(coo):
    # NOTE: 'dtype' is taken from the enclosing scope.
    nr_rows, nr_cols, nnz, row, col, val = \
            coo.shape[0], coo.shape[1], coo.data.shape[0], coo.row, coo.col, coo.data
    indptr = sp.cumsum(sp.bincount(row + 1,
                                   minlength=(nr_rows + 1)),
                       dtype=sp.uint64)
    indices = sp.zeros(nnz, dtype=sp.uint32)
    data = sp.zeros(nnz, dtype=dtype)
    # sort entries in row-major order via row * nr_cols + col
    sorted_idx = sp.argsort(row * sp.float64(nr_cols) + col)
    indices[:] = col[sorted_idx]
    data[:] = val[sorted_idx]
    return indptr, indices, data
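coo_to_csr reads dtype from its enclosing scope, so a hedged round-trip check has to supply that context; comparing against scipy.sparse's own conversion is only a sanity check under those assumptions.

import scipy as sp
import scipy.sparse as smat
dtype = sp.float32  # assumption: stands in for the enclosing scope's dtype
coo = smat.coo_matrix(sp.array([[1., 0.], [0., 2.]]))
indptr, indices, data = coo_to_csr(coo)
csr = coo.tocsr()
print((indptr == csr.indptr).all(), (indices == csr.indices).all())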
Example #30
    def glmnetFit(self, X, y, offsets, numCodons, numGenes, varsNames, lambda_min):
        """

        :param X: design matrix
        :param y: response vector
        :param offsets: per-observation model offsets
        :param numCodons: number of codon-level variables in the design
        :param numGenes: number of gene-level variables in the design
        :param varsNames: names of the design-matrix columns
        :param lambda_min: pre-defined lambda; if falsy, selected by cross-validation
        :return:
        """
        # fit the model
        if not lambda_min:
            fit = cvglmnet(x=X.copy(), y=y.copy(), family='poisson',
                           offset=offsets, alpha=0, parallel=True, lambda_min=np.array([0]))
            coefs = cvglmnetCoef(fit, s=fit['lambda_min'])  # lambda_min lambda_1se
        else:
            fit = glmnet(x=X.copy(), y=y.copy(), family='poisson',
                         offset=offsets, alpha=0, lambda_min=np.array([0]))
            coefs = glmnetCoef(fit, s=scipy.float64([lambda_min]))
        # parse and scale coefficients
        intercept = coefs[0][0]
        geneBetas = pd.DataFrame([[varsNames[i-1].split("_")[1], coefs[i][0]]
                                  for i in range(1, numGenes+1)], columns=["gene", "beta"])
        geneBetas["log2_TE"] = (geneBetas["beta"] - np.median(geneBetas["beta"])) / np.log(2)
        geneBetas.drop(["beta"], inplace=True, axis=1)
        codonBetas = pd.DataFrame([[varsNames[i-1].split("_")[1], coefs[i][0]]
                                   for i in range(numGenes+1, numGenes + numCodons + 1)], columns=["codon", "beta"])
        codonBetas["log_codon_dwell_time"] = (codonBetas["beta"] - np.median(codonBetas["beta"]))
        codonBetas["codon_dwell_time"] = np.exp(codonBetas["log_codon_dwell_time"])
        codonBetas.drop(["beta", "log_codon_dwell_time"], inplace=True, axis=1)
        downstreamSLBeta = coefs[numGenes + numCodons + 1][0]
        #  export to local
        geneBetas.to_csv(path_or_buf=self.output + '/genesTE.csv', sep='\t',
                         header=True, index=False, float_format='%.4f')
        codonBetas.to_csv(path_or_buf=self.output + '/codons.csv', sep='\t',
                          header=True, index=False, float_format='%.4f')
        # print results
        if lambda_min:
            sys.stderr.write("[results]\tpre-defined lambda: " + str(lambda_min) + "\n")
        else:
            sys.stderr.write("[results]\tlambda that gives minimum mean cv error: " + str(fit['lambda_min']) + "\n")
            sys.stderr.write("[results]\tlambda 1 se away: " + str(fit['lambda_1se']) + "\n")
        sys.stderr.write("[results]\tintercept: " + str(intercept) + "\n")
        sys.stderr.write("[results]\tbetas for 2' structure windows: " + str(downstreamSLBeta) + "\n")
        # plot
        if not lambda_min:
            plt.figure()
            cvglmnetPlot(fit)
            plt.gcf()
            plt.savefig(self.output + "/" + "lambda_cv.pdf")
            plt.clf()
Example #31
    def plot_temp(self):
        ######## plot 3D Temperature ########

        fig2 = plt.figure(figsize=plt.figaspect(0.5))
        ax2 = fig2.add_subplot(1, 2, 1, projection='3d')

        X2 = sp.linspace(0, 5, 5)
        Y2 = sp.linspace(0, 5, 5)

        X2, Y2 = sp.meshgrid(X2, Y2)
        X2, Y2 = X2.ravel(), Y2.ravel()

        #R = sp.sqrt(X**2 + Y**2)
        #Z = sp.sin(R)

        width = depth = 1
        bottom = X2 * 0

        #plt.ion()

        p = int(entryrowplot.get())
        temp_floats = sp.float64(self.data_str['temp'][p].split(";")).reshape(
            5, 5)
        mintemp = np.min(temp_floats)
        mintempplot = mintemp
        mintemptxt = str(round(mintemp, 1))
        maxtemp = np.max(temp_floats)
        maxtempplot = maxtemp
        maxi = maxtempplot - mintempplot + 0.1
        top2 = temp_floats.ravel() - mintemp
        top_scaled2 = (top2 - sp.amin(top2)) / (sp.amax(top2) - sp.amin(top2))
        mycolors = cm.jet(top_scaled2)
        ax2.bar3d(X2,
                  Y2,
                  bottom,
                  width,
                  depth,
                  top2,
                  color=mycolors,
                  alpha=float(entryTrans.get()))
        ax2.set_title('Temperature' + '\n' + '@ ' +
                      self.data_str['sumcurr'][p] + ' A' + '  //   @ ' +
                      self.data['voltage'][p] + ' V')

        ax2.set_zlim(0, maxi)
        ax2.set_zlabel('Temperature +' + mintemptxt + '°C')
        ax2.view_init(elev=25., azim=60)

        plt.show()

        tempplot = ts.strftime(
            "%Y.%m.%d. %H:%M:%S :") + "Temperature plotted"
        self.displaystate.insert(tk.END, tempplot + '\n')
Example #32
def read_hess_params(hessian_file=hessian_file):
    f_in = open(hessian_file)
    for line_tmp in f_in:
        if 'N' in line_tmp:
            line = line_tmp
            break
    N = int(line.split()[1])
    line = next(f_in)
    # fields 1 and 3 of this line hold eps and delta
    eps, delta = (float64(line.split()[i]) for i in (1, 3))
    line = next(f_in)
    line = next(f_in)
    labels = [lab for lab in line.split() if lab != 'labels' and lab != 'xyz']
Example #33
    def convert_codes_to_csc_matrix(codes, depth):
        nr_codes = 1 << depth
        nr_elements = len(codes)

        indptr = sp.cumsum(sp.bincount(codes + 1, minlength=(nr_codes + 1)),
                           dtype=sp.uint64)
        indices = sp.argsort(codes * sp.float64(nr_elements) +
                             sp.arange(nr_elements))
        C = smat.csc_matrix(
            (sp.ones_like(indices, dtype=sp.float32), indices, indptr),
            shape=(nr_elements, nr_codes),
        )
        return C
Example #34
def save2surfgord(lsurf,
                  rsurf,
                  out_dir,
                  surf_name,
                  bfp_path='.',
                  save_png=True):
    # if label is zero, black out surface, attribute should be nan
    num_vert = lsurf.vertices.shape[0]
    lab = spio.loadmat(
        os.path.join(bfp_path, 'supp_data/USCBrain_grayordinate_labels.mat'))
    labs = lab['labels'].squeeze()
    labs = sp.float64(labs)
    lsurf.attributes[labs[:num_vert] == 0] = sp.nan
    rsurf.attributes[labs[num_vert:2 * num_vert] == 0] = sp.nan
    lsurf.vColor[sp.isnan(lsurf.attributes), :] = 0
    rsurf.vColor[sp.isnan(rsurf.attributes), :] = 0

    writedfs(out_dir + '/Right_' + surf_name + '.dfs', rsurf)
    writedfs(out_dir + '/Left_' + surf_name + '.dfs', lsurf)

    if VTK_INSTALLED == 0:
        print('VTK is not installed, screenshots will not be saved.')
        save_png = False

    if save_png:
        # Visualize left hemisphere
        view_patch_vtk(lsurf,
                       azimuth=100,
                       elevation=180,
                       roll=90,
                       outfile=out_dir + '/LeftLateral_' + surf_name + '.png',
                       show=0)
        view_patch_vtk(lsurf,
                       azimuth=-100,
                       elevation=180,
                       roll=-90,
                       outfile=out_dir + '/LeftMedial_' + surf_name + '.png',
                       show=0)
        # Visualize right hemisphere
        view_patch_vtk(rsurf,
                       azimuth=-100,
                       elevation=180,
                       roll=-90,
                       outfile=out_dir + '/RightLateral_' + surf_name + '.png',
                       show=0)
        view_patch_vtk(rsurf,
                       azimuth=100,
                       elevation=180,
                       roll=90,
                       outfile=out_dir + '/RightMedial_' + surf_name + '.png',
                       show=0)
Example #35
def calcCDFs(sampleList, binEdges):
    """
    Example:
    dataEKI = getCSVData(csvFiles['EKI'])
    bMeasures = calcBurstMeasures(dataEKI)
    cdfs = calcCDFs(sampleList=bMeasures['burstDur'], binEdges=bins['burstDur'])
    """
    nSamples = len(sampleList)
    nBinPts = len(binEdges)
    cdfs = list()
    for n in range(nSamples):
        nPts = sc.float64(len(sampleList[n]))
        cdfs.append(sc.zeros(nBinPts))
        for m in range(0, nBinPts):
            samp = sampleList[n]
            cdfs[n][m] = len(sc.where(samp <= binEdges[m])[0]) / nPts
    return sc.array(cdfs)
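A hedged toy call with made-up samples and bin edges, showing the (nSamples, nBinPts) result shape:

import scipy as sc
samples = [sc.rand(100), sc.rand(200) * 2]  # toy data (assumption)
edges = sc.linspace(0, 2, 21)
cdfs = calcCDFs(sampleList=samples, binEdges=edges)
print(cdfs.shape)  # (2, 21)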
Example #36
        print "P_{} = {} a. u.".format(ii/2, Pm[-1])
        backup(backup_suf='{}mbackup'.format(ii))
        restore()
    return Pm,dPm,d2Pm/(field**2)

def backup(calc_files=calc_files, backup_suf='.backup0'):
    for filename in calc_files:
        sp.call("cp -r {0} {0}{1}".format(filename, backup_suf),
                shell=True)

def restore(calc_files=calc_files, backup_suf='.backup0'):
    for filename in calc_files:
        sp.call("cp -r {0}{1} {0}".format(filename, backup_suf),
                shell=True)

charge = float64(sys.argv[1])
atom_type = sys.argv[2]
grd_obj = Grads(excls, coord_pat, control)

de = grd_obj.flatten_grads()
print("E0 = {}".format(grd_obj.en))
px,dpx,d2px =  eval_dipole(charge)
f = open(dipole_txt, 'w')
f.write('#p\n')
sc.savetxt(f, px)
for ii,dpi in enumerate(dpx):
    f.write('#dp {}\n'.format(ii))
    sc.savetxt(f, dpi)
f.write('#d2p\n')
sc.savetxt(f, d2px)
Example #37
from standard_config import *
import subprocess as sp
excls = 'zz3 zz4'.split()
node_dir_prefix = 'node'
srun_command = 'srun -N1 -n1'

hessian_file = 'HESS.DAT'
omega_file = 'OMEGA.DAT.scipy'


eps = 1e-1
ncol = 5

srun_command += ' '
number_of_nodes = int(sys.argv[1])
eps = float64(sys.argv[2])
calc_files = [coord, control, realmos, imagmos, basis, embedding]


class Node(object):
    IDLE, RUN = range(2)

    def __init__(self,node_dir, calc_files):
        self.calc_files = calc_files[:]
        self.node_dir = node_dir
        sp.call("mkdir {}".format(node_dir), shell=True)
        for x in calc_files:
            sp.call("cp {0} {1}/{0}".format(x, node_dir), shell=True)
        self.process = None
        self.occupied = None
        self.status = Node.IDLE
Example #38
    def normalize(self, data):
        return scipy.float64(data) / 255 - 0.5
Example #39
    def set_boundary_conditions(self, bctype='', bcvalue=None, pores=None,
                                throats=None, mode='merge'):
        r"""
        Apply boundary conditions to specified pores or throats

        Parameters
        ----------
        bctype : string
            Specifies the type or the name of boundary condition to apply. \
            The type can be one of the following:
                 - 'Dirichlet' : Specify the quantity in each location
                 - 'Neumann' : Specify the flow rate into each location
                 - 'Neumann_group' : Specify the net flow rate into a group
                   of pores/throats
        component : OpenPNM Phase object
            The Phase object to which this BC applies
        bcvalue : array_like
            The boundary value to apply, such as concentration or rate
        pores : array_like
            The pores where the boundary conditions should be applied
        throats : array_like
            The throats where the boundary conditions should be applied
        mode : string, optional
            Controls how the conditions are applied.  Options are:

            - 'merge': Inserts the specified values, leaving existing values \
              elsewhere
            - 'overwrite': Inserts specified values, clearing all other \
              values for that specific bctype
            - 'remove': Removes boundary conditions from specified locations

        Notes
        -----
        - It is not possible to have multiple boundary conditions for a
          specified location in just one algorithm. So when new condition is
          going to be applied to a specific location, any existing one should
          be removed or overwritten.
        - BCs for pores and for throats should be applied independently.
        """
        try:
            self._existing_BC
        except AttributeError:
            self._existing_BC = []
        if sp.size(self._phases) != 1:
            raise Exception('In each use of set_boundary_conditions ' +
                            'method, one component should be specified ' +
                            'or attached to the algorithm.')
        else:
            component = self._phases[0]

        if mode not in ['merge', 'overwrite', 'remove']:
            raise Exception('The mode (' + mode + ') cannot be applied to ' +
                            'the set_boundary_conditions!')

        logger.debug('BC method applies to the component: ' + component.name)
        # Validate bctype
        if bctype == '':
            raise Exception('bctype must be specified!')
        # If mode is 'remove', also bypass checks
        if mode == 'remove':
            if pores is None and throats is None:
                for item in self.labels():
                    if bctype == item.split('.')[-1]:
                        element = item.split('.')[0]
                        try:
                            del self[element + '.' + 'bcval_' + bctype]
                        except KeyError:
                            pass
                        try:
                            del self[element + '.' + bctype]
                        except KeyError:
                            pass
                logger.debug('Removing ' + bctype + ' from all locations' +
                             ' for ' + component.name + ' in ' +
                             self.name)
                self._existing_BC.remove(bctype)
            else:
                if pores is not None:
                    prop_label = 'pore.' + 'bcval_' + bctype
                    self[prop_label][pores] = sp.nan
                    info_label = 'pore.' + bctype
                    self[info_label][pores] = False
                    logger.debug('Removing ' + bctype + ' from the ' +
                                 'specified pores for ' + component.name +
                                 ' in ' + self.name)
                if throats is not None:
                    prop_label = 'throat.' + 'bcval_' + bctype
                    self[prop_label][throats] = sp.nan
                    info_label = 'throat.' + bctype
                    self[info_label][throats] = False
                    logger.debug('Removing ' + bctype + ' from the ' +
                                 'specified throats for ' +
                                 component.name + ' in ' + self.name)
            return
        # Validate pores/throats
        if pores is None and throats is None:
            raise Exception('pores/throats must be specified')
        elif pores is not None and throats is not None:
            raise Exception('BC for pores and throats must be specified ' +
                            'independently.')
        elif throats is None:
            element = 'pore'
            loc = sp.array(pores, ndmin=1)
            all_length = self.Np
        elif pores is None:
            element = 'throat'
            loc = sp.array(throats, ndmin=1)
            all_length = self.Nt
        else:
            raise Exception('Problem with the pore and/or throat list')
        # Validate bcvalue
        if bcvalue is not None:
            # Check bcvalues are compatible with bctypes
            if bctype == 'Neumann_group':  # Only scalars are acceptable
                if sp.size(bcvalue) != 1:
                    raise Exception('When specifying Neumann_group, bcval ' +
                                    'should be a scalar')
                else:
                    bcvalue = sp.float64(bcvalue)
                    if 'Neumann_group' not in self._existing_BC:
                        setattr(self, '_' + element +
                                '_Neumann_group_location', [])
                    getattr(self, '_' + element +
                            '_Neumann_group_location').append(loc)
            else:  # Only scalars or Np/Nt-long are acceptable
                if sp.size(bcvalue) == 1:
                    bcvalue = sp.ones(sp.shape(loc)) * bcvalue
                elif sp.size(bcvalue) != sp.size(loc):
                    raise Exception('The pore/throat list and bcvalue list ' +
                                    'are different lengths')
        # Confirm that prop and label arrays exist
        l_prop = element + '.' + 'bcval_' + bctype
        if l_prop not in self.props():
            self[l_prop] = sp.ones((all_length,), dtype=float) * sp.nan
        l_label = element + '.' + bctype
        if l_label not in self.labels():
            self[l_label] = sp.zeros((all_length,), dtype=bool)
        # Check all BC from specified locations, prior to setting new ones
        for item in self.labels():
            bcname = item.split('.')[-1]
            if bcname in self._existing_BC and item.split('.')[0] == element:
                if mode in ['merge', 'overwrite']:
                    try:
                        c1_label = element + '.' + 'bcval_' + bcname
                        self[c1_label][loc]  # raises KeyError if the prop is absent
                        condition1 = sp.isnan(self[c1_label][loc]).all()
                        c2_label = element + '.' + bcname
                        condition2 = sp.sum(self[c2_label][loc]) == 0
                        if not (condition1 and condition2):
                            if mode == 'merge':
                                raise Exception('Because of the existing ' +
                                                'BCs, the method cannot ' +
                                                'apply new BC with the merge' +
                                                ' mode to the specified pore' +
                                                '/throat.')
                            elif (mode == 'overwrite' and bcname != bctype):
                                raise Exception('Because of the existing ' +
                                                'BCs, the method cannot ' +
                                                'apply new BC with overwrite' +
                                                ' mode. This mode only ' +
                                                'overwrites this bctype, ' +
                                                'not the other ones.')
                    except KeyError:
                        pass
        # Set boundary conditions based on supplied mode
        if mode == 'merge':
            if bcvalue is not None:
                self[l_prop][loc] = bcvalue
            self[l_label][loc] = True
            if bctype not in self._existing_BC:
                self._existing_BC.append(bctype)
        elif mode == 'overwrite':
            self[l_prop] = sp.ones((all_length,), dtype=float) * sp.nan
            if bcvalue is not None:
                self[l_prop][loc] = bcvalue
            self[l_label] = sp.zeros((all_length,), dtype=bool)
            self[l_label][loc] = True
            if bctype not in self._existing_BC:
                self._existing_BC.append(bctype)
Example #40
    def _Go_Button_fired(self):

        # Transform the internal dict holding the model to a Pandas dataframe
        # that the lmfit wrapper will digest:
        tofit = pd.DataFrame.from_dict(self.Components).T
        print(tofit)
        print(tofit.index)
        tofit.columns = ['Pos', 'Sigma', 'Ampl', 'Identifier']
        tofit['Line center'] = self.line_center
        tofit.set_value('Contin', 'Lock', self.LockConti)
        for lines in self.Components.keys():
            if lines == 'Contin':
                continue
            tofit.set_value(lines, 'Lock', self.Locks[lines][:3])
        print(self.Components)
        print(tofit)

        # Make sure no dataarrays belonging to the parent class are altered.
        x = self.x.copy()
        data = self.indata.copy()
        errs = self.errs.copy()

        # If fitranges set, use them, otherwise use all data.
        fitrange = []
        if len(self.fitrange) == 0:
            self.fitrange = [(self.line_center - 15, self.line_center + 15)]
            print('Fitrange 1: ', fitrange)
        if len(self.fitrange) > 0:
            for ran in self.fitrange:
                print('Ran', ran)
                rmin, rmax = ran[0], ran[1]
                fitrange += sp.where((self.x > rmin) & (self.x < rmax))
            fitrange = sp.hstack(fitrange[:])
            fitrange.sort()
            x = x[fitrange]
            data = data[fitrange]
            errs = errs[fitrange]
        try:
            import lmfit_wrapper as lw
        except ImportError:
            print('Could not import LMfit')
            return

        params = lw.load_params(tofit)
        result = lw.fit_it(
            #lw.build_model,
            params,
            args=(self.x[fitrange], self.indata[fitrange], self.errs[fitrange]))

        output = lw.params_to_grism(params, output_format='df')
        print(output)
        print(tofit)
        output['Identifier'] = tofit['Identifier']
        print(output)
        output.set_value('Contin', 'Identifier', sp.float64('nan'))
        output['Pos'] -= tofit['Line center']
        print(output)

        outdict = {}
        for i in output.index:
            row = output.ix[i]
            if i == 'Contin':
                outdict[i] = row['Ampl']
            else:
                outdict[i] = [row['Pos'], row['Sigma'], \
                              row['Ampl'], row['Identifier']]
        self.Components = outdict
        self.import_model()
        return result
Example #41
    def set_boundary_conditions(self,component=None,bctype='',bcvalue=None,pores=None,throats=None,mode='merge'):
        r'''
        Apply boundary conditions to specified pores or throats

        Parameters
        ----------
        bctype : string
            Specifies the type or the name of boundary condition to apply.  The type can be one of the following:

            - 'Dirichlet' : Specify the quantity in each location
            - 'Neumann' : Specify the flow rate into each location
            - 'Neumann_group' : Specify the net flow rate into a group of pores/throats
        component : OpenPNM Phase object
            The Phase object to which this BC applies
        bcvalue : array_like
            The boundary value to apply, such as concentration or rate
        pores : array_like
            The pores where the boundary conditions should be applied
        throats : array_like
            The throats where the boundary conditions should be applied
        mode : string, optional
            Controls how the conditions are applied.  Options are:

            - 'merge': Inserts the specified values, leaving existing values elsewhere
            - 'overwrite': Inserts specified values, clearing all other values
            - 'remove': Removes boundary conditions from specified locations

        Notes
        -----
        1. It is not possible to have multiple boundary conditions for a specified location in just one algorithm.
        So when a new condition is going to be applied to a specific location, any existing one
        should be removed or overwritten.
        2. BCs for pores and for throats should be applied independently.
        '''
        try: self._existing_BC
        except AttributeError: self._existing_BC = []
        if component is None:
            if sp.size(self._phases)!=1:
                raise Exception('In each use of set_boundary_conditions method, one component should be specified or attached to the algorithm.' )
            else:
                component = self._phases[0]
        else:
            if sp.size(component)!=1:
                raise Exception('For using set_boundary_conditions method, only one component should be specified.')
        
        if mode not in ['merge','overwrite','remove']:
            raise Exception('The mode ('+mode+') cannot be applied to the set_boundary_conditions!')

        logger.debug('BC applies to the component: '+component.name)
        #If mode is 'remove', also bypass checks
        if mode == 'remove':
            if pores is None and throats is None:
                if bctype=='':
                    raise Exception('No bctype/pore/throat is specified')
                else:
                    for item in self.labels():
                        if bctype == (item.split('.')[-1]).replace(self._phase.name+'_',"") :
                            element = item.split('.')[0]
                            try:
                                del self[element+'.'+component.name+'_bcval_'+bctype]
                            except KeyError: pass
                            try:
                                del self[element+'.'+component.name+'_'+bctype]
                            except KeyError:
                                pass
                    logger.debug('Removing '+bctype+' from all locations for '+component.name+' in '+self.name)
                    self._existing_BC.remove(bctype)
            else:
                if pores is not None:
                    if bctype!='':
                        self['pore.'+component.name+'_bcval_'+bctype][pores] = sp.nan
                        self['pore.'+component.name+'_'+bctype][pores] = False
                        logger.debug('Removing '+bctype+' from the specified pores for '+component.name+' in '+self.name)
                    else:   raise Exception('Cannot remove BC from the pores unless bctype is specified')

                if throats is not None:
                    if bctype!='':
                        self['throat.'+component.name+'_bcval_'+bctype][throats] = sp.nan
                        self['throat.'+component.name+'_'+bctype][throats] = False
                        logger.debug('Removing '+bctype+' from the specified throats for '+component.name+' in '+self.name)
                    else:   raise Exception('Cannot remove BC from the throats unless bctype is specified')

            return
        #Validate bctype
        if bctype == '':
            raise Exception('bctype must be specified')
        #Validate pores/throats
        if pores is None and throats is None:
            raise Exception('pores/throats must be specified')
        elif pores is not None and throats is not None:
            raise Exception('BC for pores and throats must be specified independently')
        elif  throats is None:
            element ='pore'
            loc = sp.array(pores,ndmin=1)
            all_length = self.num_pores()
        elif pores is None:
            element ='throat'
            loc = sp.array(throats,ndmin=1)
            all_length = self.num_throats()
        else:
            raise Exception('Problem with the pore and/or throat list')
        #Validate bcvalue
        if bcvalue is not None:
            #Check bcvalues are compatible with bctypes
            if bctype == 'Neumann_group':  #Only scalars are acceptable
                if sp.size(bcvalue) != 1:
                    raise Exception('When specifying Neumann_group, bcval should be a scalar')
                else:   
                    bcvalue = sp.float64(bcvalue)
                    if 'Neumann_group' not in self._existing_BC: 
                        setattr(self,'_'+element+'_'+component.name+'_Neumann_group_location',[])
                    getattr(self,'_'+element+'_'+component.name+'_Neumann_group_location').append(loc)
            else: #Only scalars or Np/Nt-long are acceptable
                if sp.size(bcvalue) == 1:
                    bcvalue = sp.ones(sp.shape(loc))*bcvalue
                elif sp.size(bcvalue) != sp.size(loc):
                    raise Exception('The pore/throat list and bcvalue list are different lengths')
        #Confirm that prop and label arrays exist
        if element+'.'+component.name+'_bcval_'+bctype not in self.props():
            self[element+'.'+component.name+'_bcval_'+bctype] = sp.ones((all_length,),dtype=float)*sp.nan
        if element+'.'+component.name+'_'+bctype not in self.labels():
            self[element+'.'+component.name+'_'+bctype] = sp.zeros((all_length,),dtype=bool)
        #Check all BC from specified locations, prior to setting new ones
        for item in self.labels():
            bcname = (item.split('.')[-1]).replace(component.name+'_',"")
            if bcname in self._existing_BC  and item.split('.')[0]==element:
                if mode=='merge':
                    try:
                        self[element+'.'+component.name+'_bcval_'+bcname][loc]
                        if not (sp.isnan(self[element+'.'+component.name+'_bcval_'+bcname][loc]).all() and sp.sum(self[element+'.'+component.name+'_'+bcname][loc])==0):
                            raise Exception('Because of the existing BCs, the method cannot apply new BC with the merge mode to the specified pore/throat.')
                    except KeyError: pass
        #Set boundary conditions based on supplied mode
        if mode == 'merge':
            if bcvalue is not None:   self[element+'.'+component.name+'_bcval_'+bctype][loc] = bcvalue
            self[element+'.'+component.name+'_'+bctype][loc] = True
            if bctype not in self._existing_BC: self._existing_BC.append(bctype)
        elif mode == 'overwrite':
            self[element+'.'+component.name+'_bcval_'+bctype] = sp.ones((all_length,),dtype=float)*sp.nan
            if bcvalue is not None:   self[element+'.'+component.name+'_bcval_'+bctype][loc] = bcvalue
            self[element+'.'+component.name+'_'+bctype] = sp.zeros((all_length,),dtype=bool)
            self[element+'.'+component.name+'_'+bctype][loc] = True
            if bctype not in self._existing_BC: self._existing_BC.append(bctype)