Example #1
def get_totaldos(lines, index, natoms, nedos, efermi):
    write_content = []  # formatted lines to write out (e.g. DOSCAR0)
    plt = []            # rows used for plotting
    for n in range(nedos):
        part_content = ''
        plt_content = []
        line = lines[index].strip().split()
        e = float(line[0])
        e_f = e - efermi  # shift the energy so the Fermi level sits at zero
        plt_content.append(round(e_f, 8))
        part_content += '%15.8f ' % e_f
        for col in range(1, len(line)):
            dos = float(line[col])
            # gogal is a module-level flag; unless it is 'total',
            # normalise the first DOS column by the number of atoms
            if gogal != 'total' and col == 1:
                dos = dos / natoms
            part_content += '%15.8f ' % dos
            plt_content.append(round(dos, 8))
        part_content += '\n'
        write_content.append(part_content)
        plt.append(plt_content)
        index += 1
    return write_content, plt
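A minimal usage sketch, assuming the usual VASP DOSCAR layout; the file name, the header indices, and the gogal value are assumptions rather than part of the original code:

gogal = 'total'                      # module-level flag checked inside get_totaldos
with open('DOSCAR') as f:
    lines = f.readlines()
natoms = int(lines[0].split()[0])    # first entry of line 1: number of ions
header = lines[5].split()            # line 6: EMAX  EMIN  NEDOS  EFERMI  ...
nedos = int(float(header[2]))
efermi = float(header[3])
content, rows = get_totaldos(lines, 6, natoms, nedos, efermi)  # total DOS starts at line 7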
Example #2
import math


def swapProb(temps):
    """Swap factors exp((T_t - T_i) / T_t) for adjacent pairs in the temperature ladder."""
    p = []
    for t in range(len(temps)):
        for i in range(len(temps)):
            f = math.exp((temps[t] - temps[i]) / temps[t])
            # keep the factor only for the temperature directly below t in the ladder
            if t < len(temps) - 1 and temps[i] == temps[t + 1]:
                p.append(f)
    return p
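A tiny usage sketch (the temperature ladder is illustrative, e.g. for a parallel-tempering style swap schedule):

temps = [4.0, 2.0, 1.0]              # descending replica temperatures
print(swapProb(temps))               # [exp(0.5), exp(0.5)] ~ [1.6487..., 1.6487...]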
Example #3
from math import sqrt


def setInitPert(np, ks, w):
    global tau, MPL, epsilon
    p = []
    for i in range(len(ks)):
        """ The perturvations in the adiabatic universe. """
        p.append(Perturbation([0.000], [0.000], [0.00, -(3./2.) * w[1] * np[i], -(3./2.) * w[2] * np[i], -(3./2.) * w[3] * np[i]], [0.00, (ks[i] ** 2) * tau * np[i]/2.,  (ks[i] ** 2) * tau * np[i]/2.,  (ks[i] ** 2) * tau * np[i]/2.], np[i], ks[i]))
        
        """ The perturvations in the adiabatic universe. """
        p.append(Perturbation([-3. * MPL * np[i] * sqrt(epsilon)], [0.000], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], 0.0, ks[i]))
    return p
Example #4
def get_pdos(lines, index, natoms, nedos, efermi):
    '''
    :param lines: contents of the input file, read in as a list of lines
    :param index: line number at which the partial-DOS block starts
    :param natoms: number of atoms
    :param nedos: number of energy points in the DOS
    :param efermi: position of the Fermi level
    :return: first a list of partial-DOS lines ready to be written out,
             second a list of rows that can be used for plotting
    The plotting columns are: the energy (x value), the nine orbital-resolved
    values, the s+p+d total, and the per-orbital sums:
    x s p1 p2 p3 d1 d2 d3 d4 d5 total s p_total d_total
    (14 columns: 0, 1-9, 10, 11-13)
    '''
    write_content = []  # formatted lines to write out (e.g. DOSCAR0)
    plt = []            # rows used for plotting
    a = lines[index - 1].strip().split()  # header line preceding the block

    for n in range(nedos):
        part_content = ''
        plt_content = []
        s, p, d, total = (0, 0, 0, 0)

        line = lines[index].strip().split()
        if len(line) == 10:  # energy column plus nine orbital-resolved columns
            e = float(line[0])
            e_f = e - efermi  # shift the energy so the Fermi level sits at zero
            plt_content.append(round(e_f, 8))
            part_content += '%15.8f  ' % e_f
            for col in range(1, len(line)):
                dos = float(line[col])
                if col == 1:
                    s = dos
                elif col > 1 and col <= 4:
                    p += dos
                elif col > 4:
                    d += dos
                plt_content.append(round(dos, 8))
            total = s + p + d
            part_content += '%15.8f' % total
            part_content += '%15.8f' % s
            part_content += '%15.8f' % p
            part_content += '%15.8f' % d
            part_content += '\n'
            plt_content.append(total)
            plt_content.append(s)
            plt_content.append(p)
            plt_content.append(d)
            write_content.append(part_content)
            plt.append(plt_content)
            index += 1
    return write_content, plt
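A similar usage sketch for the partial DOS, again assuming the usual VASP DOSCAR layout in which each atom's block is preceded by its own header line (all indices here are assumptions):

with open('DOSCAR') as f:
    lines = f.readlines()
natoms = int(lines[0].split()[0])
header = lines[5].split()                    # EMAX  EMIN  NEDOS  EFERMI  ...
nedos = int(float(header[2]))
efermi = float(header[3])
first_atom_block = 6 + nedos + 1             # skip the 6 header lines, the total DOS, and the atom header
content, rows = get_pdos(lines, first_atom_block, natoms, nedos, efermi)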
Example #5
    def jsonInput(self, filename):
        f = open(filename, 'r')
        jsonData = json.load(f)
        f.close()
        #angle
        datas = []
        for user in jsonData:
            #data is joint angle
            data = []
            #dts:data_size, dtd:data_dimension
            self.dts = len(user["datas"])
            self.dtd = len(user["datas"][0]["data"])
            for j in range(self.dts):
                data.append(user["datas"][j]["data"])
            datas.append(data)

        
        poses = []
        for user in jsonData:
            pos = []
            psize = len(user["datas"][0]["jdata"])
            for j in range(self.dts):
                pls = []
                for p in range(psize):
                    pl = []
                    for xyz in range(3):
                        pl.append(user["datas"][j]["jdata"][p][xyz])
                    pls.append(pl)
                pos.append(pls)
            poses.append(pos)

        #time (note: as of 1.14 the values are not filled in yet)
        time = []
        for t in jsonData[0]["datas"]:
            time.append(t["time"])
        
        #print "poses[0]:",poses[0]

        #joint indices, used for visualisation
        f = open('/home/uema/catkin_ws/src/rqt_cca/joint_index.json', 'r')
        jsonIdxDt = json.load(f)
        f.close()
        self.jIdx = []
        for idx in jsonIdxDt:
            jl = []
            for i in idx:
                jl.append(i)
            self.jIdx.append(jl)
        
        return datas[0], datas[1], poses[0], poses[1], time
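For reference, a minimal sketch of the JSON shape this method expects, inferred from the field accesses above (the concrete values are only illustrative):

# Two users, each holding a list of frames under "datas":
# [
#   {"datas": [
#       {"data":  [angle_1, angle_2, ...],          # joint angles (self.dtd values)
#        "jdata": [[x, y, z], [x, y, z], ...],      # joint positions
#        "time":  0.0},
#       ...                                          # self.dts frames in total
#   ]},
#   {"datas": [...]}                                 # second user
# ]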
Example #6
from math import sqrt


def setInitPert(np, ks):
    global tau, MPL, epsilon
    p = []
    for i in range(len(ks)):
        """ The perturvations in the adiabatic universe. """
        p.append(
            Perturbation([0.000], [0.000],
                         [-2 * np[i], -(3. / 2.) * np[i], -2 * np[i]],
                         [(ks[i]**2) * tau * np[i] / 2.,
                          (ks[i]**2) * tau * np[i] / 2.,
                          (ks[i]**2) * tau * np[i] / 2.], np[i], ks[i]))
        """ The perturbations in the entropic universe. """
        p.append(
            Perturbation([-3. * MPL * np[i] * sqrt(epsilon)], [0.000],
                         [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0, ks[i]))
    return p
Example #7
def macro_avg_precision(clusters_merged_labelled):
    # collect the per-label item counts of every cluster
    precision = []
    for label, items in clusters_merged_labelled.items():
        p = []
        for _, item in items.items():
            p.append(item)
        precision.append(p)

    # per-cluster precision: dominant label count over total count, in percent
    macro_precision = []
    for i in precision:
        pre = (max(i) / sum(i)) * 100
        macro_precision.append(pre)

    # macro average: mean of the per-cluster precisions
    macro_precision = sum(macro_precision) / len(macro_precision)

    return macro_precision
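A small illustrative input (the label counts are made up): each cluster's precision is its dominant label count over its total count, and the result is the mean across clusters:

clusters = {
    'cluster_a': {'spam': 8, 'ham': 2},   # precision 80%
    'cluster_b': {'spam': 1, 'ham': 9},   # precision 90%
}
print(macro_avg_precision(clusters))      # 85.0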
Example #8
    def poseInput(self, filename):
        """
        f = open(filename, 'r')
        js = json.load(f)
        f.close()

        #fposename = "/home/uema/catkin_ws/src/rqt_cca/data2/"+str(js["prop"]["fname"])
        fposename = str(js["prop"]["fname"])
        """
        with h5py.File(filename, 'r') as f:

            # the HDF5 file stores the name of the JSON pose file to load
            fposename = f["/prop/fname"].value

            print("open pose file:", fposename)
            fp = open(fposename, 'r')
            jsp = json.load(fp)
            fp.close()

            datas = []
            for user in jsp:
                # data is joint angle
                data = []
                for j in range(self.dts):
                    data.append(user["datas"][j]["data"])
                datas.append(data)

            poses = []
            for user in jsp:
                pos = []
                psize = len(user["datas"][0]["jdata"])
                for j in range(self.dts):
                    pls = []
                    for p in range(psize):
                        pl = []
                        for xyz in range(3):
                            pl.append(user["datas"][j]["jdata"][p][xyz])
                        pls.append(pl)
                    pos.append(pls)
                poses.append(pos)

        return datas[0], datas[1], poses[0], poses[1]
Example #9
def run_test(datasetdir, label_col, data_begin, data_end, numclasses, model_array, graphing, printing, folds, ps, pe, outfile):
    result_log = open(outfile, "w")
    corrplt = resplt.subplot()
    dataset_str_array = datasetdir.split("/")
    dataset_str_nameP = dataset_str_array[len(dataset_str_array)-1].split(".")
    dataset_str_name = dataset_str_nameP[0]
    if(not printing):
        blockPrint()
    else:
        enablePrint(dataset_str_name + "_" + str(model_array) + "_" + str(folds)+".txt")
    dataset_str_name = dataset_str_array[len(dataset_str_array)-1]
    if dataset_str_name.endswith(".csv"):
        dataset_str_name = dataset_str_name[:-len(".csv")]  # strip(".csv") would remove characters, not the suffix
    codebook = MatrixGeneration.GenerateMatrix(numclasses, numclasses)

    listOfCBs = [codebook]

    accuracy_array = [[]]
    correlation_array = [[]]

    for model in model_array:
        accuracy_array.append([])
        correlation_array.append([])
    print("starting run")

    pdomain = []
    p = ps
    while p >= pe:
        print("DATA PER CLASSIFIER " + str(p))
        pdomain.append(round(p, 2))
        counter = 0
        for model in model_array:
            temp = getECOCBaselineAccuracies_VC_VD(datasetdir, listOfCBs, label_col, data_begin, data_end, [model], p, folds, graphing)
            accuracy_array[counter].append(temp[0])
            correlation_array[counter].append(temp[1])
            counter += 1
        mixed = getECOCBaselineAccuracies_VC_VD(datasetdir, listOfCBs, label_col, data_begin, data_end, model_array, p, folds, graphing)
        accuracy_array[len(accuracy_array)-1].append(mixed[0])
        correlation_array[len(correlation_array)-1].append(mixed[1])
        p = p - 0.1

    labels = "P     " 
    accuracy_results = ""
    correlation_results = ""

    for j in range(len(model_array)):
        labels += models_String[model_array[j]-1] + "     "
    labels += "ALL\n"

    for i in range(len(pdomain)):
        accuracy_results += str(pdomain[i]) + "     "
        for j in range(len(model_array)+1):
            accuracy_results += str(accuracy_array[j][i]) + "     "
        accuracy_results += "\n"
    for i in range(len(pdomain)):
        correlation_results += str(pdomain[i]) + "     "
        for j in range(len(model_array)+1):
            correlation_results += str(correlation_array[j][i]) + "     "
        correlation_results += "\n"

    result_log.write(labels + "Accuracies\n" + accuracy_results + "Correlation\n" + correlation_results)
        
    print(accuracy_array)
    print(correlation_array)
    if (graphing):
        accplt = [[]]
        corrplt = [[]]
        for model in model_array:
            accplt.append([])
            corrplt.append([])
        counter = 0
        for acc, corr in zip(accuracy_array, correlation_array):
            for modelacc, modelcorr in zip(acc, corr):
                accplt[counter].append(modelacc[0])
                corrplt[counter].append(modelcorr[0])
            counter+=1
        line_references = ['-', ':', '-.', '--']
        resplt.suptitle(dataset_str_name + " Varied Data Accuracies")
        resplt.xlabel("Percent of Data Per Learner")
        resplt.ylabel("Accuracy")
        
        for i in range(len(model_array)):
            resplt.plot(pdomain, accplt[i], line_references[i], label= models_String[model_array[i]-1])

        resplt.plot(pdomain, accplt[len(accplt)-1], line_references[len(line_references)-1], label = "All")
        resplt.legend(loc= "lower right")
        resplt.savefig(dataset_str_name + " Varied Data Accuracies.png")

        resplt.clf()

        resplt.suptitle(dataset_str_name + " Varied Data Correlation")
        resplt.xlabel("Percent of Data Per Learner")
        resplt.ylabel("Correlation")
        for i in range(len(model_array)):
            resplt.plot(pdomain, corrplt[i], line_references[i], label= models_String[model_array[i]-1])
        
        resplt.plot(pdomain, corrplt[len(corrplt)-1], line_references[len(line_references)-1], label = "All")
        resplt.legend(loc= "lower right")
        resplt.savefig(dataset_str_name + " Varied Data Correlation.png")
    result_log.close()
    return result_log
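A hypothetical call sketch (the CSV path, column indices, and model IDs are illustrative; models_String, MatrixGeneration, and the ECOC helpers are assumed to be defined in the same module):

# Sweep the per-learner data fraction from 0.9 down to 0.5 in steps of 0.1,
# evaluating models 1-3 individually and all together with 5-fold CV.
run_test("data/iris.csv", label_col=4, data_begin=0, data_end=4, numclasses=3,
         model_array=[1, 2, 3], graphing=True, printing=False, folds=5,
         ps=0.9, pe=0.5, outfile="iris_varied_data_results.txt")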
Example #10
File: parse_cat.py  Project: balbinot/mwgc
lines = a.readlines()  # 'a' is assumed to be the already-opened catalogue file

d = OrderedDict({})
p = []
coldefs = []

for j,line in enumerate(lines):
    if 'ID' in line:
        colnames = line.split()
        tmp = colnames
        for i,col in enumerate(colnames):
            if '+' in col:
                tmp[i] = tmp[i-1]+'err'
            d[tmp[i]] = []
        coldefs.append(tmp)
        p.append(j+2)
    if 'NGC 7492' in  line:
        p.append(j+1)

#datalines = lines[p[0]:p[1]] + lines[p[2]:p[3]] + lines[p[4]:p[5]]
datalines = lines[p[0]:p[1]]

# Part 1
for j, line in enumerate(datalines):
    cols = coldefs[0]
    d[cols[0]].append(line[0:9].strip())
    d[cols[1]].append(line[10:21].strip())
    d[cols[2]].append(line[23:37].strip())
    d[cols[3]].append(line[38:51].strip())
    d[cols[4]].append(line[52:60].strip())
    d[cols[5]].append(line[61:68].strip())
Yc = []
for el in Diameters:
    Ds.append(el[0])
    Xc.append(el[1])
    Yc.append(el[2])
lines, t, p = file_parser("0_700_out.txt")
if len(t)>len(Ds):
    del t[-(len(t)-len(Ds)):len(t)]
    del p[-(len(p)-len(Ds)):len(p)]
if len(t)<len(Ds):
    last_t = t[-1]
    last_p = p[-1]
    n_t = len(t)  # remember the original length; len(t) grows inside the loop below
    for i in range(n_t, len(Ds)):
        t.append(last_t + (i - n_t) * FPS)
    for i in range(len(p), len(Ds)):
        p.append(last_p)
p = array(p)
t = array(t)
pixTomm = 9.9/132.0
D = pixTomm*array(Ds)
R = array(Residus)
pixTomm = 9.9/132.0
X_center = pixTomm*(array(Xc)-Xc[0])
yc0 = Yc[0]
Y_center = pixTomm*(array(Yc)-yc0)
Y_lines = array(Y_line)
L0 = Y_lines[0]
spring_displacement = (Y_lines-L0)*pixTomm

# Dat_Xc_t = column_stack((t, X_center))
# Dat_Yc_t = column_stack((t, Y_center))