Example #1
def plotDelay():
    pk_filename = "data232-LiRudy-2.dat"
    mio_filename = "data242-LiRudy-2.dat"
    mat0 = genfromtxt(pk_filename)
    mat1 = genfromtxt(mio_filename)
    pyplot.plot(mat0[:, 0],
                mat0[:, 1],
                label="Purkinje",
                linestyle="-",
                linewidth=2,
                color="black")
    #pyplot.plot(mat1[:,0], mat1[:,1], label = "Miocardio", linestyle ="--", linewidth=2, color="black")
    pyplot.plot(mat1[:, 0],
                mat1[:, 1],
                label="Myocardium",
                linestyle="--",
                linewidth=2,
                color="black")
    pyplot.grid()
    pyplot.tick_params(labelsize=12)
    pyplot.xlim([0, 50])
    #pyplot.title("Tempo ativacao - Purkinje x Miocardio",size=15)
    #pyplot.xlabel("Tempo de ativacao (ms)", size=15)
    #pyplot.ylabel("Potencial transmembranico (mV)", size=15)
    pyplot.title("Activation Time - Purkinje x Myocardium (LiRudy)", size=15)
    pyplot.xlabel("Activation Time (ms)", size=15)
    pyplot.ylabel("Transmembrane Potential (mV)", size=15)
    pyplot.legend(loc=0, prop={'size': 16})
    pyplot.savefig("delay-LiRudy-block-EN.pdf")
Example #2
def plotDelay():
    pk_filename = "data232-LiRudy-2.dat"
    mio_filename = "data242-LiRudy-2.dat"
    mat0 = genfromtxt(pk_filename)
    mat1 = genfromtxt(mio_filename)
    pyplot.plot(mat0[:, 0],
                mat0[:, 1],
                label="Purkinje",
                linestyle="-",
                linewidth=2,
                color="black")
    pyplot.plot(mat1[:, 0],
                mat1[:, 1],
                label=u"Miocárdio",
                linestyle="--",
                linewidth=2,
                color="black")
    pyplot.grid()
    pyplot.tick_params(labelsize=12)
    pyplot.xlim([0, 50])
    pyplot.title(u"Tempo ativação - Purkinje x Miocárdio (LiRudy)", size=15)
    pyplot.xlabel(u"Tempo de ativação (ms)", size=15)
    pyplot.ylabel(u"Potencial transmembrânico (mV)", size=15)
    pyplot.legend(loc=0, prop={'size': 16})
    pyplot.savefig("delay-LiRudy-block.pdf")
Example #3
def showSAXSProfiles(exp_data, model_data):
    from matplotlib import pyplot
    from pylab import genfromtxt
    # read experimental and model data
    mat0 = genfromtxt(exp_data)
    mat1 = genfromtxt(model_data)
    # plot both profiles together
    pyplot.plot(mat0[:,0], mat0[:,1], label = "Experimental")
    pyplot.plot(mat1[:,0], mat1[:,1], label = "Model")
    pyplot.legend()
    pyplot.show()
Example #4
def LoadEnvCond(arg, dirname, files):
    for file in files:
        Grand_var = p.nan
        Grand_std = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                frame_scaling = 1.0 / (data[1, 0] - data[0, 0])
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                l = re.split(" ", ln.getline(filepath_turb, 29))
                rand_death_factor = float(l[6])
                # frame size must be an int so it can be used as a slice index
                frame_half_size = int(p.rint((1.0 / (data[:, 11]
                    / data[:, 4]).mean()) * 0.5 * frame_scaling))
                mean_turbul = p.zeros((data.shape[0]
                    - 2 * frame_half_size, 2))
                mean_turbul[:, 0] = data[frame_half_size:
                    -frame_half_size][:, 0].copy()
                k = frame_half_size
                for j in range(mean_turbul.shape[0]):
                    mean_turbul[j, 1] = data[k - frame_half_size:
                        k + frame_half_size, 1].var()
                    k = k + 1
                Grand_var = mean_turbul[:, 1].mean()
                Grand_std = mean_turbul[:, 1].std()
                Super_total_var = data[:, 1].var()
                arg.append((Grand_var, Grand_std, turb_param,
                            rand_death_factor, Super_total_var))
            else:
                break
Example #5
def readSparseSystem(filename):
    """

    Convert's iSAM library's output (when printing sparse 
    matrices) to a scipy sparse matrix

    Returns
    -------
    
    a sparse COO matrix, the Cholesky factor R of the 
    information matrix
    
    """
    f = open(filename, 'r')
    line = f.readline()
    f.close()
    dimStr = re.search('[0-9]+x[0-9]+', line).group(0)
    dim = int(dimStr.split('x')[0])

    data = pl.genfromtxt(filename)
    data = data[1:, :]
    rows = data[:, 0].astype(int)
    cols = data[:, 1].astype(int)
    vals = data[:, 2]

    R = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(dim, dim))
    return R
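A minimal round-trip sketch of the layout this parser expects. The header line below is a stand-in chosen to satisfy the NxN regex while keeping genfromtxt's column count consistent; the exact header iSAM prints may differ. It assumes the re, pylab-as-pl, and scipy.sparse imports the function already relies on.

# hypothetical input file; real iSAM output may format its header differently
with open("R_factor.txt", "w") as out:
    out.write("3x3 0 0\n")         # header: dimension string, padded to three columns
    out.write("0 0 2.0\n")         # row col value triples
    out.write("0 1 -1.0\n")
    out.write("1 1 2.0\n")
    out.write("2 2 1.5\n")

R = readSparseSystem("R_factor.txt")
print(R.toarray())                 # dense view of the Cholesky factor R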
Example #6
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", action="store", dest="dir", type=str, required=True)
    inargs = parser.parse_args()

    os.chdir(os.path.expanduser("~/Data/EC/4DBlock/Old/" + inargs.dir))

    os.mkdir("Last")

    b_num = get_b_num()

    os.system("cp info.txt Last/")

    all_files = os.listdir(".")
    for i, j in enumerate(all_files):
        if "poindat" in j:
            curfile = open(j, "r")
            to_file = open("Last/" + j, "w")
            to_file.write(curfile.readline())
            to_file.write(curfile.readline())
            cur_dat = pl.genfromtxt(curfile, comments="Z")
            for l in range(b_num):
                to_file.write(
                    str(cur_dat[-b_num + l, 0])
                    + " "
                    + str(cur_dat[-b_num + l, 1])
                    + " "
                    + str(cur_dat[-b_num + l, 2])
                    + " "
                    + str(cur_dat[-b_num + l, 3])
                )
                to_file.write("\n")
            to_file.close()
            curfile.close()
Example #7
def LoadTheBigData(arg, dirname, files):
    '''Walks through the directory tree and looks into GeneralData.dat files,
    calculating the average number and STD of born and dead cells per time step,
    but only if the population makes it to the end of the simulation. Appends
    all of that to a list.'''
    for file in files:
        filepath_general = os.path.join(dirname, file)
        if filepath_general == os.path.join(dirname, 'GeneralData.dat'):
            l = re.split(
                " ",
                ln.getline(filepath_general,
                           fl.file_len(filepath_general) - 1))
            is_alive = float(l[4])
            if is_alive > 0.0:
                data = p.genfromtxt(filepath_general)
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                half_run_len = int(p.floor(data.shape[0] / 2.0))
                data = data[half_run_len:-1, :]
                # goes: turbulence, mean_born, std_born, mean_dead, std_dead
                arg.append((turb_param, data[:, 10].mean(), data[:, 10].std(),
                            data[:, 11].mean(), data[:, 11].std()))
            else:
                break
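These loader callbacks share the (arg, dirname, files) signature of Python 2's os.path.walk. A hedged sketch of an equivalent Python 3 driver, assuming a ./simulations tree of finished runs and the module-level p, ln, re, and fl helpers the function uses:

import os

results = []   # the `arg` list each callback appends to
for dirname, subdirs, files in os.walk('./simulations'):
    LoadTheBigData(results, dirname, files)
# each entry: (turbulence, mean_born, std_born, mean_dead, std_dead)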
Example #8
def parseResults(files):
    ''' Reads all of the results files and collects the results in a list.
    Returns field, dither, fiber, and redshift for objects with quality
    flag Q == 0.

    '''

    r = []
    for f in files:
        print f
        cluster, field, dither = f.split('_')
        data = pyl.genfromtxt(f, delimiter='\t', names=True, dtype=None)
        try:
            for fiber, z, Q in zip(data['Fiber'], data['Redshift'],
                                   data['Quality']):
                if Q == 0:
                    r.append((field, 'D' + str(dither.rstrip('.results')),
                              fiber, z))
        except TypeError:
            fiber = int(data['Fiber'])
            z = float(data['Redshift'])
            Q = int(data['Quality'])
            if Q == 0:
                r.append(
                    (field, 'D' + str(dither.rstrip('.results')), fiber, z))

    print len(r), 'objects read'
    return r
Example #9
def test_bregmanSplit():
    RF3_read = genfromtxt(os.path.join(data, "RF3_save.txt"))
    dos_300 = hh.load(os.path.join(data, 'graphite-Ei_300-DOS.h5'))
    DOS_300 = dos_300.I
    spacing = dos_300.E[1] - dos_300.E[0]
    energytobin = int(220 / spacing)
    DOS_300[energytobin::] = 0
    E_axis_DOS = dos_300.E
    E_axis_res = np.arange(-50, 240, 0.1)
    left = E_axis_res[E_axis_res < E_axis_DOS[0]]
    extended_E_axis_DOS = np.concatenate((left, E_axis_DOS))
    extended_g = np.concatenate((np.zeros(left.shape), dos_300.I))
    dos_intrp1 = interp1d(extended_E_axis_DOS, extended_g, kind='cubic')  # interpolation
    interpolated_dos = dos_intrp1(E_axis_res)
    E, g = E_axis_res, interpolated_dos
    RF_T = np.transpose(RF3_read)
    m = convolve_NS(RF_T, RF3_read)
    delta = 2. / LA.norm(m, ord=1)

    initial_d = np.zeros(g.shape[0])
    initial_b = initial_d
    mu = 100
    lamda = 1
    ninner = 1
    nouter = 20
    max_cg = 500
    R0 = S(g, RF3_read, initial_d, initial_b, mu, lamda, ninner, nouter, max_cg)

    plt.figure()
    plt.plot(E_axis_res, R0, label='R0')
    plt.legend()
    plt.show()
Example #10
def eval_errval():
    timeNow = time.localtime()
    timeStr = ('%02d-%02d,%02d:%02d:%02d' %
               (timeNow.tm_mon, timeNow.tm_mday,
                timeNow.tm_hour, timeNow.tm_min, timeNow.tm_sec))
    print timeStr
    
    all_files = np.array(os.listdir("err_log/"))
    all_files.sort()

    for selfile in all_files:
        if selfile[-3:] != 'txt':
            all_files = np.delete(all_files, np.where(all_files==selfile))
    
    sel_newest = pylab.genfromtxt("err_log/"+all_files[-1],delimiter=',')
    #sel_newest = sel_newest[25:,:]
    nums = sel_newest.shape[0]
    errs = sel_newest[:,0]
    vals = sel_newest[:,1]
    accs = sel_newest[:,2]

    print nums
    fig, ax = plt.subplots(1,2)
    fig.set_figheight(6)
    fig.set_figwidth(16)
    fig.suptitle('updated: ' + timeStr,fontsize=10)
    ax[0].plot(range(nums),errs,range(nums),vals)
    ax[1].plot(range(nums),accs)
    plt.savefig('plot.png')
    plt.close(fig)
    time.sleep(60)
Example #11
def readCsv(ifile):
    skeys = [
        'date', 'homeTeam', 'awayTeam', 'game_id', 'player', 'posteam',
        'oldstate', 'newstate'
    ]
    ikeys = ['seas', 'igame_id', 'yds']
    fkeys = []

    dt = []
    lines = [l.strip() for l in open(ifile).readlines()]
    hd = lines[0]
    ks = hd.split(',')
    for k in ks:
        if k in skeys:
            tmp = (k, 'S64')
        elif k in ikeys:
            tmp = (k, 'i4')
        elif k in fkeys:
            tmp = (k, 'f4')
        else:
            tmp = (k, 'f8')
        dt.append(tmp)

    dt = pylab.dtype(dt)
    data = pylab.genfromtxt(ifile, dtype=dt, skip_header=1, delimiter=',')
    return data
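A usage sketch with a toy input; the file and column names are hypothetical. Any header name not listed in skeys/ikeys/fkeys falls through to the f8 default:

# toy CSV exercising the header-driven dtype construction above
with open('plays.csv', 'w') as out:
    out.write("game_id,seas,yds,epa\n"
              "2009091000,2009,7,0.43\n"
              "2009091000,2009,-2,-0.61\n")

plays = readCsv('plays.csv')
print(plays['yds'].mean())   # structured-array access by column name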
Example #12
    def read_data_from_file(self, file_name, type=1):
        '''
        type 1: for data saved from floor 3;
        type 2: for data saved from floor 2;
        type 3: for data save by this software;
        type 4: unknown type, input manually
        '''

        data = pl.genfromtxt(file_name, delimiter=',')
        # print 'the total size of input data are:',len(data),'X',len(data[0])
        if type == 1:
            data = data[20:, :]
        elif type == 2:
            data = data[:, -3:-1]
        elif type == 3:
            data = data[1:, :2]
        else:
            begin_row = input('begin row:')
            begin_column = input('begin column:')
            end_row = raw_input('end row:')
            end_column = raw_input('end column:')
            if end_row == 'end':
                end_row = len(data)
            else:
                end_row = int(end_row)
            if end_column == 'end':
                end_column = len(data[0])
            else:
                end_column = int(end_column)
            data = data[begin_row:end_row, begin_column:end_column]
        return data
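A hedged usage sketch; the class and file names are hypothetical stand-ins for whatever object exposes this method:

reader = DataReader()   # hypothetical class defining read_data_from_file
d1 = reader.read_data_from_file('floor3_log.csv', type=1)   # drop the first 20 rows
d2 = reader.read_data_from_file('floor2_log.csv', type=2)   # keep columns -3:-1
d3 = reader.read_data_from_file('saved_here.csv', type=3)   # skip header row, keep first two columns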
Example #13
def main():
    f = open("final_position.txt","r")
    data = pl.genfromtxt(f,comments = "L")
    
    # keep every fourth row: x from column 4, y from column 2
    x = pl.array([])
    y = pl.array([])
    for i,j in enumerate(data[:-7,2]):
        if i%4 == 0:
            x = pl.append(x,data[i,4])
            y = pl.append(y,j)
    
    print(x)
    print(y)
    fit = np.polyfit(x,y,2)

    print(fit)
    
    # np.polyfit returns coefficients highest power first:
    # fitted(x) = fit[0]*x**2 + fit[1]*x + fit[2]
    fitted = np.poly1d(fit)
    print(fitted)

    #pl.plot(pl.append(x,[.262,.264,.266]),fitted(pl.append(x,[.262,.264,.266])),color="black")
    pl.scatter(x,y,color = "black")
    pl.xlabel("$A$",fontsize="30")
    pl.ylabel("$x$",fontsize="30")
    pl.savefig("fin_pts.png",transparent=True,dpi=300)
    
    os.system("open fin_pts.png")
Example #14
def plotAP():
    filename = "noble-2000ms.dat"
    data = genfromtxt(filename)
    pyplot.plot(data[:, 0],
                data[:, 1],
                label="Vm",
                linestyle="-",
                color="k",
                linewidth=2)
    #pyplot.tick_params(
    #			    axis='x',          # changes apply to the x-axis
    #			    which='both',      # both major and minor ticks are affected
    #			    bottom='off',      # ticks along the bottom edge are off
    #			    top='off',         # ticks along the top edge are off
    #			    labelbottom='off') # labels along the bottom edge are off
    pyplot.yticks(size=12)
    pyplot.xticks(size=12)
    pyplot.xlim([0, 2000])
    pyplot.ylim([-90, 50])
    #pyplot.title(u'Potencial de Ação - Li e Rudy (2011)',size=22)
    pyplot.title(u'Action Potential - Noble (1962)', size=22)
    #pyplot.xlabel("Tempo (ms)", size=18)
    pyplot.xlabel("Time (ms)", size=18)
    #pyplot.ylabel(u'Potencial Transmembrânico (mV)', size=18)
    pyplot.ylabel(u'Transmembrane Potential (mV)', size=18)
    pyplot.legend(loc=0, fontsize=15)
    pyplot.savefig("ap-noble-EN.pdf")
Example #15
 def draw_plot(self):
     global LAST_CALL, LAST_MODIFIED_DATE, IMAGE_BUFFER
     data_filename = "stats.txt"
     try:
         mtime = os.path.getmtime(data_filename)
     except OSError:
         mtime = 0
     modified_date = datetime.fromtimestamp(mtime)
     if LAST_CALL == self.path and modified_date == LAST_MODIFIED_DATE:
         IMAGE_BUFFER.seek(0)
         return IMAGE_BUFFER
     LAST_CALL = self.path
     LAST_MODIFIED_DATE = modified_date
     data = pylab.genfromtxt(data_filename , delimiter=',', dtype=int)
     y_data = data[:, 0]
     x_data = data[:, 1]
     if self.op == 'game':
         y_data = y_data[-self.game_count:]
         x_data = x_data[-self.game_count:]
     pylab.plot(x_data, y_data, '-')
     # pylab.show()
     IMAGE_BUFFER = io.BytesIO()
     pylab.savefig(IMAGE_BUFFER, format='png')
     IMAGE_BUFFER.seek(0)
     # pylab.legend()
     # pylab.title("Title of Plot")
     # pylab.xlabel("X Axis Label")
     # pylab.ylabel("Y Axis Label")
     pylab.close()
     return IMAGE_BUFFER
Example #16
def LoadEnvelopeMeanSize(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        Grand_STD = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                data_chopped = data[1000:-1, :]
                Grand_mean = data_chopped[:, 12].mean()
                Grand_STD = p.sqrt(
                    (sum(data_chopped[:, 4] * data_chopped[:, 13]**2) + sum(
                        (data_chopped[:, 12] - Grand_mean)**2)) /
                    sum(data_chopped[:, 4]))
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                # change the line number in ln.getline below to extract the
                # parameter you're interested in
                l = re.split(" ", ln.getline(filepath_turb, important_line))
                interesting_param = float(l[6])
                arg.append(
                    (Grand_mean, Grand_STD, turb_param, interesting_param))
            else:
                break
Example #17
def LoadMutationData(arg, dirname, files):
    '''Walks through the directory tree and looks into MutationRecords.dat files,
    calculating the average number and STD of mutations per clone, but only if
    the population makes it to the end of the simulation. Appends all of that to
    a list.'''
    for file in files:
        filepath_general = os.path.join(dirname, file)
        if filepath_general == os.path.join(dirname, 'GeneralData.dat'):
            l = re.split(" ", ln.getline(filepath_general,
                                         fl.file_len(filepath_general) - 1))
            is_alive = float(l[4])
            if is_alive > 0.0:
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                # change the line number in ln.getline below to extract the
                # parameter you're interested in
                l = re.split(" ", ln.getline(filepath_turb, 12))
                interesting_param = float(l[6])
                filepath_mut = os.path.join(dirname, 'MutationRecords.dat')
                Mut_raw_data = p.genfromtxt(filepath_mut)
                l_2 = re.split(" ", ln.getline(filepath_mut,
                                               fl.file_len(filepath_mut)-1))
                time_stamp = float(l_2[1])
                arg.append((turb_param, MutationPerClone(Mut_raw_data),
                            interesting_param, time_stamp))
            else:
                break
Example #19
def loadTheData3(DIRR=os.getcwd()):
    """Iterates trough directories and look for HostsGeneDivers.csv file and
    corresponding InputParameters.json then copies the necessary data to
    a Python list to be analysed and plotted later on in the program. Version
    for Python 3.5 utilazing os.walk() function"""
    TheData = []
    for dirName, subdirList, fileList in os.walk(DIRR):
        for file in fileList:
            filepath = os.path.join(dirName, file)
            if filepath == os.path.join(dirName, 'HostsGeneDivers.csv'):
                genes = p.genfromtxt(filepath)
                paramsFile = os.path.join(dirName, 'InputParameters.json')
                with open(paramsFile) as f:
                    prms = json.load(f)
                # change here
                interestOne = float(prms['mutation_probability_in_pathogen'])
                # change here
                interestTwo = float(prms['number_of_pathogen_species'])
                path_spp = prms['number_of_pathogen_species']
                pop_size = float(prms['host_population_size'])
                if path_spp == "NOT_IN_THIS_MODEL":
                    path_spp = 1
                print("patho species:", path_spp, "| things:", interestOne,
                      " ; ", interestTwo, "| dir:",
                      dirName.split("/")[-1])
                TheData.append((interestOne, interestTwo, int(path_spp),
                                genes[:, 0], genes[:, 3], genes[:, 4],
                                genes[:, 5], genes[:, 2], genes[:,
                                                                6], pop_size))
    return TheData
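A hedged call, assuming the finished runs sit in subdirectories beneath the current working directory:

TheData = loadTheData3(os.getcwd())
print(len(TheData), "runs loaded")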
Example #20
def main():

    is_transparent = False 
    
    f = open("pi_data.txt","r")
    
    # this is a little different than normal because of the complex data for the floquet stability
    # multipliers. When we use the "dtype" option we get a single array of tuples, so slicing is a
    # little more awkward: it has to look like data[#][#] to get a single value, NOT data[#,#].
    data = pl.genfromtxt(f,comments="e",dtype="complex,complex,float")
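    # e.g. data[0][2] is the float A-value of the first record, while data[0, 2]
    # would raise an IndexError on this 1-D array of (complex, complex, float) tuples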
   
    eigs1 = pl.array([])
    eigs2 = pl.array([])
    A = pl.array([])
    
    for i,j in enumerate(data):
        eigs1 = pl.append(eigs1,j[0])
        eigs2 = pl.append(eigs2,j[1])
        A = pl.append(A,j[2])

    fig1, ax1 = pl.subplots(2,2,sharex=True)
    ax1[0,0].plot(A,[k.real for k in eigs1],color = "Black")
    ax1[1,0].plot(A,[k.imag for k in eigs1],color = "Black")
    ax1[0,1].plot(A,[k.real for k in eigs2],color = "Black")
    ax1[1,1].plot(A,[k.imag for k in eigs2],color = "Black")

    ax1[0,0].set_ylabel(r"Re[$\lambda_1$]",fontsize=25)
    ax1[1,0].set_ylabel(r"Im[$\lambda_1$]",fontsize=25)
    ax1[0,1].set_ylabel(r"Re[$\lambda_2$]",fontsize=25)
    ax1[1,1].set_ylabel(r"Im[$\lambda_2$]",fontsize=25)
    ax1[1,0].set_xlabel("$A$",fontsize=25)
    ax1[1,1].set_xlabel("$A$",fontsize=25)
    fig1.tight_layout()
    fig1.savefig("paper_A_vs_eigs.png",dpi=300,transparent=is_transparent)
    os.system("open paper_A_vs_eigs.png")
Example #21
def LoadEnvData(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'VarianceOfEnvEtAl.dat'):
            data = p.genfromtxt(filepath)
            # goes like this: mean envelope surface, turbulence level T,
            # STD of envelope surface
            arg.append((data[:, 2], data[:, 3], data[:, 0]))
Example #22
def LoadShannonsData(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'Shannons.dat'):
            shannons = p.genfromtxt(filepath)
            # goes like this: mean Shannon index, turbulence level T,
            # STD of Shannon index
            arg.append((shannons[:, 0], shannons[:, 2], shannons[:, 1]))
Example #23
def LoadGeneNumbers(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GenomeSizes.dat'):
            genes = p.genfromtxt(filepath)
            # goes like this: mean number of genes, turbulence level T,
            # STD of number of genes
            arg.append((genes[:, 0], genes[:, 2], genes[:, 1]))
Example #24
def LoadEnvelData(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'Evelopes.dat'):
            envelopes = p.genfromtxt(filepath)
            # goes like this: mean envelope surface, turbulence level T,
            # STD of envelope surface
            arg.append((envelopes[:, 0], envelopes[:, 2], envelopes[:, 1]))
Example #25
def main():
    #visualize = False
    visualize = True
    minimumIntensity = 100.0

    #f = open('testing_eic.txt','r')
    f = open('testing_eic2.txt', 'r')
    d = pl.genfromtxt(f)

    intensities = d[0, :]
    rt = d[1, :]

    if visualize:
        pl.plot(rt, intensities)
        pl.show()

    cwtObject = cwt.ContinuousWaveletTransform(1, 10, 1)
    cwtObject.setVisualize(visualize)
    cwtObject.setSignal(intensities)
    cwtObject.setX(rt)
    cwtObject.buildRidgelines()
    cwtObject.filterRidgelines()

    newPeaks = cwtObject.findBoundries()

    for i in range(len(newPeaks[0, :])):

        leftBound = int(newPeaks[0, i])
        rightBound = int(newPeaks[1, i])

        if visualize:
            pl.plot(rt, intensities)
            pl.fill_between(rt[leftBound:rightBound + 1],
                            intensities[leftBound:rightBound + 1],
                            facecolor='r')
            pl.title('peak before boundary correction')
            pl.show()

        leftBound = correctbounds.fixLeftBoundry(intensities, leftBound)
        rightBound = correctbounds.fixRightBoundry(intensities, rightBound)

        leftBound, rightBound = correctbounds.cropZerosFromEdges(
            intensities, leftBound, rightBound)

        if visualize:
            pl.plot(rt, intensities)
            pl.fill_between(rt[leftBound:rightBound + 1],
                            intensities[leftBound:rightBound + 1],
                            facecolor='r')
            pl.title('peak after boundary correction')
            pl.show()

        print "left bound = " + str(leftBound)
        print "right bound = " + str(rightBound)

        if max(intensities[leftBound:rightBound]) < minimumIntensity:
            continue
Example #26
def sheat_vs_tempature(ancl):

    if 'ssheat_data.txt' not in os.listdir('.'):
        print('Need specific heat data')
        os.system('say Need specific heat data')
    if 'temp_granular_sliced.txt' not in os.listdir('.'):
        print('Need granular temperature data')
        os.system('say Need granular temperature data')

    tempature_file = open('temp_granular_sliced.txt','r')
    sheat_file = open('ssheat_data.txt','r')

    # first line is labels
    tempature_labels = tempature_file.readline()
    tempature_plotting_data = pl.genfromtxt(tempature_file)
    tempature_arr = tempature_plotting_data[:,1]

    # first line is labels
    sheat_labels = sheat_file.readline()
    sheat_plotting_data = pl.genfromtxt(sheat_file)
    # first column: sweep variables
    var_arr = sheat_plotting_data[:,0]
    # energy fluctuations are in the next column
    delta_E_sqrd = sheat_plotting_data[:,1]
    s_heat_arr = sheat_plotting_data[:,2]

    fig = pl.figure()
    ax = fig.add_subplot(111)
    # form of errorbar(x,y,xerr=xerr_arr,yerr=yerr_arr)
    pl.scatter(tempature_arr,s_heat_arr,c='k')
    #pl.errorbar(var_arr,averages_2,yerr=std_arr,c='b',ls='none',fmt='o')
    ax.set_xlabel(r'T_g',fontsize=30)
    ax.set_ylabel('Specific heat per particle',fontsize=20)
    fig.tight_layout()
    fig.savefig('T_vs_s_heat.png',dpi=300)
    pl.close(fig)


    print('\a')
    os.system('say finished plotting temperature against specific heat')
Example #27
def spatio_temporal(ancl):

    os.mkdir('SpatioTemporalVels')

    print('RIGHT NOW THIS IS ONLY FOR VX!!!!!!!')

    p_arr = pl.arange(0,ancl.N)
    

    # How many cycles do we want to look at?
    how_many = 10

    var_arr = pl.array([])
    for i,j in enumerate(os.listdir('.')):
        if 'poindat.txt' not in j:
            continue
        print('working on file ' + j)
        poin_num = int(j[:j.find('p')])
        cur_file = open(j,'r')
        cur_sweep_var = float(cur_file.readline().split()[-1])
        cur_data=pl.genfromtxt(cur_file)
        cur_file.close()

        var_arr = pl.append(var_arr,cur_sweep_var)
        
        count = 0
        grid = cur_data[-int(how_many*2.0*pl.pi/ancl.dt):,:ancl.N]

        # in 1D, because particles never cross each other, we can order them in the images to match
        # their physical order.
        grid_ordered = pl.zeros(pl.shape(grid))
        # can just use the initial conditions to figure out where each is
        init_x = cur_data[0,ancl.N:2*ancl.N]
        sorted_x = sorted(init_x)
        for a,alpha in enumerate(sorted_x):
            for b,beta in enumerate(init_x):
                if alpha == beta:
                    grid_ordered[:,a]=grid[:,b]
        
    
        print('shape of grid_ordered: ' + str(pl.shape(grid_ordered)))
        
        fig = pl.figure()
        ax = fig.add_subplot(111)
        # form of errorbar(x,y,xerr=xerr_arr,yerr=yerr_arr)
        ax.imshow(grid_ordered,interpolation="nearest", aspect='auto')
        ax.set_xlabel('Particle',fontsize=30)
        #ax.set_aspect('equal')
        ax.set_ylabel(r'$ t $',fontsize=30)
        fig.tight_layout()
        fig.savefig('SpatioTemporalVels/%(number)04d.png'%{'number':poin_num})
        pl.close(fig)
Example #28
def plot_guard_band(options):
    datas = [
        genfromtxt("0_0/plottable.csv"),
        genfromtxt("0_05/plottable.csv"),
        genfromtxt("0_1/plottable.csv"),
        genfromtxt("0_5/plottable.csv"),
    ]

    datas[0] = np.array([x for x in datas[0] if x[0] < 949.27e6])
    datas[1] = np.array([x for x in datas[1] if x[0] < 949.27e6])
    datas[2] = np.array([x for x in datas[2] if x[0] < 949.27e6])
    datas[3] = np.array([x for x in datas[3] if x[0] < 949.27e6])
    #datas[0] = np.array([x for x in datas[0] if  x[0] <= 949e6])
    #datas[1] = np.array([x for x in datas[1] if  x[0] <= 949e6])
    #datas[2] = np.array([x for x in datas[2] if  x[0] <= 949e6])
    #datas[3] = np.array([x for x in datas[3] if  x[0] <= 949e6])

    for d, cor, mar in zip(datas, ['r', 'b', 'g', 'y', 'k'],
                           ['o', '^', 's', '*', 'v']):
        plt.errorbar(d[:, 0] - 948.25e6,
                     d[:, 1],
                     yerr=d[:, 2],
                     linestyle='-',
                     marker=mar,
                     color=cor)
        #plt.errorbar(d[:,0] - 948e6, d[:,1], yerr=d[:,2], linestyle='-', marker = mar, color=cor)

    plt.ylabel("SINR")
    plt.ylim([0, 10])
    plt.xlabel("Guard Band [KHz]")
    plt.xticks((200e3, 400e3, 600e3, 800e3, 1e6),
               ('200 KHz', '400 KHz', '600KHz', '800 KHz', '1 MHz'))
    plt.xticks()

    plt.legend(("LTE Only", "Normalized", "2x Amplitude", "10x Amplitude"),
               loc='lower right')

    plt.savefig("guard_band_snr.pdf")
Example #29
def write_smurf():

    a = P.genfromtxt('smurf_words.txt', dtype=None, delimiter=',')
    xs = a[:, 0]
    ys = a[:, 1]
    n = 45
    N = 100
    for k in range(N):
        for i in range(n):
            pv1 = base_name + "[0]"
            pv2 = base_name + "[1]"
            epics.caput(pv1, 1000. * xs[i])
            epics.caput(pv2, 1000. * ys[i])
            time.sleep(0.1)
Example #30
    def __init__(self, xml_tree):
        self.root = xml_tree.getroot()
        self.misc = self.root.find('Misc')

        self.duration = self.misc.find('Duration')
        self.final_t = float(self.duration.attrib['Time'])

        self.pressure = self.misc.find('Pressure')
        self.pressure_avg = float(self.pressure.attrib['Avg'])
        self.pressure_tensor = self.pressure.find('Tensor')
        stream = io.BytesIO()
        stream.write(self.pressure_tensor.text)
        stream.seek(0)
        self.pressure_tensor = pylab.genfromtxt(stream)
Example #31
    def setUp(self):
        f = open('testing_eic.txt', 'r')
        d = pl.genfromtxt(f)
        intensities = d[0, :]
        rt = d[1, :]

        self.peakdetector = pd.PeakDetector()

        # intensities and rt (inputs) must be numpy arrays
        self.peakdetector.setSignal(intensities)
        self.peakdetector.setX(rt)
        self.peakdetector.setWaveletPeakHighestSimilarity("off")
        self.peakdetector.setSignalToNoiseThreshold(7)
        self.allLeftBounds, self.allRightBounds, self.allPeakPositions = \
            self.peakdetector.findPeaks()
Example #32
def plotVelocity():
    # Plot graphics for the propagation velocity
    print("[!] Saving velocity figure")
    input_filename = "v.txt"
    output_filename = input_filename.split(".")[0] + str(".pdf")
    pp = PdfPages(output_filename)
    mat0 = genfromtxt(input_filename)
    pyplot.plot(mat0[:, 0], mat0[:, 1], label="v")
    pyplot.grid()
    pyplot.title("Propagation velocity x Level")
    pyplot.xlabel("Level of the tree")
    pyplot.ylabel("Velocity (cm/s)")
    pyplot.ylim([0, 300])
    pyplot.legend()
    #pyplot.savefig("velocity_level.png")
    pp.savefig()
    pp.close()
    pyplot.clf()
Example #33
def parseEhtSchedule(file='eht_schedule.txt'):
    """
    load the eht observing schedule text file provided by EHT group
    Format is  date/time, src, AZ, EL
    Comma delimited
    Returns a structured array
    """
    print("Reading %s" % file)
    conv = {0: lambda s: datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')}
    eht = genfromtxt(file,
                     comments='#',
                     delimiter=',',
                     names=['time', 'src', 'EL', 'AZ'],
                     dtype=None,
                     converters=conv,
                     invalid_raise=False)

    return eht
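A hedged sketch of input this would accept, matching the comma-delimited layout the docstring describes (the entries are made up):

# hypothetical schedule entries; real EHT files may carry more columns
with open('eht_schedule.txt', 'w') as out:
    out.write("2017-04-05T03:00:00,SgrA*,145.2,38.7\n")
    out.write("2017-04-05T03:10:00,M87,112.9,41.3\n")

eht = parseEhtSchedule()
print(eht['time'][0], eht['src'][0])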
Example #34
def eval_errval():
    timeNow = time.localtime()
    timeStr = str('%02d' % timeNow.tm_mon) + '-' + str(
        '%02d' % timeNow.tm_mday) + ',' + str(
            '%02d' % timeNow.tm_hour) + ':' + str(
                '%02d' % timeNow.tm_min) + ':' + str('%02d' % timeNow.tm_sec)
    print timeStr

    all_files = np.array(os.listdir("err_logs/"))
    all_files.sort()

    for selfile in all_files:
        if selfile[-3:] != 'txt':
            all_files = np.delete(all_files, np.where(all_files == selfile))

    sel_newest = pylab.genfromtxt("err_logs/" + all_files[-1], delimiter=',')
    if sel_newest.ndim < 2:
        print 'wait...'
        return


#     if sel_newest.shape[0] > 100:
#         sel_newest = sel_newest[100:,:]
#(step, error, accu, valid_all.mean(), v_acc_all.mean())
    nums = sel_newest[:, 0]
    errs = sel_newest[:, 1]
    acus = sel_newest[:, 2]
    vals = sel_newest[:, 3]
    accs = sel_newest[:, 4]

    print int(nums[-1])
    fig, ax = plt.subplots(2, 1)
    fig.set_figheight(8)
    fig.set_figwidth(7)
    ax[0].set_title('updated: ' + timeStr, fontsize=10)
    ax[0].plot(nums, errs, nums, vals)
    ax[1].plot(nums, acus, nums, accs)

    ax[0].grid(linestyle=':')
    ax[1].grid(linestyle=':')
    plt.tight_layout()

    plt.savefig('plot.png')
    plt.close(fig)
Example #35
def LoadGenomeMeanSize(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:  # checking if data set is OK
                data_chopped = data[1000:-1, :]  # removing some of data
                Grand_mean = data_chopped[:, 2].mean()
                Grand_STD = p.sqrt(
                    (sum(data_chopped[:, 4] * data_chopped[:, 3]**2) + sum(
                        (data_chopped[:, 2] - Grand_mean)**2)) /
                    sum(data_chopped[:, 4]))

                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[2])
                arg.append((Grand_mean, Grand_STD, turb_param))
            else:
                break
Example #36
def CountHunry(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'RecourceUptakenCells.dat'):
            uptake_res_data = p.genfromtxt(filepath)
            uptake_res_data = uptake_res_data[
                -int(p.floor(uptake_res_data.shape[0])):]
            for k in xrange(uptake_res_data.shape[0]):
                uptake_res_data[k, :] = uptake_res_data[k, :] \
                    / uptake_res_data[k, :].sum()
            means_uptake = uptake_res_data[:, 0].mean()
            STD_uptake = uptake_res_data[:, 0].std()
            filepath_turb = os.path.join(dirname, 'ModelParams.dat')
            l = re.split(" ", ln.getline(filepath_turb, 6))
            turb_param = float(l[6])
            l = re.split(" ", ln.getline(filepath_turb, important_line))
            interesting_param = float(l[6])
            arg.append(
                (means_uptake, STD_uptake, turb_param, interesting_param))
Example #37
def get_fit(which):
    f = open("final_position.txt","r")
    data = pl.genfromtxt(f,comments = "L")

    if which=="x":
        datnum = 2
    if which=="vx":
        datnum = 0

    x = pl.array([])
    y = pl.array([])
    for i,j in enumerate(data[:-7,datnum]):
        if i%2 == 0:
            x = pl.append(x,data[i,4])
            y = pl.append(y,j)

    fit = pl.polyfit(x,y,2)

    fitted = pl.poly1d(fit)
    return fitted 
Example #38
def CountTrueDeathRate(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        Grand_STD = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                half_of_data = int(p.floor(data.shape[0] / 2))
                data_chopped = data[half_of_data:-1, :]
                Grand_mean = (data_chopped[:, 11] / data_chopped[:, 4]).mean()
                Grand_STD = (data_chopped[:, 11] / data_chopped[:, 4]).std()
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                l = re.split(" ", ln.getline(filepath_turb, important_line))
                interesting_param = float(l[6])
                arg.append(
                    (Grand_mean, Grand_STD, turb_param, interesting_param))
            else:
                break
Example #39
def LoadShannonIndex(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        Grand_STD = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                half_data = int(p.ceil(data.shape[0] / 2.0))
                data_chopped = data[half_data:-1, :]
                Grand_mean = data_chopped[:, 9].mean()
                Grand_STD = data_chopped[:, 9].std()
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                l = re.split(" ", ln.getline(filepath_turb, important_line))
                interesting_param = float(l[6])
                arg.append(
                    (Grand_mean, Grand_STD, turb_param, interesting_param))
            else:
                break
Example #40
def LoadGenomeMeanSize(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        Grand_STD = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                data_chopped = data[1000:-1, :]
                Grand_mean = data_chopped[:, 2].mean()
                Grand_STD = p.sqrt(
                    (sum(data_chopped[:, 4] * data_chopped[:, 3]**2) +
                     sum((data_chopped[:, 2] - Grand_mean)**2)) /
                    sum(data_chopped[:, 4]))
                filepath_hgt = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_hgt, interest_line))
                hgt_param = float(l[6])
                l_1 = re.split(" ", ln.getline(filepath_hgt, 6))
                T_value = float(l_1[6])
                arg.append((Grand_mean, Grand_STD, hgt_param, T_value))
            else:
                break
Example #41
def loadPlayByPlay(csvfile, vbose=0):
    skeys = [
        'game_id', 'type', 'playerName', 'posTeam', 'awayTeam', 'homeTeam'
    ]
    ikeys = ['seas', 'igame_id', 'dwn', 'ytg', 'yfog', 'yds']
    fkeys = []

    lines = [l.strip() for l in open(csvfile).readlines()]
    hd = lines[0]
    ks = hd.split(',')
    dt = []
    for k in ks:
        # postgres copy to file makes headers lower-case; this is a kludge
        if k == 'playername':
            k = 'playerName'
        elif k == 'posteam':
            k = 'posTeam'
        elif k == 'away_team':
            k = 'awayTeam'
        elif k == 'awayteam':
            k = 'awayTeam'
        elif k == 'home_team':
            k = 'homeTeam'
        elif k == 'hometeam':
            k = 'homeTeam'

        if k in skeys:
            tmp = (k, 'S16')
        elif k in ikeys:
            tmp = (k, 'i4')
        else:
            tmp = (k, 'f8')

        if vbose >= 1:
            print k, tmp

        dt.append(tmp)
    dt = pylab.dtype(dt)
    data = pylab.genfromtxt(csvfile, dtype=dt, delimiter=',', skip_header=1)
    return data
Example #44
def main(cluster):
    cluster = cluster + '_r_mosaic.fits'
    f = pyl.figure(1, figsize=(8, 8))

    gc = aplpy.FITSFigure(cluster, north=True, figure=f)
    gc.show_grayscale(stretch='arcsinh')
    gc.set_theme('publication')

    gc.set_tick_labels_format(xformat='hh:mm:ss', yformat='dd:mm:ss')
    #gc.set_tick_labels_size('small')

    data = pyl.genfromtxt('./../analysis_all/redshifts/' +\
            cluster.split('_')[0]+'_redshifts.csv', delimiter=',', names=True,
            dtype=None)

    try:
        # filter out the specz's
        x = pyl.isnan(data['Specz'])
        # draw the specz's
        gc.show_markers(data['ra'][~x], data['dec'][~x], edgecolor='#ffbf00',
                        facecolor='none', marker='D', s=200)

    except ValueError:
        print 'no Speczs found'

    # draw observed but not redshifted
    x = data['Q'] == 2
    gc.show_markers(data['ra'][x], data['dec'][x], edgecolor='#a60628',
                    facecolor='none', marker='s', s=150)

    # draw redshifted
    x = (data['Q'] == 0) | (data['Q'] == 1)
    gc.show_markers(data['ra'][x], data['dec'][x], edgecolor='#188487',
                    facecolor='none', marker='o', s=150)

    pyl.tight_layout()
    pyl.show()
Example #45
def crunchZfile(f,aCol,sCol,bCol,normFactor):
    '''
    Takes a zAveraged... data file generated from the crunchData
    function of this library and produces the arithmetic mean
    as well as the standard error over all seeds.  The error
    is computed through propagation of errors as
    e = sqrt( \sum_k (c_k e_k)^2 ), where the e_k are the individual
    seeds' standard errors and the c_k are weighting coefficients
    obeying \sum_k c_k = 1.
    '''
    avgs,stds,bins = pl.genfromtxt(f, usecols=(aCol, sCol, bCol),
            unpack=True, delimiter=',')

    # get rid of any items which are not numbers..
    # this is some beautiful Python juju.
    bins = bins[pl.logical_not(pl.isnan(bins))]
    stds = stds[pl.logical_not(pl.isnan(stds))]
    avgs = avgs[pl.logical_not(pl.isnan(avgs))]

    # normalize data.
    stds *= normFactor
    avgs *= normFactor

    weights = bins/pl.sum(bins)

    avgs *= weights
    stds *= weights  # over-estimates error bars

    stds *= stds

    avg = pl.sum(avgs)
    stdErr = pl.sum(stds)

    stdErr = stdErr**0.5

    return avg, stdErr
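A minimal numeric sketch of that propagation rule on toy per-seed values (three seeds, normFactor taken as 1):

import pylab as pl

avgs = pl.array([1.02, 0.98, 1.05])   # per-seed means
stds = pl.array([0.04, 0.05, 0.03])   # per-seed standard errors e_k
bins = pl.array([100., 300., 100.])   # measurement counts

weights = bins / pl.sum(bins)                    # c_k, obeying sum_k c_k = 1
avg = pl.sum(avgs * weights)                     # weighted mean
stdErr = pl.sqrt(pl.sum((stds * weights)**2))    # e = sqrt(sum_k (c_k e_k)^2)
print(avg, stdErr)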
Example #46
import pylab as pl
import scipy as sp
import testvis as tv

i_i=0
i_theta=1
i_pb=2
i_snr=3
i_fwhmsnr=4
i_fwhm2snr=5
i_anglesnr=6
i_poserr=7
i_pos2err=8
i_zeros=9

c1=pl.genfromtxt('c40-1n.cfgnew-constFlux_2.parErrs.txt')
c2=pl.genfromtxt('c40-2n.cfgnew-constFlux_2.parErrs.txt')
c3=pl.genfromtxt('c40-3n.cfgnew-constFlux_2.parErrs.txt')
c4=pl.genfromtxt('c40-4n.cfgnew-constFlux_2.parErrs.txt')
c5=pl.genfromtxt('c40-5n.cfgnew-constFlux_2.parErrs.txt')
c6=pl.genfromtxt('c40-6n.cfgnew-constFlux_2.parErrs.txt')
c7=pl.genfromtxt('c40-7n2.cfgnew-constFlux_2.parErrs.txt')
c8=pl.genfromtxt('c40-8n2.cfgnew-constFlux_2.parErrs.txt')
c9=pl.genfromtxt('c40-9n2.cfgnew-constFlux_2.parErrs.txt')

lam=3e8/(100e9)
b1=tv.getbaselines('c40-1n.cfg',lam=lam)
b2=tv.getbaselines('c40-2n.cfg',lam=lam)
b3=tv.getbaselines('c40-3n.cfg',lam=lam)
b4=tv.getbaselines('c40-4n.cfg',lam=lam)
b5=tv.getbaselines('c40-5n.cfg',lam=lam)
Example #47
source.add( composition )

# run simulation
sim.setShowProgress(True)
sim.run(source, 20000, True)


# ### (Optional) Plotting

# In[2]:

get_ipython().magic(u'matplotlib inline')
import pylab as pl
# load events
output.close()
d = pl.genfromtxt('events.txt', names=True)

# observed quantities
Z = pl.array([chargeNumber(id) for id in d['ID'].astype(int)])  # element
A = pl.array([massNumber(id) for id in d['ID'].astype(int)])  # atomic mass number
lE = pl.log10(d['E']) + 18  # energy in log10(E/eV)

lEbins = pl.arange(18, 20.51, 0.1)  # logarithmic bins
lEcens = (lEbins[1:] + lEbins[:-1]) / 2  # logarithmic bin centers
dE = 10**lEbins[1:] - 10**lEbins[:-1]  # bin widths

# identify mass groups
idx1 = A == 1
idx2 = (A > 1) * (A <= 7)
idx3 = (A > 7) * (A <= 28)
idx4 = (A > 28)
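The excerpt stops here; the notebook it appears to follow would next bin each mass group into a spectrum. A hedged sketch of that step using only the arrays defined above:

# counts per logarithmic energy bin, divided by bin width
J1 = pl.histogram(lE[idx1], bins=lEbins)[0] / dE   # proton group
J2 = pl.histogram(lE[idx2], bins=lEbins)[0] / dE   # helium group
pl.plot(lEcens, J1 * 10**lEcens)                   # E * J(E) for protons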
Example #48
def main():
    parser = argparse.ArgumentParser()
    # f is for file
    parser.add_argument('-f',action='store',dest = 'f',type = str, required = False)
    # plot type
    parser.add_argument('-t',action='store',dest = 't',type = str,required = True)
    # plot sub type
    parser.add_argument('--st',action='store',dest = 'st',type = str,required = False)
    # plot every image, or every other, or ...
    # to plot every image set skip = 1. Skip cannot be less than 1 or else you get a divide by zero
    # error in the modulus.
    parser.add_argument('-s',action='store',dest = 's',type = int,required = False,default = 2)

    inargs = parser.parse_args()
    f = inargs.f
    plot_type = inargs.t
    print('plot_type is: ' + str(plot_type))
    plot_sub_type = inargs.st
    skip = inargs.s

    # ancl --> analysis class
    ancl = of.anal_run()
    ancl.get_info()
    ancl.set_list_dir()

    # how many cycles do we want to throw away as transients,
    # i.e. what do we think the transients are
    how_many_get_rid = 50

    if plot_type == 'one':
        working_file = open(f,"r")

        # get the variable for the plot
        var = float(working_file.readline().split()[-1])
        sol = pl.genfromtxt(working_file)
        working_file.close()

        # puts the file number ahead of the RunImages folder to prevent overwriting
        f_num_str = f[:f.find('p')]

        print('making directory: ' + f_num_str+'RunImages/PhaseSpace')
        os.mkdir(f_num_str+'RunImages')
        os.mkdir(f_num_str+'RunImages/PhaseSpace')

        for i in range(len(sol)):
            print('in make_one loop')
            if i%skip!=0:
                continue
            print(i)
            print(sol[i,:ancl.N])

            r_fig = pl.figure()
            r_ax = r_fig.add_subplot(111)
            r_ax.set_xlim([-pl.pi/ancl.N,2.0*pl.pi-pl.pi/ancl.N])
            r_ax.set_ylim([-1,1])
            r_ax.set_xlabel(r'$\phi$',fontsize=30)
            r_ax.set_ylabel(r'$\dot{\phi}$',fontsize=30)
            # need to plot phi, not theta
            for j in range(ancl.N):
                # these two lines are just to make the phi more readable
                theta_j = sol[i,ancl.N+j]
                theta_dot_j = sol[i,j]

                
                phi_j = (pl.pi/ancl.N)*(j*2+pl.sin(theta_j)) 
                phi_dot_j = (pl.pi/ancl.N)*pl.cos(theta_j)*theta_dot_j

                r_ax.scatter(phi_j,phi_dot_j)

            r_fig.tight_layout()
            r_fig.savefig(f_num_str+'RunImages/PhaseSpace/%(number)04d.png'%{'number':i})
            pl.close(r_fig)
        
    # plot the velocity distribution for each file
    if plot_type == 'veldist':
        to_save_dir = 'VelDistMovie_neg100-neg50'
        os.mkdir(to_save_dir)

        cycle_int = int(2.0*pl.pi/ancl.dt)
        for i,j in enumerate(ancl.list_dir):
            working_file = open(j,'r')
            cur_sweep_var = float(working_file.readline().split()[-1])
            cur_data = pl.genfromtxt(working_file)
            working_file.close()
            
            # get rid of transient cycles
            cur_data = cur_data[-100*cycle_int:-50*cycle_int,:]
            # get one array of all the particles' velocity magnitudes
            theta_dot_arr = pl.array([])
            theta_dot_arr = pl.append(theta_dot_arr,cur_data[:,:ancl.N])
    
            fig = pl.figure()
            ax = fig.add_subplot(111)
            ax.set_xlabel(r'$\dot{\theta}$',fontsize=30)
            ax.set_ylabel('Count',fontsize = 25)
            ax.hist(theta_dot_arr,bins = 20)
            fig.tight_layout()
            fig.savefig(to_save_dir+'/%(number)04d.png'%{'number':i})
            pl.close(fig)
    # x magnetization
    # This may not be the right order parameter but I'm curious to see what this looks like. The
    # details of why this is an order parameter in the HMF model are in "Nonequilibrium statistical
    # mechanics of systems with long-range interactions", Physics Reports, 2014.
    if plot_type == 'mx':
        #os.mkdir('MxMovie')
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_xlabel('Cycle',fontsize=30)
        ax.set_ylabel(r'$M_x$',fontsize = 25)
        for i,j in enumerate(ancl.list_dir):
            working_file = open(j,'r')
            cur_sweep_var = float(working_file.readline().split()[-1])
            cur_data = pl.genfromtxt(working_file)
            working_file.close()
            
            # get rid of transient cycles
            cur_data = cur_data[how_many_get_rid:,:]
            
            # want to plot the magnetization as a function of time for the last 10 cycles;
            # make an array of the average values at a given time for different values of time
            theta_arr = pl.array([])
            for a in range(int((10*2*pl.pi)/ancl.dt)):
                theta_arr = pl.append(theta_arr,pl.cos(cur_data[-(a+1),ancl.N:]).sum()/ancl.N)
    
            ax.plot(pl.linspace(-10,0,int((10*2*pl.pi)/ancl.dt)),theta_arr)
        fig.tight_layout()
        #fig.savefig('MxMovie/%(number)04d.png'%{'number':i})
        fig.savefig('mx.png')
        pl.close(fig)
Example #49
def make_int_c_sqrd_plot(v):

    qq,dt,beta,A,cycles,N,x_num_cell,y_num_cell,order,sweep_str,Dim  = of.get_system_info()
    
    y_lbl = r'$\int C^2(\tau)$'
    x_lbl = sweep_str

    sweep_var_arr = pl.array([])
    int_c_sqrd_arr = pl.array([])
    # loop over all files
    for i,j in enumerate(os.listdir('.')):
        if 'poindat.txt' not in j:
            continue
        
        work_file = open(j,'r')
        sweep_var_arr = pl.append(sweep_var_arr,float(work_file.readline().split()[-1]))
        data = pl.genfromtxt(work_file)
        work_file.close()

        average_out = pl.array([])

        # average over every particle in the simulation
        for a in range(N):
            
            if v == 'x':
                input_arr = data[:,Dim*N+a]
            if v == 'vx':
                input_arr = data[:,a]
            # Will only get into these if asking for 2D stuff anyway
            if v == 'y':
                if Dim==1:
                    print('No y in 1D')
                    quit()
                input_arr = data[:,(Dim+1)*N+a]
            if v == 'vy':
                if Dim==1:
                    print('No y in 1D')
                    quit()
                input_arr = data[:,(Dim-1)*N+a]

            print('shape of input_arr: ' + str(pl.shape(input_arr)))

            # lets try this for t from 0 to 100 cycles
            #t_arr = pl.arange(0,int(100.0*2.0*pl.pi),int(2.0*pl.pi))
            tau_arr = pl.arange(1,int(cycles*2.0*pl.pi),1)
            output_arr = pl.array([])
            for tau in tau_arr:
                cur = acf(tau, input_arr)
                #print('current run of acf is (should be one number): ' +str(cur))
                
                output_arr = pl.append(output_arr,cur)

            # for average acf plot
            if a == 0:
                average_out = pl.append(average_out,output_arr)
            else:
                average_out += output_arr

        # finish averaging over the N particles summed above
        average_out = average_out/N
        print('shape of average_out (should be 1D array): ' + str(pl.shape(average_out)))

        # Romberg Integration integrate averages**2 to get int c^2 number
        #average_out = average_out[:257]
        #print('shape of :2**13+1 averag out: ' +str(len(average_out)))
        #int_c_sqrd = scint.romb(average_out**2,show=True)

        # simpson integration
        int_c_sqrd = scint.simps(average_out, x=None, dx=1, axis=-1, even='avg')
        int_c_sqrd_arr = pl.append(int_c_sqrd_arr, int_c_sqrd)
        print('int_c_sqrd_arr so far: ' + str(int_c_sqrd_arr))

    fig = pl.figure()
    ax = fig.add_subplot(111)
    #ax.set_xlim(x_rng)
    #ax.set_ylim(y_rng)
    ax.set_xlabel(x_lbl,fontsize=30)
    ax.set_ylabel(y_lbl,fontsize=30)
    ax.scatter(sweep_var_arr,int_c_sqrd_arr,c='k')
    fig.tight_layout()
    fig.savefig('int_c_sqrd_vs_sweep.png')
    pl.close(fig)

    print('\a')
    print('\a')
    os.system('open int_c_sqrd_vs_sweep.png')
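The loop above relies on an acf(tau, arr) helper that is not shown; a minimal stand-in consistent with how it is called here (a normalized autocorrelation at integer lag tau; an assumption, not the original implementation):

def acf(tau, arr):
    # normalized autocorrelation of arr at integer lag tau (hypothetical stand-in)
    tau = int(tau)
    x = arr - arr.mean()
    return pl.sum(x[:-tau] * x[tau:]) / pl.sum(x * x)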
Example #50
#http://matplotlib.org/users/pyplot_tutorial.html
from matplotlib import pyplot
from pylab import genfromtxt
mat0 = genfromtxt("data0.txt")
mat1 = genfromtxt("data1.txt")
pyplot.plot(mat0[:,0], mat0[:,1], label="data0")
pyplot.plot(mat1[:,0], mat1[:,1], label="data1")
pyplot.legend()
pyplot.show()
Example #51
    gc.show_grayscale(stretch='arcsinh', pmin=1, pmax=99.9)
    gc.set_tick_labels_format(xformat='hh:mm:ss', yformat='dd:mm:ss')
    gc.set_theme('publication')
    gc.set_tick_labels_size('small')

    # now for the axis labels
    if not i % 3 == 0:
        gc.axis_labels.hide_y()
    if i < 6:
        gc.axis_labels.hide_x()

    ax = fig.axes[-1]
    ax.set_title(cluster)

    data = pyl.genfromtxt('./../analysis_all/redshifts/' +\
            cluster.split('_')[0]+'_redshifts.csv', delimiter=',', names=True,
            dtype=None)

    try:
        # filter out the specz's
        x = pyl.isnan(data['Specz'])
        # draw the specz's
        gc.show_markers(data['ra'][~x], data['dec'][~x], edgecolor='#ffbf00',
                        facecolor='none', marker='D', s=50)

    except ValueError:
        print 'no Speczs found'

    # draw observed but not redshifted
    x = data['Q'] == 2
    gc.show_markers(data['ra'][x], data['dec'][x], edgecolor='#a60628',
Example #52
import pylab
import time
import os
from matplotlib import dates
import datetime


os.environ["DISPLAY"] = ":0.0"

plotdailylogfilename = '/home/pi/Desktop/powersystem-logfiles/' + time.strftime("%Y-%m-%d") + '.log'
#dt=pylab.dtype([('f0','datetime64[ms]'),('f1',str,(2)),('f2',float,(5))])
dt = pylab.dtype([
    ('f0', 'datetime64[ms]'), ('s1', str, (15)),
    ('f2', float, (1)), ('f3', float, (1)), ('f4', float, (1)),
    ('f5', float, (1)), ('f6', float, (1)), ('f7', float, (1)),
    ('f8', float, (1)), ('f9', float, (1)), ('f10', float, (1)),
    ('f11', float, (1)), ('s12', str, (15)), ('f13', float, (1)),
    ('f14', float, (1)), ('f15', float, (1)), ('f16', float, (1)),
    ('f17', float, (1)), ('f19', float, (1)), ('f20', float, (1)),
    ('f21', float, (1)), ('f22', float, (1)), ('f23', float, (1)),
    ('f24', float, (1)), ('f25', float, (1)), ('f26', float, (1)),
    ('f27', float, (1)), ('f28', float, (1)), ('f29', float, (1)),
    ('f30', float, (1)), ('f31', float, (1)), ('f32', float, (1)),
    ('f33', float, (1)), ('f34', float, (1))])
data = pylab.genfromtxt(plotdailylogfilename,dtype=dt,delimiter='\t')

pylab.plot( data['f0'], data['f2'], label="Output")
pylab.plot( data['f0'], data['f3'], label="Battery")
pylab.legend()
pylab.title("Charge Data for " + time.strftime("%Y-%m-%d"))
pylab.xlabel("Time")
pylab.ylabel("Voltage: \n")
pylab.savefig("/var/www/html/voltagegraph.png")
pylab.clf()

pylab.plot( data['f0'], data['f21'], label="Amp")
pylab.plot( data['f0'], data['f22'], label="Watts")
pylab.legend()
pylab.title("Charge Data for " + time.strftime("%Y-%m-%d"))
pylab.xlabel("Time")
pylab.ylabel("Charge")
pylab.legend(loc='lower left')
pylab.savefig("/var/www/html/whcgraph.png")
Example No. 53
def main():

    # Structure:
    # 1) take the multi-particle run of interest -> this is the fiducial trajectory.
    # 1a) slice the data
    # 2) throw away transients
    # 3) take the PC (Poincare-section point) of the fiducial as initial conditions for a separate run
    # 4) perturb ALL the particles by epsilon/N
    # 5) run the perturbed version of the system for one period
    # 6) measure ALL the distances between the fiducial trajectories and the new trajectories after
    #    the one period. Store the sum of these distances and each of them individually, FOR ALL
    #    PARTICLES.
    # 7) move all particles back along the line to the fiducial particles to undo the expansion and
    #    get initial conditions for the next run.
    # 8) do this many times and average the total separations to find the LE.
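    # The loop below is essentially the Benettin renormalization scheme: after
    # n periods of length T with measured separations d_i and perturbation size
    # epsilon, the largest Lyapunov exponent is estimated as
    #   LE ~= (1/(n*T)) * sum_i ln(d_i/epsilon)
    # which is what watch_le accumulates incrementally below.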

    # 1)
    # getting the data
    parser = argparse.ArgumentParser()
    # d is for directory
    parser.add_argument('-d',action='store',dest = 'd',type = str, required = False, default = './')
    # f is for file
    parser.add_argument('-f',action='store',dest = 'f',type = str, required = False)
    # n is for the particle of interest
    #parser.add_argument('-n',action='store',dest = 'n',type = int, required = True)
    # epsilon should be passable. This is the total epsilon
    parser.add_argument('-e',action='store',dest = 'e',type = float, required = False,default = 1e-4)


    inargs = parser.parse_args()
    d = inargs.d
    f = inargs.f
    #particle = inargs.n

    # distances from the fiducial trajectory are stored here
    final_dist_arr = pl.array([])

    # get system info
    qq,dt,beta,A_sweep_str,cycles,N,x_num_cell,y_num_cell,order,sweep_str,Dim = of.get_system_info()
    print("A from get_system_info() is: " +str(A_sweep_str))
    
    file_object = open(f,'r')
    first_line = file_object.readline()
    print('first line of data file: ' +str(first_line))
    A = pl.zeros(N)+float(first_line.split()[-1])
    print("A from working file is: " + str(A))

    data = pl.genfromtxt(file_object)
    print('shape of fiducial data before doing anything: ' +str(pl.shape(data)))

    # 1a)
    # first let's slice the data so that we have Poincare sections at minimum field potential.
    # This should make slight errors in the initial conditions of the unperturbed particles less
    # prevalent. Why? Because a small error in position when \Phi is not zero means an error in
    # energy as well. This will also happen with the velocities (KE), but we can minimize it by
    # using zero-potential Poincare sections.

    # data for values of t = pi*(2*n + 1/2) (zero-potential Poincare section)
    new_data = pl.array([])
    # also want data for time RIGHT BEFORE slice point --> why? --> see renormalize function
    new_before = pl.array([])
    checked = True
    for i in range(len(data)):
        # This would pick out values of time at MAXIMUM potential -- wrong:
        # check_time = i*dt%(pl.pi*2.0)
        # Right:
        check_time = (i*dt+pl.pi/2.0)%(pl.pi*2.0)
        if check_time < dt and check_time > 0.0:
            new_data = pl.append(new_data,data[i,:])
            new_before = pl.append(new_before,data[i-1,:])
            if checked:
                first_i = i
                checked=False


    sliced_data = new_data.reshape(-1,2*N)
    sliced_before = new_before.reshape(-1,2*N)

    print('shape of sliced fiducial data before doing anything: ' +str(pl.shape(sliced_data)))

    sliced_data[:,N:] = sliced_data[:,N:]%(2.0*pl.pi)

    # for the full data set just throw away the first few points so that it lines up with the
    # start of the sliced data
    data = data[first_i:,:]

    # SETTING SOME OTHER VARIABLES NOW
    # the perturbation distance for INDIVIDUAL particles is different from THE SYSTEM PERTURBATION
    epsilon = inargs.e
    print('epsilon: ' + str(epsilon))
    # good
    #epsilon = 1.0e-10
    # works 
    # epsilon = 1.0e-10
    # 1.0e-13

    # The period variable should probably just be kept at the actual period (2*pl.pi), but its
    # purpose is to allow us to change the length of time we wait before we collect the distance
    # information and renormalize. It is also necessary in the final calculation of the LE
    # because LE = 1/period * ln(rm/r0).
    period = 2.0*pl.pi
    print('period is: ' + str(period))


    # time array of one period. BE CAREFUL -> must start at pi/2 and go to 
    t = pl.arange(3.0*pl.pi/2.0,3.0*pl.pi/2.0+2.0*pl.pi,dt)
    
    # 2)
    # throw away transients
    # the fiducial run is probably not very long because of the need for high time resolution,
    # so let's throw away the whole first half
    sliced_data = sliced_data[(len(sliced_data[:,0])//2):,:]
    print('shape of sliced fiducial data after getting rid of transients: ' +str(pl.shape(sliced_data)))

    # 3) get our initial conditions for the perturbed run
    fiducial_start = sliced_data[0,:]
    print('First fiducial: '+str(fiducial_start))
    # 4) perturb all particles by epsilon
    init = get_first_init(fiducial_start,epsilon,N)

    print('first perturbed initial conditions: '+str(init))
    # 5) run the perturbed version of the system for one period
    elec = ec.Sin1D(qq,A,beta,x_num_cell)

    watch = pl.array([])
    watch_le = 0.0

    # In order to watch what's happening we are going to add some plotting
    # stuff that we can put in an "if" or just comment out later
    os.mkdir('WatchingLE')
    #for i in range(1,len(sliced_data)-1):
    for i in range(1,50):
        print(i)
        purt_sol = odeint(elec.f,init,t)

        #print('init: ' + str(init))
        #print('sliced_data[i-1,:] = ' + str(sliced_data[i-1,:]))
        
        # Let's see what happens when we run the fiducial trajectory again
        # (to see how much the time step matters).
        #test_sol = odeint(elec.f,sliced_data[i-1,:],t)
        #test_sol[:,N:(2*N)] = test_sol[:,N:(2*N)]%(2.0*pl.pi)

        # make sure particles are in the right modulus space
        #purt_sol[:,N:(2*N)] = purt_sol[:,N:(2*N)]%(2.0*pl.pi)

        # 6) measure the distance between the fiducial trajectories and the new trajectories after the one period
        fiducial_end = sliced_data[i,:]
        before_end = sliced_before[i,:]

        purt_end = purt_sol[-1,:]
        #purt_end = test_sol[-1,:]

        #first_fig = pl.figure()
        #first_ax = first_fig.add_subplot(111)
        #for gamma in range(N):
        #    first_ax.scatter(purt_sol[:,gamma+N],purt_sol[:,gamma],color="Red",s=5)
        #    first_ax.scatter(data[(int(2.0*pl.pi/.001)*(i-1)):(int(2.0*pl.pi/.001)*i),gamma+N],data[(int(2.0*pl.pi/.001)*(i-1)):(int(2.0*pl.pi/.001)*i),gamma],color="Blue",s=5)
        #    if gamma == 1:
        #        first_ax.annotate('start',xy=(purt_sol[0,gamma+N],purt_sol[0,gamma]),xytext=(pl.pi/2,-1.5),arrowprops=dict(facecolor='black',shrink=0.05))
        #        first_ax.annotate('stop' ,xy=(purt_sol[-1,gamma+N],purt_sol[-1,gamma]),xytext=(pl.pi/2,1.5)    ,arrowprops=dict(facecolor='black',shrink=0.05))
        #first_ax.set_xlim([0.0,2.0*pl.pi])
        #first_ax.set_ylim([-2.0,2.0])
        ##first_ax.set_xlabel("$x_1$",fontsize=25)
        ##first_ax.set_ylabel("$x_2$",fontsize=25)
        ##first_ax.set_xlim([0,2*pl.pi])
        ##first_ax.set_ylim([-1.3,1.3])
        #first_fig.savefig('WatchingLE/'+str(i)+".png")
        #pl.close(first_fig)


        # get the distance between the fiducial and the perturbed trajectory
        final_dist = full_distace(fiducial_end,purt_end,N)
        print('final distance: ' +str(final_dist))
        final_dist_arr = pl.append(final_dist_arr,final_dist)

        # 7) set up new initial condition
        init = renormalize(fiducial_end,before_end,purt_end,epsilon,N)
        print('renormalized... new initial conditions are: ' + str(init))
        print('compare the above to the fiducial final position ->: ' + str(fiducial_end))
        print('epsilon: ' + str(epsilon))
        print('norm of (init - fiducial_end) (should be epsilon): '+
                str(pl.sqrt(((init-fiducial_end)**2).sum())))

        watch_le += pl.log(final_dist_arr[-1]/epsilon)
        cur_avg = watch_le/i/period
        watch = pl.append(watch,cur_avg)


    eps_arr = pl.zeros(len(final_dist_arr))+epsilon
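    # note: the sqrt below treats final_dist_arr as a squared distance, while
    # watch_le above used final_dist_arr directly; which is right depends on
    # what full_distace returns (left as in the original)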
    le = pl.log(pl.sqrt(abs(final_dist_arr))/epsilon)/period

    print('mean LE (LE is): ' +str(le.mean()))
    print('standard deviation LE: ' +str(le.std()))

    fig = pl.figure()
    ax = fig.add_subplot(111)
    ax.scatter(pl.arange(len(watch)),watch,s=.1)
    #ax.set_xlabel("$x_1$",fontsize=25)
    #ax.set_ylabel("$x_2$",fontsize=25)
    #ax.set_xlim([0,2*pl.pi])
    #ax.set_ylim([-1.3,1.3])
    #fig.tight_layout()
    fig.savefig("convergence_LE.png")
    os.system("open convergence_LE.png")
Example No. 54
def main():

    savePDF = True
    solidDens = False

    Lx = 12.0
    Ly = 12.0
    az = 47.5
    ay = 2.0

    bulkVol = 2.0*Lx*Ly*az

    lowX7 = -6.0
    #lowX7 = -4.0   # boltzmannons
    highX7 = 2.9
    #highX7 = 2.5   # boltzmannons

    # define some plotting colors    
    colors = ['Navy','DarkViolet','MediumSpringGreen','Salmon','Fuchsia',
            'Yellow','Maroon','Salmon','Blue']
    
    # -------------------------------------------------------------------------
    # bulk and film density on same plot
    figg = pl.figure(1)
    ax = figg.add_subplot(111)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel('Spatial Density '+r'$[\si{\angstrom}^{-d}]$', fontsize=20)
    pl.grid(True)
    pl.xlim([-5.5,3.5])
    #pl.ylim([0,0.07])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax.yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # bulk SVP density line
    bulkVert = -30
    minMus = -5.5
    maxMus = 2.0
    boxSubtract = 1.6
    pl.plot([minMus, maxMus], [0.02198, 0.02198], 'k-', lw=3)
    pl.annotate('3d SVP', xy=(maxMus - boxSubtract, 0.02195),  #xycoords='data',
            xytext=(-10, bulkVert), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )

    # film SVP density line
    pl.plot([minMus, maxMus], [0.0432, 0.0432], 'k-', lw=3)
    pl.annotate('2d SVP', xy=(maxMus - boxSubtract, 0.0432),  #xycoords='data',
            xytext=(30, -30), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
    
    if solidDens:
        pl.plot([minMus, maxMus], [0.0248, 0.0248], 'k-', lw=3)
        pl.annotate('HCP solid SVP', xy=(maxMus - boxSubtract, 0.0248),  #xycoords='data',
                xytext=(-10, 30), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="->",
                    connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                )
    
    # -------------------------------------------------------------------------
    # bulk density
    figg2 = pl.figure(2)
    ax2 = figg2.add_subplot(111)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel('Bulk Density '+r'$[\si{\angstrom}^{-3}]$', fontsize=20)
    pl.grid(True)
    pl.xlim([-5.5,0.5])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax2.yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # set up bulk SVP densities for plot
    bulkVert = 30   # changes text box above (positive) or below (negative) line
    boxSubtract = 1.6
    pl.plot([minMus, maxMus], [0.02198, 0.02198], 'k-', lw=3)
    pl.annotate('3d SVP', xy=(maxMus - boxSubtract, 0.02198),  #xycoords='data',
            xytext=(-50, bulkVert), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
    
    # -------------------------------------------------------------------------
    # number of particles in film region
    pl.figure(3)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel(r'$N_{\text{film}}$', fontsize=20)
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)
    
    # -------------------------------------------------------------------------
    # normalized angular winding
    pl.figure(4)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel(r'$\Omega$', fontsize=20)
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # -------------------------------------------------------------------------
    # film density
    figg5 = pl.figure(5)
    ax5 = figg5.add_subplot(111)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel(r'$\text{Film Density}\ [\si{\angstrom}^{-2}]$', fontsize=20)
    pl.ylim([0.03,0.05])
    pl.xlim([-5.5,0.5])
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax5.yaxis.get_major_ticks()
    yticks[0].set_visible(False)
 
    # film SVP density line
    pl.plot([lowX7, highX7], [0.0432, 0.0432], 'k-', lw=3)
    pl.annotate('2d SVP', xy=(-4, 0.0432),  #xycoords='data',
            xytext=(30, -30), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
 
    # -------------------------------------------------------------------------
    # superfluid fraction
    pl.figure(6)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel(r'$\rho_S/\rho$', fontsize=20)
    pl.grid(True)
    pl.xlim([-5.5,0.5])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # -------------------------------------------------------------------------
    # film/bulk densities subplot 
    pl.figure(7)
    ax7a = pl.subplot(211)
    pl.ylabel(r'$\text{Film Density}\ [\si{\angstrom}^{-2}]$', fontsize=20)
    pl.ylim([0.035,0.05])
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax7a.yaxis.get_major_ticks()
    yticks[0].set_visible(False)
 
    # film SVP density line
    pl.plot([lowX7, highX7], [0.0432, 0.0432], 'k-', lw=3)
    pl.annotate('2d SVP', xy=(-2, 0.0432),  #xycoords='data',
            xytext=(30, -30), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
    pl.setp(ax7a.get_xticklabels(), visible=False)
    
    ax7b = pl.subplot(212, sharex=ax7a)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel('Bulk Density '+r'$[\si{\angstrom}^{-3}]$', fontsize=20)
    pl.grid(True)
    pl.xlim([lowX7,highX7])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax7b.yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # set up bulk SVP densities for plot
    bulkVert = 15   # changes text box above (positive) or below (negative) line
    boxSubtract = 1.6
    pl.plot([lowX7, highX7], [0.02198, 0.02198], 'k-', lw=3)
    pl.annotate('3d SVP', xy=(1.2, 0.02198),  #xycoords='data',
            xytext=(-50, bulkVert), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
 
    # -------------------------------------------------------------------------
    # number of particles in bulk
    pl.figure(8)
    pl.xlabel(r'$\text{Potential Shift}\ [K]$', fontsize=20)
    pl.ylabel(r'$N_{\text{bulk}}$', fontsize=20)
    pl.grid(True)
    pl.xlim([-5.5,0.5])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)


    # -------------------------------------------------------------------------
    Tvals = glob.glob('*T*')
   
    Markers = ['-o','-d','-*']

    
    for nT,Tval in enumerate(sorted(Tvals)):
        
        os.chdir(Tval)

        T = Tval[1:]

        Svals = glob.glob('S*')
        
        # --- loop through known directory structure --------------------------
        for nS,Sval in enumerate(sorted(Svals)):
            
            os.chdir(Sval)

            Vdirs = glob.glob('*V*')

            print(os.getcwd())

            # store bulk separation value
            S = re.search(r'\d+',Sval).group(0)
             
            # get label for plot
            if 'distinguishable' in Sval:
                labell = 'S = '+str(S)+', Boltzmannons, T='+str(T)
                shortlabell = 'S = '+str(S)+', Boltzmannons, T='+str(T)
            else:
                labell = 'S = '+str(S)+', Bosons, T='+str(T)
                shortlabell = 'S = '+str(S)+', T='+str(T)

            # projected area of film region
            projArea = float(S)*Lx

            # accessible volume in cell
            #accessibleVol = Lx*(Ly*(float(S)+2.0*az) - float(S)*(Ly-2.0*ay))
            accessibleVol = 2.0*Lx*Ly*az + 2.0*ay*Lx*float(S)

            # multiply angular winding by norm. to be fixed in code later 
            omegaNorm = 4.0*(float(S)+1.0*Ly)**2

            # Arrays to hold all data
            Vs = pl.array([])
            Films = pl.array([])
            Bulks = pl.array([])
            filmErrs = pl.array([])
            bulkErrs = pl.array([])
            Omegas = pl.array([])
            omegaErrs = pl.array([])
            NumParts = pl.array([])
            NumPartErrs = pl.array([])
            Supers = pl.array([])
            SuperErrs = pl.array([])

            
            # pass potential shift directories for current S value
            for Vdir in Vdirs:

                os.chdir(Vdir)
                
                # get bipartition file name
                f = glob.glob('*Bipart*')[0]

                # get angular winding file name
                fw = glob.glob('*Ntwind*')[0]
                
                # get estimator file name (includes total number)
                fe = glob.glob('*Estimator*')[0]

                # get superfrac file name
                fs = glob.glob('*Super*')[0]
        
                # build array of film potential shifts from directory names
                Vs = pl.append(Vs,float(Vdir[1:])) 

                # --- Densities -----------------------------------------------
                filmavg,filmstd,filmbins,bulkavg,bulkstd,bulkbins = pl.genfromtxt(f,
                        unpack=True, usecols=(0,1,2,3,4,5), delimiter=',')
                
                # get rid of any items which are not numbers..
                # this is some beautiful Python juju.
                filmbins = filmbins[pl.logical_not(pl.isnan(filmbins))]
                filmstd = filmstd[pl.logical_not(pl.isnan(filmstd))]
                filmavg = filmavg[pl.logical_not(pl.isnan(filmavg))]
                bulkbins = bulkbins[pl.logical_not(pl.isnan(bulkbins))]
                bulkstd = bulkstd[pl.logical_not(pl.isnan(bulkstd))]
                bulkavg = bulkavg[pl.logical_not(pl.isnan(bulkavg))]
                filmweights = filmbins/pl.sum(filmbins)
                bulkweights = bulkbins/pl.sum(bulkbins)
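                # combine the per-bin averages into a single bin-count-weighted
                # mean; the std errors are collapsed with the same weights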

                filmavg *= filmweights
                bulkavg *= bulkweights

                filmstd *= filmweights
                bulkstd *= bulkweights

                film = pl.sum(filmavg)
                bulk = pl.sum(bulkavg)
                filmstdErr = pl.sum(filmstd)
                bulkstdErr = pl.sum(bulkstd)

                Films = pl.append(Films, film)
                Bulks = pl.append(Bulks, bulk)
                filmErrs = pl.append(filmErrs, filmstdErr)
                bulkErrs = pl.append(bulkErrs, bulkstdErr)


                # ---- angular winding ----------------------------------------
                omegaAvg,omegaStd,omegaBins = pl.genfromtxt(fw,
                        unpack=True, usecols=(3,4,5), delimiter=',')

                # get rid of any items which are not numbers..
                omegaBins = omegaBins[pl.logical_not(pl.isnan(omegaBins))]
                omegaStd = omegaStd[pl.logical_not(pl.isnan(omegaStd))]
                omegaAvg = omegaAvg[pl.logical_not(pl.isnan(omegaAvg))]

                # normalize data.
                omegaStd *= omegaNorm
                omegaAvg *= omegaNorm

                weights = omegaBins/pl.sum(omegaBins)

                omegaAvg *= weights
                omegaStd *= weights

                Omega = pl.sum(omegaAvg)
                omegaErr = pl.sum(omegaStd)

                Omegas = pl.append(Omegas, Omega)
                omegaErrs = pl.append(omegaErrs, omegaErr)
               

                # ---- total number -------------------------------------------
                numAvg,numStd,numBins = pl.genfromtxt(fe,
                        unpack=True, usecols=(12,13,14), delimiter=',')

                # get rid of any items which are not numbers..
                numBins = numBins[pl.logical_not(pl.isnan(numBins))]
                numStd = numStd[pl.logical_not(pl.isnan(numStd))]
                numAvg = numAvg[pl.logical_not(pl.isnan(numAvg))]

                weights = numBins/pl.sum(numBins)

                numAvg *= weights
                numStd *= weights

                numPart = pl.sum(numAvg)
                numPartErr = pl.sum(numStd)

                NumParts = pl.append(NumParts, numPart)
                NumPartErrs = pl.append(NumPartErrs, numPartErr)


                # ---- superfluid fraction ------------------------------------
                supAvg,supStd,supBins = pl.genfromtxt(fs,
                        unpack=True, usecols=(0,1,2), delimiter=',')

                # get rid of any items which are not numbers..
                supBins = supBins[pl.logical_not(pl.isnan(supBins))]
                supStd = supStd[pl.logical_not(pl.isnan(supStd))]
                supAvg = supAvg[pl.logical_not(pl.isnan(supAvg))]

                # normalize data.
                #supStd /= (1.0*accessibleVol)
                #supAvg /= (1.0*accessibleVol)

                weights = supBins/pl.sum(supBins)

                supAvg *= weights
                supStd *= weights

                supPart = pl.sum(supAvg)
                supPartErr = pl.sum(supStd)

                Supers = pl.append(Supers, supPart)
                SuperErrs = pl.append(SuperErrs, supPartErr)


                os.chdir('..')


            # Sort data in order of increasing potential shift.
            # This is another bit of magical Python juju.
            Vs, Films, Bulks, filmErrs, bulkErrs, Omegas, omegaErrs,\
                    NumParts, NumPartErrs, Supers, SuperErrs = pl.asarray(
                    zip(*sorted(zip(Vs, Films, Bulks, filmErrs, bulkErrs, 
                        Omegas, omegaErrs, NumParts, NumPartErrs, Supers, 
                        SuperErrs))))


            pl.figure(1) 
            pl.errorbar(Vs, Films, filmErrs, fmt=Markers[nT], color=colors[nS],
                    label=labell+', 2d',
                    markersize=8)
            pl.errorbar(Vs, Bulks, bulkErrs, fmt=Markers[nT], mec=colors[nS],
                    label=labell+', 3d', mfc='None', 
                    markersize=8)

            pl.figure(2) 
            pl.errorbar(Vs, Bulks, bulkErrs, fmt = Markers[nT], color=colors[nS],
                    label=labell, markersize=8)

            pl.figure(3)
            pl.errorbar(Vs, Films*projArea, fmt=Markers[nT], color=colors[nS],
                    label = labell, markersize=8)

            pl.figure(4)
            pl.errorbar(Vs, Omegas, omegaErrs, fmt=Markers[nT], color=colors[nS],
                    label = labell, markersize=8)

            pl.figure(5)
            pl.errorbar(Vs, Films, filmErrs, fmt=Markers[nT], color=colors[nS],
                    label = labell, markersize=8)
     
            pl.figure(6)
            pl.errorbar(Vs, Supers, SuperErrs, fmt=Markers[nT], color=colors[nS],
                    label = labell, markersize=8)

            pl.figure(7)
            ax7a.errorbar(Vs, Films, filmErrs, fmt=Markers[nT], color=colors[nS],
                    label=shortlabell, markersize=8)
            ax7b.errorbar(Vs, Bulks, bulkErrs, fmt = Markers[nT], color=colors[nS],
                    label=shortlabell, markersize=8)
 
            pl.figure(8) 
            pl.errorbar(Vs, Bulks*bulkVol, bulkErrs*bulkVol, 
                    fmt = Markers[nT], color=colors[nS],
                    label=labell, markersize=8)


            os.chdir('..')
        
        os.chdir('..')
   
    pl.figure(1)
    pl.legend(loc=1)
    pl.tight_layout()
    pl.figure(2)
    pl.legend(loc=1)
    pl.tight_layout()
 
    pl.figure(3)
    pl.legend(loc=1)
    pl.tight_layout()
  
    pl.figure(4)
    pl.legend(loc=1)
    pl.tight_layout()
   
    pl.figure(5)
    pl.legend(loc=1)
    pl.tight_layout()

    pl.figure(6)
    pl.legend(loc=1)
    pl.tight_layout()
 
    pl.figure(7)
    pl.legend(loc=1,bbox_to_anchor=(1.,2.1))
    major_formatter = FuncFormatter(my_formatter)
    ax7a.yaxis.set_major_formatter(major_formatter)
    pl.tight_layout()
    if savePDF:
        pl.savefig('Bulk_Film_Dens_vs_Vshift_allS_allT_18APR.pdf', format='pdf',
                bbox_inches='tight')


    pl.figure(8)
    pl.legend(loc=1)
    pl.tight_layout()
  

    pl.show()
Example No. 55
import pylab
import os
import sys
from optparse import OptionParser

usage = "%prog [options] [flat file]"

parser = OptionParser(usage)
options, args = parser.parse_args()

if len(args) < 1:
    parser.error("Missing input file")

txtfile = args[0]

if not os.path.exists(txtfile):
    print "Error:", file, "does not exist"
    sys.exit(1)


dataArray = pylab.genfromtxt(txtfile)
nrows, ncols = dataArray.shape

x = dataArray[:, 0]
y = dataArray[:, 1]

pylab.plot(x, y)
pylab.axis("tight")
pylab.grid(True)
pylab.savefig("fig.png")
pylab.show()
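# Usage (hypothetical script name): python plot_flat.py data.txt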
Example No. 56
def diffusion_coef(f):
    # read the data file first: the first line holds the sweep variable and
    # the rest holds the trajectory data
    work_file = open(f,'r')
    sweep_var = float(work_file.readline().split()[-1])
    data = pl.genfromtxt(work_file)
    work_file.close()

    # depending on the variable we want we need a number that controls how we
    # slice the data lines. With the dimension of the system we can slice
    # everything right; the dimension follows from the number of columns
    # (N, the particle number, is assumed to be defined before this point).
    Dim = pl.shape(data)[1]//(2*N)
    print('dimension: '+str(Dim))

#    cur_poin_num = int(f[:f.find('p')])
#    if (str(cur_poin_num)+'RunImages') not in os.listdir('.'):
#        os.mkdir(str(cur_poin_num)+'RunImages')

    if Dim == 2:
        qq,dt,beta,A,cycles,N,x_num_cell,y_num_cell,order,sweep_str = po.get_system_info()
    if Dim == 1:
        # the original left this branch empty; a 1D analogue of
        # po.get_system_info() would presumably be called here
        pass

#    to_save_dir = str(cur_poin_num)+'RunImages/TauACF_Var_'+v
#    os.mkdir(to_save_dir)

  

    # make em for every particle in the simulation
    for a in range(N):

        # can use the dimension to slice everything right
        cur_x = data[:,Dim*N+a]
        if Dim ==2:
            cur_y = data[:,(Dim+1)*N+a]
            distance_arr = pl.sqrt(cur_x**2 + cur_y**2)
        else:
            distance_arr = cur_x

        #print('shape of distance array: ' + str(pl.shape(distance_arr)))

        # This equation generally holds, and for now it is how we calculate the
        # diffusion coefficient:
        #   <x^2> = q*D*t
        # where q is a numerical constant that depends on dimensionality
        # (q = 2*Dim), D is the diffusion coefficient, and t is the time for
        # which <x^2> is calculated.
        # This and some other useful equations are from
        # http://www.life.illinois.edu/crofts/bioph354/diffusion1.html
        # So we can find D:
        dist_arr_sqrd = distance_arr**2
        mean_sqrd_dist = dist_arr_sqrd.mean()
        total_time = len(distance_arr)*dt
        Diff_coef = mean_sqrd_dist/(2.0*Dim*total_time)
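        # Worked example (illustrative numbers, not from the data): in 1D with
        # mean_sqrd_dist = 2.0 and total_time = 10.0, D = 2.0/(2*1*10.0) = 0.1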
        
        print('Diffusion coefficient for particle ' + str(a) + ' = ' + str(Diff_coef))

        # Let's also estimate a temperature from the Einstein relation D = kT/f,
        # where T is the absolute temperature, k the Boltzmann constant, and
        # f the friction constant (beta); note this gives kT unless k is
        # folded into the units.
        Temp = Diff_coef * beta 
        
        print('Temperature from diffusion coef = ' +str(Temp))
        

    print('\a')
    print('\a')

def main():

    parser = argparse.ArgumentParser()
    # d is for directory
    parser.add_argument('-d',action='store',dest = 'd',type = str, required = True)
    # f is for file this is needed for make_acf_tau_plot()
    parser.add_argument('-f',action='store',dest = 'f',type = str, required = False)
    # plot type
    parser.add_argument('-t',action='store',dest = 't',type = str,required = True)

    inargs = parser.parse_args()
    d = inargs.d
    f = inargs.f
    plot_type = inargs.t


    os.chdir(d)
    # This is also a single particle operation. Lets make a directory and have an individual image
    # for each run.

    if plot_type == 'diffusion':
        diffusion_coef(f)

if __name__ == '__main__':
    main()
Example No. 57
    lp = log_prior(theta)
    if not pyl.isfinite(lp):
        return -pyl.inf
    return lp + log_likelihood(theta, x, y, xerr, yerr)


def mkError(x, x_err):
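    # 0.434 ~ 1/ln(10): propagates the absolute error x_err into the
    # corresponding error on log10(x)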
    return 0.434 * x_err / x


with hdf.File('./../results_cluster.hdf5', 'r') as f:
    dset = f[f.keys()[0]]
    data = dset.value
    data.sort(order='ID')

richnessData = pyl.genfromtxt('./boada_rich.txt', names=True, dtype=None)
richnessData.sort(order='name')

x_obs = pyl.log10(richnessData['lambda'])
xerr = mkError(richnessData['lambda'], richnessData['lambda_err'])
y_obs = data['MASS']
yerr = data['MASS_err']

f = pyl.figure(figsize=(7, 7 * (pyl.sqrt(5.) - 1.0) / 2.0))
ax = f.add_subplot(111)

# low mass
eb = pyl.errorbar(x_obs[:2],
                  y_obs[:2],
                  xerr=xerr[:2],
                  yerr=yerr[:2],
Example No. 58
""" Plots all csv files in a directory """
import pylab as p
import os
import sys

data_dir = sys.argv[1] if len(sys.argv) > 1 else "../all_data/4-11/"

for f in [x for x in os.listdir(data_dir) if "csv" in x][::-1]:
    d = p.genfromtxt(os.path.join(data_dir, f), skip_header=2, delimiter=",")
    p.figure()
    p.title(f)
    # plot each channel (skipping column 4 and the last column) against time
    for i in range(1, d.shape[1] - 1):
        if i != 4:
            p.plot(d[:, 0], d[:, i], label="ch%i" % i)
    p.legend()
p.show()
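# Usage (hypothetical script name): python plot_all_csv.py ../all_data/4-11/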
Example No. 59
def main():

    args = parseCMD()
    Temp = args.Temperature

    Lx = 12.0
    Ly = 12.0
    az = 47.5
    ay = 2.0

    Svals = glob.glob('S*')

    # define some plotting colors    
    colors = ['Salmon','Blue','DarkViolet','MediumSpringGreen','Fuchsia',
            'Yellow','Maroon']
        
    # set up figure that displays all data
    figg = pl.figure(1)
    ax = figg.add_subplot(111)
    pl.xlabel(r'$\mu\ [K]$', fontsize=20)
    pl.ylabel('Spatial Density '+r'$[\si{\angstrom}^{-d}]$', fontsize=20)
    pl.grid(True)
    #pl.xlim([0.5,3.0])
    #pl.ylim([0,0.07])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax.yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # set up bulk SVP densities for plot
    bulkVert = -30
    minMus = -3.0
    maxMus = 5.0
    boxSubtract = 1.6
    pl.plot([minMus, maxMus], [0.02198, 0.02198], 'k-', lw=3)
    pl.annotate('3d SVP', xy=(maxMus - boxSubtract, 0.02195),  #xycoords='data',
            xytext=(-50, bulkVert), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )

    pl.plot([minMus, maxMus], [0.0432, 0.0432], 'k-', lw=3)
    pl.annotate('2d SVP', xy=(maxMus - boxSubtract, 0.0432),  #xycoords='data',
            xytext=(-50, 30), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )
    
    solidDens = True
    if solidDens:
        pl.plot([minMus, maxMus], [0.0248, 0.0248], 'k-', lw=3)
        pl.annotate('HCP solid SVP', xy=(maxMus - boxSubtract, 0.0248),  #xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="->",
                    connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                )

    # plot just the bulk density as a function of chemical potential
    figg2 = pl.figure(2)
    ax2 = figg2.add_subplot(111)
    pl.xlabel(r'$\mu\ [K]$', fontsize=20)
    pl.ylabel('Spatial Density '+r'$[\si{\angstrom}^{-d}]$', fontsize=20)
    pl.grid(True)
    #pl.xlim([0.5,3.0])
    #pl.ylim([0,0.07])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = ax2.yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # set up bulk SVP densities for plot
    bulkVert = -30
    minMus = -3.0
    maxMus = 5.0
    boxSubtract = 1.6
    pl.plot([minMus, maxMus], [0.02198, 0.02198], 'k-', lw=3)
    pl.annotate('3d SVP', xy=(maxMus - boxSubtract, 0.02195),  #xycoords='data',
            xytext=(-50, bulkVert), textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
            )

    # number of particles in film region vs chemical potential
    pl.figure(3)
    pl.xlabel(r'$\mu\ [K]$', fontsize=20)
    pl.ylabel('Number of Particles', fontsize=20)
    pl.grid(True)
    #pl.xlim([0.5,3.0])
    #pl.ylim([0,2])
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # normalized angular winding vs chemical potential
    pl.figure(4)
    pl.xlabel(r'$\mu\ [K]$', fontsize=20)
    pl.ylabel(r'$\Omega$', fontsize=20)
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # actual density in cell
    pl.figure(5)
    pl.xlabel(r'$\mu\ [K]$', fontsize=20)
    pl.ylabel(r'$\rho\ [\si{\angstrom}^{-3}]$', fontsize=20)
    pl.grid(True)
    pl.tick_params(axis='both', which='major', labelsize=16)
    pl.tick_params(axis='both', which='minor', labelsize=16)
    yticks = pl.gca().yaxis.get_major_ticks()
    yticks[0].set_visible(False)

    # --- loop through known directory structure ------------------------------
    for nS, Sval in enumerate(sorted(Svals)):
        
        os.chdir(Sval)
        
        # get bipartition file name
        f = glob.glob('*Bipart*')[0]

        # get angular winding file name
        fw = glob.glob('*Ntwind*')[0]

        # get estimator file name (includes total number)
        fe = glob.glob('*Estimator*')[0]

        # store bulk separation value
        S = re.search(r'\d+',Sval).group(0)
         
        # get label for plot
        if 'distinguishable' in Sval:
            labell = 'S = '+str(S)+', Boltzmannons'
        else:
            labell = 'S = '+str(S)+', Bosons'

        # projected area of film region
        projArea = float(S)*Lx

        # accessible volume in cell
        accessibleVol = Lx*Ly*(2.0*az+float(S)) - Lx*float(S)*(Ly-2.0*ay)
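        # expands to 2.0*Lx*Ly*az + 2.0*ay*Lx*float(S)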

        # multiply angular winding by normalization to be fixed in code later 
        omegaNorm = 4.0*(float(S)+1.0*Ly)**2

        # get chemical potential
        headers = getHeadersFromFile(f)

        mus = pl.array([])
        Films = pl.array([])
        Bulks = pl.array([])
        filmErrs = pl.array([])
        bulkErrs = pl.array([])
        Omegas = pl.array([])
        omegaErrs = pl.array([])
        NumParts = pl.array([])
        NumPartErrs = pl.array([])

        n = 0
        nw = 3
        nn = 12
        for head in headers:

            # get values of chemical potential from file header
            mus = pl.append(mus, float(head))

            # --- Densities ---------------------------------------------------
            filmavg,filmstd,filmbins,bulkavg,bulkstd,bulkbins = pl.genfromtxt(f,
                    unpack=True, usecols=(n,n+1,n+2,n+3,n+4,n+5), delimiter=',')
            n += 6
            
            # get rid of any items which are not numbers..
            # this is some beautiful Python juju.
            filmbins = filmbins[pl.logical_not(pl.isnan(filmbins))]
            filmstd = filmstd[pl.logical_not(pl.isnan(filmstd))]
            filmavg = filmavg[pl.logical_not(pl.isnan(filmavg))]
            bulkbins = bulkbins[pl.logical_not(pl.isnan(bulkbins))]
            bulkstd = bulkstd[pl.logical_not(pl.isnan(bulkstd))]
            bulkavg = bulkavg[pl.logical_not(pl.isnan(bulkavg))]
            filmweights = filmbins/pl.sum(filmbins)
            bulkweights = bulkbins/pl.sum(bulkbins)

            filmavg *= filmweights
            bulkavg *= bulkweights

            filmstd *= filmweights
            bulkstd *= bulkweights

            film = pl.sum(filmavg)
            bulk = pl.sum(bulkavg)
            filmstdErr = pl.sum(filmstd)
            bulkstdErr = pl.sum(bulkstd)

            Films = pl.append(Films, film)
            Bulks = pl.append(Bulks, bulk)
            filmErrs = pl.append(filmErrs, filmstdErr)
            bulkErrs = pl.append(bulkErrs, bulkstdErr)

            # ---- angular winding --------------------------------------------
            omegaAvg,omegaStd,omegaBins = pl.genfromtxt(fw,
                    unpack=True, usecols=(nw,nw+1,nw+2), delimiter=',')
            nw += 9

            # get rid of any items which are not numbers..
            omegaBins = omegaBins[pl.logical_not(pl.isnan(omegaBins))]
            omegaStd = omegaStd[pl.logical_not(pl.isnan(omegaStd))]
            omegaAvg = omegaAvg[pl.logical_not(pl.isnan(omegaAvg))]

            # normalize data.
            omegaStd *= omegaNorm
            omegaAvg *= omegaNorm

            weights = omegaBins/pl.sum(omegaBins)

            omegaAvg *= weights
            omegaStd *= weights

            Omega = pl.sum(omegaAvg)
            omegaErr = pl.sum(omegaStd)

            Omegas = pl.append(Omegas, Omega)
            omegaErrs = pl.append(omegaErrs, omegaErr)
            
            # ---- total number -----------------------------------------------
            numAvg,numStd,numBins = pl.genfromtxt(fe,
                    unpack=True, usecols=(nn,nn+1,nn+2), delimiter=',')
            nn += 15

            # get rid of any items which are not numbers..
            numBins = numBins[pl.logical_not(pl.isnan(numBins))]
            numStd = numStd[pl.logical_not(pl.isnan(numStd))]
            numAvg = numAvg[pl.logical_not(pl.isnan(numAvg))]

            # normalize data.
            numStd /= (1.0*accessibleVol)
            numAvg /= (1.0*accessibleVol)

            weights = numBins/pl.sum(numBins)

            numAvg *= weights
            numStd *= weights

            numPart = pl.sum(numAvg)
            numPartErr = pl.sum(numStd)

            NumParts = pl.append(NumParts, numPart)
            NumPartErrs = pl.append(NumPartErrs, numPartErr)

        # Sort data in order of increasing chemical potential. I love Python.
        # This is another bit of magical Python juju.
        mus, Films, Bulks, filmErrs, bulkErrs, Omegas, omegaErrs, NumParts, NumPartErrs = pl.asarray(
                zip(*sorted(zip(mus, Films, Bulks, filmErrs, bulkErrs, 
                    Omegas, omegaErrs, NumParts, NumPartErrs))))

        pl.figure(1) 
        pl.errorbar(mus, Films, filmErrs, fmt='--o', color=colors[nS],
                label=labell+', 2d',
                markersize=8)
        pl.errorbar(mus, Bulks, bulkErrs, fmt = '-d', color=colors[nS],
                label=labell+', 3d',
                markersize=8)

        pl.figure(2) 
        pl.errorbar(mus, Bulks, bulkErrs, fmt = '-d', color=colors[nS],
                label=labell+', 3d',
                markersize=8)

        pl.figure(3)
        pl.errorbar(mus, Films*projArea, fmt='-o', color=colors[nS],
                label = labell+', 2d',
                markersize=8)

        pl.figure(4)
        pl.errorbar(mus, Omegas, omegaErrs, fmt='-o', color=colors[nS],
                label = labell, markersize=8)

        pl.figure(5)
        pl.errorbar(mus, NumParts, NumPartErrs, fmt='-o', color=colors[nS],
                label = labell, markersize=8)

        os.chdir('..')

   
    pl.figure(1)
    pl.legend(loc=2)
    pl.savefig('densities_vs_chemicalPotential_allS_1APR.pdf', format='pdf',
            bbox_inches='tight')

    pl.figure(2)
    pl.legend(loc=2)
 
    pl.figure(3)
    pl.legend(loc=2)
  
    pl.figure(4)
    pl.legend(loc=2)
   
    pl.figure(5)
    pl.legend(loc=2)
    
  
    pl.show()
Example No. 60
def crunchData(estimTypes, colNums, observables):
    '''
    Takes a list of pimc file types (['estimator', 'super', etc.])
    and a list of lists of column numbers (in the order of the file types),
    e.g. [[0,1], [1,4,6], ...], and combines data from jobs that were
    identical except for their random number seeds into one file.
    '''
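    # Example invocation (hypothetical column indices and observable names):
    #   crunchData(['estimator', 'super'],
    #              [[4, 11], [0]],
    #              [['energy', 'number'], ['superfluid fraction']])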

    args = parseCMD()
    reduceType = args.reduceType
    numToDelete = args.deleteNum
    deleteList = []
    for n in range(numToDelete):
        deleteList.append(n)
    numLinesToSkip = 2+numToDelete
    print 'deleting elements indexed by: ',deleteList
    print 'and skipping',numLinesToSkip,'lines of data file.'


    # make list of data file names
    estimFiles  = glob.glob('*%s*' % estimTypes[0])
    
    # check which ensemble
    canonical = ensembleCheck(estimFiles[0][0])

    # make list of all reduced variable, in order, based on ensemble
    indList = indepVarList(estimFiles, canonical, reduceType)
   
    for nType, estType in enumerate(estimTypes):

        # dict to hold all estType data for all independent variables (T,..)
        allTemps = {}

        # dict to hold numBins, average, stdErr for each data file
        statTemps = {}

        # loop over independent variable for given estType
        for numTemp, temp in enumerate(indList):

            # Get filenames.  The chemical potential always has a sign
            # in front and the temperature always comes after the
            # file type and a dash.
            if reduceType == 'T':
                bipFiles = glob.glob('*%s-%s*' % (estimTypes[nType], temp))
            elif reduceType == 'u':
                bipFiles = glob.glob('*%s*%s*' % (estimTypes[nType], temp))
 
            arrs    = {}
            arrs2   = {}
            for nf, f in enumerate(bipFiles):
                if checkIfEmpty(f,numLinesToSkip):
                    print f,' is empty.'
                    pass
                else:
                    dat = pl.genfromtxt(f, unpack=True, usecols=colNums[nType])

                    # drop the first few (equilibration) measurements. Note
                    # that rebinding the loop variable (d = pl.delete(d, ...))
                    # would not modify dat, so operate on dat itself.
                    if int(len(colNums[nType])) == 1:
                        dat = pl.delete(dat, deleteList)
                    else:
                        dat = [pl.delete(d, deleteList) for d in dat]
                    
                    # define key that can be sorted properly
                    if numTemp < 10:
                        arrName = 'arr00'+str(numTemp)
                    elif (numTemp >= 10 and numTemp < 100):
                        arrName = 'arr0'+str(numTemp)
                    else:
                        arrName = 'arr'+str(numTemp)
                    
                    # treat data from one column
                    if int(len(colNums[nType])) == 1:
                        if nf == 0:
                            arrs[arrName] = dat
                            arrs2[arrName] = [[pl.average(dat),
                                    pl.std(dat)/pl.sqrt(1.0*int(len(dat))),
                                    int(len(dat))]]
                        else:
                            arrs[arrName] = pl.append(arrs[arrName], dat)
                            arrs2[arrName] = pl.append(
                                    arrs2[arrName], [[pl.average(dat),
                                    pl.std(dat)/pl.sqrt(1.0*int(len(dat))),
                                    int(len(dat))]])

                    # treat data from multiple columns
                    else:
                        if nf == 0:
                            for n, arr in enumerate(dat):
                                arrs[arrName+'_'+str(n)] = arr
                                arrs2[arrName+'_'+str(n)] = [[pl.average(arr),
                                        pl.std(arr)/pl.sqrt(1.0*int(len(arr))),
                                        int(len(arr))]]
                        else:
                            for n, arr in enumerate(dat):
                                arrs[arrName+'_'+str(n)] = pl.append(
                                        arrs[arrName+'_'+str(n)], arr)
                                arrs2[arrName+'_'+str(n)] = pl.append(
                                        arrs2[arrName+'_'+str(n)], [[pl.average(arr),
                                            pl.std(arr)/pl.sqrt(1.0*int(len(arr))),
                                            int(len(arr))]])
           
            # construct key name.  This assumes <1000 temperatures.
            if numTemp < 10:
                allArrName = 'allTemps00'+str(numTemp)
            elif (10 <= numTemp and numTemp < 100):
                allArrName = 'allTemps0'+str(numTemp)
            else:
                allArrName = 'allTemps'+str(numTemp)

            if nType < 10:
                allArrName += '00'+str(nType)
            elif (10 <= nType and nType < 100):
                allArrName += '0'+str(nType)
            else:
                allArrName += str(nType)

               
            allTemps[allArrName] = arrs
            statTemps[allArrName] = arrs2

        # ---- Write all data to disk ----
        # length of max. sized array for all data
        maxLen = 0
        for t in allTemps:
            for g in allTemps[t]:
                arrayLen = len(allTemps[t][g])
                if arrayLen > maxLen:
                    maxLen = arrayLen
        # open file to hold all of estType data
        newName = 'Reduced'+str(estType.capitalize())+'Data.dat'
        fout = open(newName, 'w')

        # write independent variable as top header line
        fout.write('#%15s\t' % indList[0])
        for n in range(len(colNums[nType])-1):
            fout.write('%16s\t'% '')
        for temp in indList[1:]:
            fout.write('%16s\t'% temp)
            for n in range(len(colNums[nType])-1):
                fout.write('%16s\t'% '')
        fout.write('\n')

        # write observable names as second header line
        fout.write('#%15s\t' % observables[nType][0])
        for n in range(len(observables[nType])-1):
            fout.write('%16s\t' % observables[nType][n+1])
        for temp in indList[1:]:
            for obs in observables[nType]:
                fout.write('%16s\t' % obs)
        fout.write('\n')
        
        # write data arrays to disk
        for line in range(maxLen):
            for a in sorted(allTemps.iterkeys()):
                for aa in sorted(allTemps[a]):
                    try:
                        fout.write('%16.8E,\t' % (
                            float(allTemps[a][aa][line]) ))
                    except:
                        fout.write('%16s,\t' % '')
            fout.write('\n')
        
        fout.close()

        # ---- Average individual files and write to disk ----
        # get length of max. sized array for averages
        maxLen2 = 0
        for t in statTemps:
            for g in statTemps[t]:
                arrayLen = len(statTemps[t][g])
                if arrayLen > maxLen2:
                    maxLen2 = arrayLen
        maxLen2 /= 3

        # open file for standard error
        newName = 'zAveraged'+str(estType.capitalize())+'Data.dat'
        fout = open(newName, 'w')

        # write independent variable as top header line
        fout.write('#%15s\t%16s\t%16s\t' % (
            indList[0], '',''))
        for n in range(len(colNums[nType])-1):
            fout.write('%16s\t%16s\t%16s\t'% ('','',''))
        for temp in indList[1:]:
            fout.write('%16s\t%16s\t%16s\t'% (temp,'',''))
            for n in range(len(colNums[nType])-1):
                fout.write('%16s\t%16s\t%16s\t'% ('','',''))
        fout.write('\n')

        # write observable names as second header line
        fout.write('#%15s\t%16s\t%16s\t' % (
            observables[nType][0],
            'std Err',
            'bins'))
        for n in range(len(observables[nType])-1):
            fout.write('%16s\t%16s\t%16s\t' % (
                observables[nType][n+1],
                'std Err',
                'bins'))
        for temp in indList[1:]:
            for obs in observables[nType]:
                fout.write('%16s\t%16s\t%16s\t' % (
                    obs, 'std Err', 'bins'))
        fout.write('\n')
        
        # write data arrays to disk
        en = 0
        for line in range(maxLen2):
            for a in sorted(statTemps.iterkeys()):
                for aa in sorted(statTemps[a]):
                    try:
                        fout.write('%15.8E,\t%15.8E,\t%15.8E,\t' % (
                            float(statTemps[a][aa][en]),
                            float(statTemps[a][aa][en+1]),
                            float(statTemps[a][aa][en+2])))
                    except:
                        fout.write('%15s,\t%15s,\t%15s,\t' % ('','',''))
                    
            fout.write('\n')
            en += 3
        
        fout.close()