def fit_template(template, model, img):
    """Try to improve the fit of a shape by trying different configurations for
    position, scale and rotation and returning the configuration with the best
    fit for the grey-level model.

    Args:
        template (Landmarks): The initial fit of an incisor.
        model (GreyLevelModel): The grey-level model of the incisor.
        img: The dental radiograph on which the shape should be fitted.

    Returns:
       Landmarks: The estimated location of the shape.

    """
    gimg = rg.togradient_sobel(img)

    dmin, best = np.inf, None
    for t_x in xrange(-5, 50, 10):
        for t_y in xrange(-50, 50, 10):
            for s in np.arange(0.8, 1.2, 0.1):
                for theta in np.arange(-math.pi/16, math.pi/16, math.pi/16):
                    dists = []
                    X = template.T([t_x, t_y], s, theta)
                    for ind in list(range(15)) + list(range(25,40)):
                        profile = Profile(img, gimg, X, ind, model.k)
                        dist = model.glms[0][ind].quality_of_fit(profile.samples)
                        dists.append(dist)
                    avg_dist = np.mean(np.array(dists))
                    if avg_dist < dmin:
                        dmin = avg_dist
                        best = X

                    Plotter.plot_landmarks_on_image([template, best, X], img, wait=False)

    return best
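Landmarks.T(t, s, theta) is a project method; the pose change it applies is presumably a plain 2-D similarity transform. A self-contained numpy sketch of that assumption:

import math
import numpy as np

# A minimal sketch of the similarity transform assumed behind Landmarks.T:
# scale by s, rotate by theta about the origin, then translate by t.
def similarity_transform(points, t, s, theta):
    rot = np.array([[math.cos(theta), -math.sin(theta)],
                    [math.sin(theta), math.cos(theta)]])
    return s * points.dot(rot.T) + np.asarray(t)

square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
print(similarity_transform(square, [10., 20.], 2.0, math.pi / 4))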
Example #2
def roiEnergyAnalysis(data):
    '''Troubleshooting function: compares the observed summed energy in an ROI to the generator-level energy.'''
    genEnergies = [] 
    sumEnergies = []
    pbar = progressbar("Processing event &count&:", len(data)+1)
    pbar.start()
    count = 0
    for event in data:
        genEnergy = event[2]['getpt'] * np.cosh(event[2]['geneta'])         
        for i in range(len(genEnergy)):
            clustersIndices = np.compress(event[1]['ROI'] == i, event[1]['clusterID'], axis=0)      #|Only take clusters corresponding to the right ROI
            clusterEnergies = []
            for clusterID in clustersIndices:                                                       #|Only take hits corresponding to the correct cluster
                hits = np.compress(event[0]['clusterID'] == clusterID, event[0], axis=0) 
                energies = hits['en'] 
                for energy in energies: 
                    clusterEnergies.append(energy)                                                  #|Add the energy to the cluster energies
            ROIEnergy = np.sum(clusterEnergies)
            # Append to original lists
            genEnergies.append(genEnergy[i])
            sumEnergies.append(ROIEnergy)
        pbar.update(count)
        count += 1
    pbar.finish()
    # np.save("sums.npy", sumEnergies)
    # np.save("gens.npy", genEnergies)
    # Plot it
    Plotter.sumEnergyVsGenEnergy(sumEnergies, genEnergies) 
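The gen-level energy above relies on the identity E = pT * cosh(eta) for (near-)massless particles; a quick numeric check:

import numpy as np

# For a massless particle, |p| = sqrt(pT^2 + pz^2) with pz = pT * sinh(eta),
# so E = pT * cosh(eta) since cosh^2 - sinh^2 == 1.
pt, eta = 35.0, 1.2
pz = pt * np.sinh(eta)
print(np.hypot(pt, pz), pt * np.cosh(eta))  # identical values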
Example #3
 def run(self):
     
     # Check if Cassandra is running
     if self.isCassandraRunning():
         myLogger.info('A running Cassandra instance was found')
         
         self.startLoggingJmx()
         
         # Runs the external tool Cassandra Stress
         myThread = threading.Thread(target = self.runCassandraStress)
         myThread.start()
         
         # Begins recording JMX Metrics 
         count = 0
         while True:
             time.sleep(self._interval)
             count += 1
             self.logJmx(count)
             
             # Once the stress session has completed stop recording JMX Metrics
             if not myThread.isAlive():
                 break
             
         myLogger.info('Finished logging JMX metrics')
         self.stopLoggingJmx()
         
         # Record the metrics back into a Cassandra Table
         CassandraRecord.recordCsv(self._jmxLogFilename, self._host, 'JmxKeyspace', 'JmxRecord')
         
         # Graph the results
         Plotter.plotCsv(self._jmxLogFilename , self._jmxPlotFilename)
         
     else:
         myLogger.error('No running Cassandra instance was found')
Example #4
def create_profile():
    lm = Landmarks('Data/Landmarks/original/landmarks1-1.txt')
    img = rg.load()
    img = rg.enhance(img[0])
    grad_img = rg.togradient_sobel(img)
    profile = Profile(img, grad_img, lm, 30, 40)
    Plotter.plot_profile(grad_img, profile)
Example #5
def build_grey_level_model():
    lms = landmarks.load(2)
    images = rg.load()
    images = [rg.enhance(img) for img in images]
    gimages = [rg.togradient_sobel(img) for img in images]

    glm = GreyLevelModel()
    glm.build(images, gimages, lms, 10, 20)
    Plotter.plot_grey_level_model(glm, gimages)
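GreyLevelModel.quality_of_fit is not shown in this listing; in the classic ASM grey-level model it is the Mahalanobis distance of a sampled profile to the model mean. A self-contained sketch of that assumed form:

import numpy as np

# Mahalanobis fit quality of a sampled profile g against a grey-level model
# (assumed form of GreyLevelModel.quality_of_fit, not the project's actual code).
def quality_of_fit(g, mean, cov_inv):
    d = g - mean
    return float(d @ cov_inv @ d)

rng = np.random.default_rng(0)
profiles = rng.standard_normal((50, 7))   # 50 training profiles of length 2k+1
mean = profiles.mean(axis=0)
cov_inv = np.linalg.pinv(np.cov(profiles, rowvar=False))
print(quality_of_fit(profiles[0], mean, cov_inv))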
Example #6
def align_shapes_T():
    X = Landmarks('Data/Landmarks/original/landmarks1-1.txt')
    Y = X.T(np.asarray([-12, -12]), 1.1, -math.pi/2)
    Plotter.plot_landmarks([X, Y])
    t, s, theta = procrustes_analysis.align_params(X, Y)
    print "Translation: [" + ", ".join(str(f) for f in t) + "] should be [-12, -12]"
    print "Rotation: " + str(theta) + " should be 1.6"
    print "Scale: " + str(s) + " should be 1.1"
    Z = Y.invT(np.asarray(t), s, theta)
    Plotter.plot_landmarks([X, Z])
Example #7
def align_shapes():
    X = Landmarks('Data/Landmarks/original/landmarks1-1.txt')
    Y = X.translate([-1, 6])
    Y = Y.rotate(math.pi)
    Y = Y.scale(1.2)
    Plotter.plot_landmarks([X, Y])
    t, s, theta = procrustes_analysis.align_params(X, Y)
    print "Translation: [" + ", ".join(str(f) for f in t) + "] should be [-1, 6]"
    print "Rotation: " + str(theta) + " should be pi"
    print "Scale: " + str(s) + " should be 1.2"
Example #8
def auto_init():
    imgs = rg.load()
    for radiograph in range(0, 14):
        img = imgs[radiograph]
        X = []
        for tooth in range(1, 9):
            model = IncisorModel.load(tooth)
            X.append(ai.init(model, img))
        Plotter.plot_landmarks_on_image(X, img, show=False, save=True,
                                        title='Autoinit/%02d' % (radiograph,))
Example #9
def fit(radiograph_index, incisor_ind, fit_mode, k, m, out):
    """Find the region of an incisor, using the given parameters.

    Args:
        radiograph_index (int): The index of the dental radiograph to fit on.
        incisor_ind (int): The index of the incisor to fit.
        fit_mode (AUTO|MANUAL): Whether to ask for a manual initial fit, or try
            to find one automatically.
        k (int): Number of pixels either side of point to represent in grey model.
        m (int): Number of sample points either side of current point for search.
        out (str): The location to store the result.
    """
    # leave-one-out
    train_indices = range(0, 14)
    train_indices.remove(radiograph_index-1)

    lms = landmarks.load_mirrored(incisor_ind)
    test_lm = lms[radiograph_index-1]
    train_lms = [lms[index] for index in train_indices]

    imgs = rg.load()
    test_img = imgs[radiograph_index-1]
    train_imgs = [imgs[index] for index in train_indices]

    # train
    model = IncisorModel(incisor_ind)
    model.train(train_lms, train_imgs, k)

    # fit
    X = model.estimate_fit(test_img, fit_mode)
    X = model.fit(X, test_img, m)

    # evaluate
    ## show live
    Plotter.plot_landmarks_on_image([test_lm, X], test_img, wait=False)
    ## save image with tooth circled
    img = test_img.copy()
    colors = [(255, 0, 0), (0, 255, 0)]
    for ind, lms in enumerate([test_lm, X]):
        points = lms.as_matrix()
        for i in range(len(points) - 1):
            cv2.line(img, (int(points[i, 0]), int(points[i, 1])),
                     (int(points[i + 1, 0]), int(points[i + 1, 1])),
                     colors[ind])
    cv2.imwrite('%s/%02d-%d.png' % (out, radiograph_index, incisor_ind,), img)
    ## save tooth region segmented
    height, width, _ = test_img.shape
    image2 = np.zeros((height, width), np.uint8)
    mask = np.array([X.points], dtype=np.int32)
    cv2.fillPoly(image2, [mask], 255)
    maskimage2 = cv2.inRange(image2, 1, 255)
    segmented = cv2.bitwise_and(test_img, test_img, mask=maskimage2)
    cv2.imwrite('%s/%02d-%d-segmented.png' % (out, radiograph_index, incisor_ind,), segmented)
Example #10
def auto_fit():
    # parameters
    m = 15

    imgs = rg.load()
    for radiograph in range(0, 14):
        img = imgs[radiograph]
        Xs = []
        for tooth in range(1, 9):
            model = IncisorModel.load(tooth)
            X = model.estimate_fit(img, MODE_FIT_AUTO)
            X = model.fit(X, img, m)
            Xs.append(X)
        Plotter.plot_landmarks_on_image(Xs, img, show=False, save=True,
                                        title='Autofit/%02d' % (radiograph,))
Example #11
    def getInfo(self, service, date1, date2, span):

        #get the data
        data = SQLclient.getStats(self.cursor,service, date1, date2, span)

        #format data
        names = Plotter.plotCCinfo(data,date1,date2,span)
        return names
Example #12
    def getNewPie(self, span):

        #get the data from the DB
        data = SQLclient.getNewPieData(self.cursor, span)

        #format the data for Highcharts
        pie_gogn = Plotter.prepare_pie(data)
        return pie_gogn
Example #13
    def plot(self,wlrange=None, points=500, showdata=False):
        '''
        Plot the material refractive index.
        * wlrange is the wavelength range
        * points is the number of points to use in the plot
        * showdata, if True, also plots the underlying tabulated data points
        '''
        if showdata:
            import Plotter as pl
            if wlrange:
                selectr = nonzero((min(wlrange)<self.wlr) & (self.wlr<max(wlrange)))
                selecti = nonzero((min(wlrange)<self.wli) & (self.wli<max(wlrange)))
            else:
                selectr = selecti = slice(None)
            
            pl.plot(self.wlr[selectr], self.nir[selectr],'x')
            pl.plot(self.wli[selecti], self.nii[selecti],'.')

        #Do default plot as well
        Material.plot(self,wlrange,points)
Example #14
def getTrendsPlot(file_name,country,product,relative =1,absolute=50000,market=.15,trendline = "All"):
    
    final_trends = getTrends(file_name, country, product, relative, absolute, market, trendline)
    
    call =[]
    
    for trend in final_trends:
#         print(trend,final_trends[trend])
        call.append(trend.split(","))
    return Plotter.plot(country,call,main)
Example #15
def timeSmearingTest():
    '''Iterator function to run a resolution analysis on all of the data available.'''
    data           = np.load("Data/500GeVPhoton.npy")
    res            = 50
    diffsList      = np.array([])
    smearingValues = np.array([])
    num            = 0
    pbar           = progressbar("Computing &count&:", res*len(data))
    pbar.start()
    for smearingVal in np.linspace(0,50,res):
        data           = np.load("Data/500GeVPhoton.npy")                                           #|Quick and dirty reload, numpy was doing funky shit
        diffs          = vertexData(timeSmearing(data, smearingVal), runNumber=num, pbar=pbar, quiet=True)
        diffsList      = np.append(diffsList, diffs)
        smearingValues = np.append(smearingValues, np.repeat(smearingVal, len(diffs)))    
        num            += 1
    pbar.finish()
    np.save("diffs.npy", np.multiply(10,diffsList))
    np.save("vals.npy", smearingValues)
    Plotter.tVertexErrorHist2D(np.multiply(10,diffsList), smearingValues)
Example #16
class Processor(object):

    def __init__(self):
        self.filer = FileHandler()
        self.validator = Validator()
        self.database = Database()
        self.editor = Editor()
        self.plotter = Plotter()

    def add_data(self, fileloc):
        self.database.empty_database()
        self.filer.set_filepath(fileloc)
        self.filer.load_file()
        self.filer.strip_tags()
        self.validator.set_raw_data(self.filer.export())
        self.validator.parse_data()
        self.database.add_people(self.validator.export_good_data())

    def process_bad(self):

        if self.validator.has_bad_data():
            self.editor.set_raw(self.validator.export_bad_data())
            self.editor.edit()
            self.database.add_people(self.editor.export_good_data())

    def set_file_path(self, new_path):
        self.database.set_directory(new_path)

    def get_file_path(self):
        return self.database.get_directory()

    def serialize(self, option):
        self.database.serialize(option)

    def deserialize(self, option):
        self.database.empty_database()
        self.database.deserialize(option)

    def pie_bmi(self):
        dist = self.database.get_bmi_distribution()
        self.plotter.pie_bmi(dist["normal"], dist["overweight"], dist["obese"], dist["underweight"])

    def pie_gender(self):
        dist = self.database.get_gender_distribution()
        self.plotter.pie_gender(dist["males"], dist["females"])

    def scatter_sales(self):
        sales_list = self.database.get_sales_ordered()
        self.plotter.scatter_sales(sales_list)

    def bar_bmi_vs_gender(self):
        self.plotter.bar_bmi_vs_gender(self.database.get_male_bmi(),self.database.get_female_bmi() )
Example #17
def three_model_venn(model_identities, filename, genes=False, morphed_model=None, title=None):
    rxn_sets = []
    for m in model_identities:
        rxn_sets.append(set([r.rxn_id() for r in FBAModel(m[0], m[1]).get_reactions()]))
    gene_dict = None
    if genes:
        rxn_analysis = common_reaction_analysis(model_identities)
        genes = gene_percents(morphed_model, rxn_analysis)
        gene_dict = dict()
        for model_set in genes:
            key = 0
            for i in range(0, 3):
                if model_identities[i][2] in model_set:
                    key += int(math.pow(10, i))
            key = str(key)
            while len(key) < 3:
                key = '0' + key
            value = '{0:.3g}'.format(genes[model_set])
            gene_dict[key] = str(value) + '%'



    Plotter.venn3(rxn_sets, title, 'Reaction', filename, set_labels=[m[2] for m in model_identities], annotation=gene_dict)
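The key construction above, in isolation: membership of each model label maps to one digit, producing matplotlib-venn style region keys such as '100' or '101' (labels below are hypothetical):

import math

model_ids = ['modelA', 'modelB', 'modelC']   # hypothetical m[2] labels
model_set = frozenset(['modelA', 'modelC'])

key = 0
for i in range(3):
    if model_ids[i] in model_set:
        key += int(math.pow(10, i))
print(str(key).zfill(3))   # '101' -> the region shared by models A and C only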
Example #18
def run0():

    print(os.getcwd())
    os.chdir('data')
    print(os.getcwd())
    print(dir(md))

    dt = 0.005
    ucells = 10
    sigma = 1
    N = ucells * ucells
    dens = 0.1
    nu = 0.3
    L = math.sqrt(N / dens)
    print(L)
    T = 2.0
    v0 = math.sqrt(2.0 * T)
    eps = np.array([1.0])
    rc = sigma * math.pow(2, 1.0 / 6.0)
    rc = 2.5
    rcut = np.array([rc])
    shift = np.array([0.0])
    E = 200
    k = 50

    pdb.set_trace()

    md.system_init(np.array([L, L]))
    fm.init_particles_simple_cubic(md, ucells, L)
    lbox = md.system_get_box()
    print("box", lbox)
    md.system_set_boundary_conditions(2)
    md.system_set_velocities(v0)
    md.system_set_zero_center_of_mass_vel()
    md.system_set_dt(dt)
    md.system_set_avg_steps(100)
    md.system_set_potential(eps, rcut, shift)
    md.system_init_neighbor_list()
    pos = md.system_get_positions1()
    vel = md.system_get_velocities()

    #upos = md.system_get_unfolded_positions()
    #tet = md.system_get_tetras()
    #bonds = md.system_get_bonds()

    #sigmas = md.system_get_particle_sizes()
    #sigmas *= sigma
    #R = R - 0.5 + sigma/2

    #xyzfile = fm.Saver(0,"test_test",pos)
    #vtkfile = fm.Saver(1,"test_test",pos,vel=vel,cells=tet)
    #vtffile = fm.Saver(2,"test_test",upos,bonds=bonds,box=[lbox[0],lbox[1]])
    #vtffile.append(0);

    plot = Plotter.Plotter(pos, vel, None, lbox[0], lbox[1], 'pts',
                           sigma)  # 2 is the radius
    plot.update(0)
    #r = 0.0
    #md.system_set_walls_moving_rate(r,r)
    #pdb.set_trace()
    for i in range(1000):
        md.system_run(100)
        avgs = md.system_get_avgs()
        print(avgs)
        #xyzfile.append(avgs[0])
        #vtkfile.append(avgs[0])
        #vtffile.append(avgs[0])
        #md.system_unfold_positions()
        plot.update(avgs[0])

    print("finished")
    xMat = getXMatrix(xlist, Order)
    xFineMat = getXMatrix(xFineList, Order)
    Wlist = getOptimalW(xMat, tlist)

    print("Weigths are: " + str(Wlist.reshape(-1)))

    err = Error(Wlist, xMat, tlist)
    errorList.append(err)
    print("Error is: " + str(err))

    err = getLogLiErr(Wlist, xMat, tlist, 11.1)
    lierrorList.append(err)
    print("Log Error is: " + str(err))

    axarr = plt.createaxis()
    plt.setupaxis(axarr, "X Values", "Y Values",
                  "Plot of least squares curve fitting for M=" + str(Order))
    plt.plot(xlist, tlist, axarr, graphtype="scatter")
    plt.plot(xFineList, f_WandX(Wlist, xFineMat), axarr)
    plt.showOutput(FileName="Q3P1Order_" + str(Order))
    #plt.showOutput()

axarr = plt.createaxis()
plt.setupaxis(axarr, "M value", "Error", "Plot of error versus M")
plt.plot(range(0, 10), errorList, axarr)
plt.showOutput(FileName="ErrvsM")
#plt.showOutput()

axarr = plt.createaxis()
plt.setupaxis(axarr, "M value", "Log Error", "Plot of log error versus M")
Example #20
ncampaigns=3
c1 = Campaign(a1, nUsers=1000.0, probClick=0.5, convParams= convparams)
c2 = Campaign(a2, nUsers=1500.0, probClick=0.6, convParams= convparams)
c3 = Campaign(a3, nUsers=1500.0, probClick=0.6, convParams= convparams)
c4 = Campaign(a2, nUsers=1000.0, probClick=0.5, convParams= convparams)
c5 = Campaign(a4, nUsers=1250.0, probClick=0.4, convParams= convparams)


env = Environment([c1,c2,c3])
nBids=10
nIntervals=10
deadline = 2
maxBudget = 100
agent = Agent(1000, deadline, ncampaigns,nIntervals,nBids,maxBudget)
agent.initGPs()
plotter = Plotter(agent=agent,env=env)

# build a list containing the oracle matrix of every campaign
listMatrices = list()
for i in range(0,ncampaigns):
    matrix = plotter.oracleMatrix(indexCamp=i,nsimul=10)
    listMatrices.append(matrix)
    if i==0:
        optMatrix = np.array([matrix.max(axis=1)])
    else:
        maxrow = np.array([matrix.max(axis=1)])
        optMatrix = np.concatenate((optMatrix,maxrow))


[newBudgets,newCampaigns] = agent.optimize(optMatrix)
Example #21
plotType = 'Slope'


#filename = '/uscms/home/wnash/CSCUCLA/CSCPatterns/dat/Charmonium/charmonium2016F+2017BCEF/SingleMuon/zskim2018D/CLCTMatch-Full.root'
filename = '~/workspace/CSCUCLA/CSCPatterns/dat/Charmonium/charmonium2016F+2017BCEF/SingleMuon/zskim2018D/CLCTMatch-Full.root'

f = r.TFile(filename)
h_new = f.Get('h_lutSegment'+plotType+'Diff')
h_new.GetXaxis().SetTitle('Segment - LUT [strips]')
h_new.GetYaxis().SetTitle('Segments')

h_old = f.Get('h_legacy'+plotType+'Diff')


new = p.Plot(h_new, legType='l')
new.legName='#splitline{#bf{Comparator Codes}}{#splitline{Entries: %i}{Width: %5.4f}}'%(h_new.GetEntries(),h_new.GetStdDev())


old = p.Plot(h_old, legType='l')
old.legName='#splitline{#bf{Current Patterns}}{#splitline{Entries: %i}{Width: %5.4f}}'%(h_old.GetEntries(), h_old.GetStdDev())


can = p.Canvas(lumi='')

can.addMainPlot(new, color=r.kBlack)
can.addMainPlot(old, color = r.kRed)

#can = p.Canvas(lumi='')
#slopes = p.Plot(slopes_h,legType= 'l',legName='#splitline{#bf{Entries}: %i}{#bf{UFlow}: %i #bf{OFlow}: %i}'\
#                %(slopes_h.GetEntries(), slopes_h.GetBinContent(0), slopes_h.GetBinContent(slopes_h.GetNbinsX()+1)), option='hist')
Example #22
def load_landmarks_and_plot_on_image(examples=range(1, 15), save=False):
    for nr in examples:
        lms = landmarks.load_all_incisors_of_example(nr)
        img = cv2.imread('Data/Radiographs/'+str(nr).zfill(2)+'.tif')
        Plotter.plot_landmarks_on_image(lms, img, save=save, title='Radiograph%02d' % (nr,))
Example #23
hCoeff[temp] = 1 - 2 * FL / FS

hCoeff = hCoeff * window

#Frequency response
w, h = signal.freqz(hCoeff)
f = FS * w / (2 * np.pi)

#Generate a sine signal to check the filter's behaviour
f0 = 500

n = np.arange(0, 100, 1)
t = n * TS
x = np.sin(2 * np.pi * f0 * t)

#Filter the signal
y = signal.lfilter(hCoeff, [1.0], x)

#Create the simulation plots
fig, ax = plt.subplots(3)

#Plot the coefficients
Plotter.myPlotter(ax[0], np.arange(0, len(hCoeff), 1), hCoeff, stem=True)
#Plot the frequency response
Plotter.myPlotter(ax[1], f, abs(h))
#Plot the filter's input and output signals
Plotter.myPlotter(ax[2], t, x, {'color': 'blue'})
Plotter.myPlotter(ax[2], t, y, {'color': 'green'})

Plotter.myPlotterShow(fig)
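The hCoeff assignment above is the centre tap of a windowed-sinc high-pass design; for context, a complete self-contained low-pass variant (parameters made up):

import numpy as np
from scipy import signal

FS, FL, NTAPS = 8000.0, 500.0, 61            # sample rate, cutoff, tap count
n = np.arange(NTAPS) - (NTAPS - 1) / 2
h = 2 * FL / FS * np.sinc(2 * FL / FS * n)   # ideal low-pass impulse response
h *= np.hamming(NTAPS)                       # window to tame truncation ripple
h /= h.sum()                                 # normalise DC gain to exactly 1
w, H = signal.freqz(h)
print(abs(H[0]))                             # 1.0 at DC (the passband)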
Example #24
def main(self):
    # self.df = _load_df2(self.df, directory,'14-11')
    self._clean_df()
    d, h, m, s = slt.get_total_recording_time(self.df)
    print("total recorded time: {:.2f}d, {:.0f}h, {:.0f}m, {:.0f}s".format(
        d, h, m, s))
    select = [
        2,
        41235,
        # 41236,
        # 41237,
        # 41238,
        # # 41239,
        # 41240,
        # 41241,
        # 41242,
        # # 41243,
        # 41244,
        # 41245,
        # 41264,
        # 41265,
        # # 41266,
        # 41267,
        # 41268,
        # 41269,
        # 41270,
        # 41271
    ]

    # self.df = slt.select_certain_shows(self.df, 'channel_id', select)
    self.df = slt.shows_with_min_time(self.df, 30)
    self.df = slt.drop_unneeded_columns_and_rows(self.df, nan='drop')
    dupes = slt.check_for_duplicates(self.df)
    self.df = slt.drop_shows_from_channel(self.df, 4)
    # self.df = slt.drop_vl(self.df)
    self.df, no = slt.select_shows_w_x_instances(self.df, 5, keep='all')

    channel_target = self.df['channel_id']
    show_target = self.df['show_id']
    name_target = self.df['file_name']
    recording_target = self.df['recording_id']
    target = channel_target
    print(target.shape)
    select = 'channel_id'
    # no = target.unique().size

    self.df.drop(
        ['file_name', 'show_id', 'recording_id', 'channel_id', 'length'],
        axis=1,
        inplace=True)

    self.df = self.df.reset_index()
    self._run_scaler()

    ## PCA
    X_kpca = PCA.run_kpca(self.df, 4)
    # X_kpca2d = PCA.run_kpca(self.df, 2)
    # PCA.get_vip_feature_count(self.df, 15, 300)
    # PCA.print_cumsum_trend(self.df, 100, ker='linear')
    # PCA.print_cumsum_trend(self.df, 100)

    ## TSNE
    perplexity = 110
    X_tsne = tsne.run_tsne(X_kpca, 2, perplexity)
    # X_tsne_300 = tsne.run_tsne(X_kpca, 2, 300)
    # plt.plot_real_clusters(X_tsne_300, channel_target)
    # tsne.tsne_perplexity_test(X_kpca, channel_target , 2)
    name = directory + "tSNE/" + "tSNE_>30min_>5x" + ".pkl"

    # self._save_arr(X_tsne, name)
    # data = self._load_arr(name)
    data = X_tsne

    # plt.plot_genre_clusters(data, channel_target)
    # plt.plot_genre(data, channel_target, 1)
    # plt.plot_each_genre(data, channel_target)
    # plt.plot_real_clusters(X_kpca, show_target, label='show', title=None)
    # plt.plot_clusters(data, hdbscan.HDBSCAN, (), {'min_cluster_size':10})
    plt.plot_with_annotation(data, channel_target, show_target)

    # KNN
    # knn._get_nearest_neigbours(X_kpca, y, 4)
    # knn._get_nearest_neigbours(X_tsne2d, y, 4)
    # knn._kkn_cross_validation(X_kpca, y)
    # knn._kkn_cross_validation(X_tsne2d, y)
    # knn.get_neigh(X_tsne, 15, 3)
    # knn._knn_graph(X_tsne, y, 3)
    # knn.draw_roc(X_tsne2d, y, 9)

    # show = 620
    # nearestn = knn.get_neigh(X_kpca, show, 10)
    # print("knn kpca")
    # print(odf.iloc[nearestn, 0:4])

    ## Archetype
    n_arch = 4
    X_train_minmax = np.rot90(data, -1)
    XC, S, C, SSE, varexpl = pcha.PCHA(X_train_minmax, n_arch)
    X_train_minmax = np.rot90(X_train_minmax, 1)
    XC = np.array(XC)
    plt.plot_archetypes(X_train_minmax,
                        channel_target,
                        XC,
                        label='channel',
                        title="Archetypes with tSNE")
    # plt.plot_real_clusters(X_tsne, channel_target, label='channel', title=None)

    a = arch.Archetypes(self.df)
    archetypesList = a.findAllArchetypes()
Example #25
def build_asm():
    inc = 1
    lms = landmarks.load_mirrored(inc)
    asm = ASM(lms)
    import pdb; pdb.set_trace()
    Plotter.plot_asm(asm, incisor_nr=inc, save=True)
Example #26
import ROOT as r
import sys
sys.path.append("../")
import Plotter as p

writePath = '~/Documents/Presentations/2018/181128-Multiplicity/'

f = r.TFile('../../dat/SingleMuon/zskim2018D/Multiplicity-Full.root')
#f = r.TFile('../../dat/SingleMuon/zskim2018D/Multiplicity-184.root')

suffixes = ['', '_1', '_2', '_3', '_4', '_5']

for suffix in suffixes:
    can = p.Canvas(lumi='')
    energyVsP = p.Plot('h_energyPerChamberVsP' + suffix,
                       f,
                       '',
                       legType='l',
                       option='colz')
    #if len(suffix):
    #    energyVsP.SetTitle('Average Energy Deposited Over '+suffix+' Chambers')
    energyVsP.GetYaxis().SetRangeUser(-20000, -2000)
    #energyVsP.GetXaxis().SetRangeUser(1,800)

    for i in range(0, energyVsP.GetNbinsX() + 1):
        print "=== New P Bin ==="
        norm = 0
        P = energyVsP.GetXaxis().GetBinCenter(i)

        for j in range(0, energyVsP.GetNbinsY() + 1):
            norm += energyVsP.GetBinContent(i, j)
Example #27
                  unit='simulations'):
        results.append(simulate(country, n_days, False))
    results = collate(results, func=np.stack)

    return results


if __name__ == "__main__":
    ### SINGLE SIMULATION
    copenhagen = Region('Copenhagen', population_size, sampler, I_initial)
    country = Country([copenhagen], hospital_beds)
    result = simulate(country)

    if plot_data:
        # %% Plotting
        Plotter.plot_fatalities(result['R_dead'])
        plt.show()

        if SIR:
            sick_people_on_hospital_fraction = 0.1
            Plotter.plot_hospitalized_people(
                result['I_symp'] * sick_people_on_hospital_fraction,
                hospital_beds)
        else:
            Plotter.plot_hospitalized_people(result['I_crit'], hospital_beds)
        plt.show()

        # Plotting
        Plotter.plot_fatalities(result['R_dead'])
        plt.show()
Example #28
 def _createPlotter(self):
     if self._plt is None:
         self._plt = P.Plotter()
         self._pltReady.set()
Example #29
    def __fit_one_level(self, X, testimg, glms, m, max_iter):
        """Fit the model for one level of the image pyramid.
        """
        # Prepare test image
        img = rg.enhance(testimg)
        gimg = rg.togradient_sobel(img)

        # 0. Initialise the shape parameters, b, to zero (the mean shape)
        b = np.zeros(self.asm.pc_modes.shape[1])
        X_prev = Landmarks(np.zeros_like(X.points))

        # 4. Repeat until convergence.
        nb_iter = 0
        n_close = 0
        best = np.inf
        best_Y = None
        total_s = 1
        total_theta = 0
        while (n_close < 16 and nb_iter <= max_iter):

            with Timer("Fit iteration %d" % (nb_iter,)):
                # 1. Examine a region of the image around each point Xi to find the
                # best nearby match for the point
                Y, n_close, quality = self.__findfits(X, img, gimg, glms, m)
                if quality < best:
                    best = quality
                    best_Y = Y
                Plotter.plot_landmarks_on_image([X, Y], testimg, wait=False,
                                                title="Fitting incisor nr. %d" % (self.incisor_nr,))

                # no good fit found => go back to best one
                if nb_iter == max_iter:
                    Y = best_Y

                # 2. Update the parameters (Xt, Yt, s, theta, b) to best fit the
                # new found points X
                b, t, s, theta = self.__update_fit_params(X, Y, testimg)

                # 3. Apply constraints to the parameters, b, to ensure plausible
                # shapes
                # We clip each element b_i of b to b_max*sqrt(l_i) where l_i is the
                # corresponding eigenvalue.
                b = np.clip(b, -3, 3)
                # t = np.clip(t, -5, 5)
                # limit scaling
                s = np.clip(s, 0.95, 1.05)
                if total_s * s > 1.20 or total_s * s < 0.8:
                    s = 1
                total_s *= s
                # limit rotation
                theta = np.clip(theta, -math.pi/8, math.pi/8)
                if total_theta + theta > math.pi/4 or total_theta + theta < - math.pi/4:
                    theta = 0
                total_theta += theta

                # The positions of the model points in the image, X, are then given
                # by X = TXt,Yt,s,theta(X + Pb)
                X_prev = X
                X = Landmarks(X.as_vector() + np.dot(self.asm.pc_modes, b)).T(t, s, theta)
                Plotter.plot_landmarks_on_image([X_prev, X], testimg, wait=False,
                                                title="Fitting incisor nr. %d" % (self.incisor_nr,))

                nb_iter += 1

        return X
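The step-3 comment above describes clipping each b_i to b_max*sqrt(l_i), while the code clips to a fixed +/-3. A sketch of the eigenvalue-scaled version, with hypothetical eigenvalues:

import numpy as np

# Clip each shape parameter b_i to +/- 3*sqrt(lambda_i), lambda_i being the
# eigenvalue of the corresponding principal mode (values here are made up).
eigenvalues = np.array([40.0, 10.0, 2.5])
b = np.array([25.0, -1.0, 9.0])
limits = 3 * np.sqrt(eigenvalues)
print(np.clip(b, -limits, limits))   # [18.97... -1.  4.74...]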
Example #30
import ROOT as r
import Plotter as p


writePath ='~/Documents/Presentations/2018/181026-3LayerEff/'

f = r.TFile('../data/SingleMuon/zskim2018D-full/CLCTLayerAnalysis-Full.root')
print f
if not hasattr(f, 'IsOpen'):
    print "can't open file"
    exit()

can = p.Canvas(True,lumi='')

mep11a ='me_p11a_11'
mep11b ='me_p11b_11'
mem11a ='me_m11a_11'
mem11b ='me_m11b_11'

chambers =[mep11a,mep11b,mem11a,mem11b]

for chamber in chambers:
    me11 = p.Plot('h_allLctQuality_'+chamber,f,'',legType = 'l',option='hist') 
    if chamber is mep11a: me11.legName = '#splitline{#bf{ME+11A}}{Entries:%i}'%me11.GetEntries()
    if chamber is mep11b: me11.legName = '#splitline{#bf{ME+11B}}{Entries:%i}'%me11.GetEntries()
    if chamber is mem11a: me11.legName = '#splitline{#bf{ME-11A}}{Entries:%i}'%me11.GetEntries()
    if chamber is mem11b: me11.legName = '#splitline{#bf{ME-11B}}{Entries:%i}'%me11.GetEntries()
    if chamber is mep11a or chamber is mem11a:
        can.addMainPlot(me11, color=r.kBlack)
    else:
        can.addMainPlot(me11,color=r.kRed)
Example #31
import ROOT as r
import sys
sys.path.append("../")
import Plotter as p

f = r.TFile('../../dat/SingleMuon/zskim2018D/emulation.root')
can = p.Canvas(True, lumi='')

emu_h = f.Get('emulatedLayerCount')
real_h = f.Get('realLayerCount')
# '#splitline{#bf{ME+1/1/11A}}{Entries:%i}'%me11.GetEntries()

emu = p.Plot(emu_h,
             legName='#splitline{#bf{Emulation}}{Entries:%i}' %
             emu_h.GetEntries(),
             legType='l',
             option='hist')
real = p.Plot(real_h,
              legName='#splitline{#bf{Real}}{Entries:%i}' %
              real_h.GetEntries(),
              legType='l',
              option='hist')
can.addMainPlot(emu, color=r.kBlue, addS=True)
can.addMainPlot(real, color=r.kBlack, addS=True)
#pt.GetXaxis().SetRangeUser(60.,120.)
can.makeLegend(pos='tl')

# r.gStyle.SetStatX(0.35)
# r.gStyle.SetStatY(0.85)
# r.gStyle.SetOptStat(111110)
# can.makeStatsBox(match)
Example #32
hCoeff[temp]=2*FH/FS-2*FL/FS

hCoeff=hCoeff*window

#Frequency response
w, h = signal.freqz(hCoeff)
f = FS*w/(2*np.pi)

#Generate Input
t=np.linspace(0,8*np.pi,1000)
x=np.sin(t)
noise=np.random.normal(0,1,1000)
x=x+noise
x=x/max(x)

d=signal.lfilter(hCoeff,[1.0],x)

# identification
f = pa.filters.FilterLMS(n=N, mu=0.4, w="random")
y, e, w = f.run(d, x)

#######Plotting functions#########
fig,ax =plt.subplots(3)

#Plot the coefficients
Plotter.myPlotter(ax[0],np.arange(0,len(hCoeff),1),hCoeff,stem=True)
#Plot the input signal and the filtered (desired) signal
Plotter.myPlotter(ax[1],t,x)
Plotter.myPlotter(ax[2],t,d)

Plotter.myPlotterShow(fig)
Example #33
inducer_gradient__ = []
param1_wellcontent__ = []
param2_wellcontent__ = []
param3_wellcontent__ = []
param4_wellcontent__ = []

for keys in plate_to_excel:
    selection_gradient__.append(selection_gradient_.conc_at(keys))
    inducer_gradient__.append(inducer_gradient_.conc_at(keys))
    
    param1_wellcontent__.append(param1_wellcontent_.conc_at(keys))
    param2_wellcontent__.append(param2_wellcontent_.conc_at(keys))
    param3_wellcontent__.append(param3_wellcontent_.conc_at(keys))
    param4_wellcontent__.append(param4_wellcontent_.conc_at(keys))

heat_map_param1 = Plotter('param1', selection_gradient__, inducer_gradient__, param1_wellcontent__)
heat_map_param1.plot()

heat_map_param2 = Plotter('param2', selection_gradient__, inducer_gradient__, param2_wellcontent__)
heat_map_param2.plot()

heat_map_param3 = Plotter('param3', selection_gradient__, inducer_gradient__, param3_wellcontent__)
heat_map_param3.plot()

heat_map_param4 = Plotter('param4', selection_gradient__, inducer_gradient__, param4_wellcontent__)
heat_map_param4.plot()


exit()

D3 = TimeCourse('D3',expt1.extract_timecourse('D3'))
Example #34
#Width = 1920
Fov = 60.87  # Camera's field of view in degrees
'''cam_holder = camVideoStream(0,30,Width,Height)
cam_holder.start()'''
cam_holder = cv2.VideoCapture(
    'C:\\Users\\David\\Desktop\\DynExp\\NewVideos\\T11.mp4')
record_count = 0

division_bin = 2  # To differentiate points
max_uniaxial_bins = 2  # This number squared will divide the clusters of data
threshold_min = 0.05
threshold_max = division_bin * max_uniaxial_bins + threshold_min

Camera_Distance = 1.6  # Distance from camera to track in meters

if animate: my_plot = Plotter.plotter(200, 200, 50, 50, 2, False)


def compare_velocities(buffer, point_velocities, point_distances, image,
                       movement_rectangles, FOV, Z, W, H):
    velocity_organizer = np.zeros(
        shape=(2 * max_uniaxial_bins + 1, 2 * max_uniaxial_bins + 1,
               2))  # Refresh arrays used in velocity isolation
    position_organizer = np.zeros(shape=(2 * max_uniaxial_bins + 1,
                                         2 * max_uniaxial_bins + 1, 2))
    frequency_organizer = np.zeros(shape=(2 * max_uniaxial_bins + 1,
                                          2 * max_uniaxial_bins + 1))
    extreme_points = np.zeros(shape=(2 * max_uniaxial_bins + 1,
                                     2 * max_uniaxial_bins + 1, 4))
    captured_base = np.zeros(
        shape=(2 * max_uniaxial_bins + 1, 2 * max_uniaxial_bins +
Example #35
def rotate_landmarks():
    lm = Landmarks('Data/Landmarks/original/landmarks1-1.txt')
    Plotter.plot_landmarks(lm)
    rotated = lm.rotate(math.pi/2)
    Plotter.plot_landmarks(rotated)
Example #36
def find_bbox(mean,
              evecs,
              image,
              width,
              height,
              is_upper,
              jaw_split,
              show=False):
    """Finds a bounding box around the four upper or lower incisors.
    A sliding window is moved over the given image. The window which matches best
    with the given appearance model is returned.

    Args:
        mean: PCA mean.
        evecs: PCA eigen vectors.
        image: The dental radiograph on which the incisors should be located.
        width (int): The default width of the search window.
        height (int): The default height of the search window.
        is_upper (bool): Whether to look for the upper (True) or lower (False) incisors.
        jaw_split (Path): The jaw split.

    Returns:
        A bounding box around what looks like four incisors.
        The region of the image selected by the bounding box.

    """
    h, w = image.shape

    # [b1, a1]---------------
    # -----------------------
    # -----------------------
    # -----------------------
    # ---------------[b2, a2]

    if is_upper:
        b1 = int(w / 2 - w / 10)
        b2 = int(w / 2 + w / 10)
        a1 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1]) - 350
        a2 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1])
    else:
        b1 = int(w / 2 - w / 12)
        b2 = int(w / 2 + w / 12)
        a1 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1])
        a2 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1]) + 350

    search_region = [(b1, a1), (b2, a2)]

    best_score = float("inf")
    best_score_bbox = [(-1, -1), (-1, -1)]
    best_score_img = np.zeros((500, 400))
    for wscale in np.arange(0.8, 1.3, 0.1):
        for hscale in np.arange(0.7, 1.3, 0.1):
            winW = int(width * wscale)
            winH = int(height * hscale)
            for (x, y, window) in sliding_window(image,
                                                 search_region,
                                                 step_size=36,
                                                 window_size=(winW, winH)):
                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != winH or window.shape[1] != winW:
                    continue

                reCut = cv2.resize(window, (width, height))

                X = reCut.flatten()
                Y = project(evecs, X, mean)
                Xacc = reconstruct(evecs, Y, mean)

                score = np.linalg.norm(Xacc - X)
                if score < best_score:
                    best_score = score
                    best_score_bbox = [(x, y), (x + winW, y + winH)]
                    best_score_img = reCut

                if show:
                    window = [(x, y), (x + winW, y + winH)]
                    Plotter.plot_autoinit(image,
                                          window,
                                          score,
                                          jaw_split,
                                          search_region,
                                          best_score_bbox,
                                          title="wscale=" + str(wscale) +
                                          " hscale=" + str(hscale))

    return (best_score_bbox, best_score_img)
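project and reconstruct are project helpers; the reconstruction-error score above only makes sense if they behave roughly like this sketch:

import numpy as np

def project(evecs, X, mean):
    # Coordinates of X in the eigenvector basis, relative to the mean.
    return evecs.T.dot(X - mean)

def reconstruct(evecs, Y, mean):
    # Back-projection from model coordinates to pixel space.
    return mean + evecs.dot(Y)

rng = np.random.default_rng(0)
mean = rng.random(12)
evecs, _ = np.linalg.qr(rng.random((12, 3)))           # 3 orthonormal modes
X = mean + evecs.dot([1.0, -0.5, 0.2]) + 0.01 * rng.random(12)
Xacc = reconstruct(evecs, project(evecs, X, mean), mean)
print(np.linalg.norm(Xacc - X))   # small score = window matches the model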
Example #37
print(den2)

#Discrete Bode
omega_disc = np.linspace(0.01, np.pi * 3 / 4, 6000)
magD, phaseD = myBode(sys2, omega_disc, fs)

#Continuous Bode
magC, phaseC, omega_conti = ct.bode(sys1,
                                    omega_disc * fs,
                                    dB=True,
                                    deg=True,
                                    Plot=False)

#Plotting
fig, axes = plt.subplots(2)
axes[0], axes[1] = mp.myPlotterBodeLabels(axes[0], axes[1])

mp.myPlotterBode(axes[0],
                 axes[1],
                 omega_disc * fs / (2 * np.pi),
                 magD,
                 phaseD,
                 param_dict={'color': 'red'})
mp.myPlotterBode(axes[0], axes[1], omega_conti / (2 * np.pi), magC, phaseC)

#plot 3dB line and 60 deg line
mp.myPlotterBode(
    axes[0],
    axes[1],
    [omega_disc[0] * fs / (2 * np.pi), omega_disc[-1] * fs / (2 * np.pi)],
    [-3, -3], [-60, -60],
Example #38
def init(model, img, show=False):
    """Find an initial estimate for the model in the given image.

    Args:
        model (Landmarks): The shape which should be fitted.
        img: The dental radiograph on which the shape should be fitted.

    Returns:
        Landmarks: An initial estimate for the position of the model in the given image.

    """
    # Are we fitting a lower or an upper incisor?
    tooth = model.incisor_nr
    is_upper = tooth < 5
    if is_upper:
        # UPPER: Avg. height: 314.714285714, Avg. width: 381.380952381
        width = 380
        height = 315
    else:
        # LOWER: Avg. height: 259.518518519, Avg. width: 281.518518519
        width = 280
        height = 260

    # Create the appearance model for the four upper/lower teeth
    radiographs = rg.load()
    data = load_database(radiographs, is_upper, width, height)
    [_, evecs, mean] = pca(data, 5)

    # Visualize the appearance model
    # cv2.imshow('img',np.hstack( (mean.reshape(height,width),
    #                              normalize(evecs[:,0].reshape(height,width)),
    #                              normalize(evecs[:,1].reshape(height,width)),
    #                              normalize(evecs[:,2].reshape(height,width)))
    #                            ).astype(np.uint8))
    # cv2.waitKey(0)

    # Find the jaw split
    jaw_split = split_jaws(img)

    # Find the region of the radiograph that matches best with the appearance model
    img = rg.enhance(img)
    [(a, b), (c, d)], _ = find_bbox(mean,
                                    evecs,
                                    img,
                                    width,
                                    height,
                                    is_upper,
                                    jaw_split,
                                    show=show)

    # Assume all teeth have more or less the same width
    ind = tooth if tooth < 5 else tooth - 4
    bbox = [(a + (ind - 1) * (c - a) / 4, b), (a + (ind) * (c - a) / 4, d)]
    center = np.mean(bbox, axis=0)

    # Plot a bounding box around the estimated region of the requested tooth
    if show:
        Plotter.plot_autoinit(img, bbox, 0, jaw_split, wait=True)

    # Position the mean shape of the requested incisor inside the bbox
    template = model.asm.mean_shape.scale_to_bbox(bbox).translate(center)

    # The position of the lower incisors is further improved using the grey-level model
    if is_upper:
        X = template
    else:
        X = template
        # X = fit_template(template, model, img)

    # Show the final result
    if show:
        Plotter.plot_landmarks_on_image([X], img)

    # Return the estimated position of the shape's landmark points
    return X
Example #39
def run1():

    print(os.getcwd())
    os.chdir('c:\\tmp\\test1')
    print(os.getcwd())
    print(dir(md))

    dt = 0.002
    ucells = 2
    sigma = 2
    N = ucells * ucells
    dens = 0.2
    #nu = 0.5
    nu = 0.3
    dens = nu / np.pi
    L = math.sqrt(N / dens)
    print(L)
    T = 1
    #v0 = math.sqrt(2.0 * T * (1.0-1.0/N))
    v0 = math.sqrt(2.0 * T)
    eps = np.array([1.0])
    rc = sigma * math.pow(2, 1.0 / 6.0)
    rcut = np.array([rc])
    shift = np.array([1.0])
    E = 200
    k = 200

    pdb.set_trace()
    box = np.array([L, L])
    md.system_init(box)
    #nodes = np.loadtxt("diskR1Vertices1.txt");
    #cells = np.loadtxt("diskR1Triangles1.txt") - 1;
    nodes, cells = mdmesh.uniform_mesh_on_unit_circle(.15)
    #L,R = fm.add_clusters(md,nodes,cells,1.0,ucells,nu,'sc1')
    L, R = fm.add_binary_clusters(md, nodes, cells, 1.0, 1.0, .15, ucells, nu,
                                  'sc')
    box = md.system_get_box()
    md.system_set_boundary_conditions(0)
    md.system_set_moving_walls(0)
    md.system_set_velocities(v0)
    md.system_set_zero_center_of_mass_vel()
    md.system_set_dt(dt)
    md.system_set_avg_steps(100)
    md.system_set_potential(eps, rcut, shift)
    md.system_set_youngs_modulus(E)
    #md.system_set_swelling_energy_constants(100,250,0.0)
    md.system_set_friction(.1)
    #md.system_set_bond_spring_constant(k)
    md.system_init_neighbor_list()
    pos = md.system_get_positions()
    vel = md.system_get_velocities()
    upos = md.system_get_unfolded_positions()
    tet = md.system_get_tetras()

    #plt.scatter(pos[:,0],pos[:,1])
    #plt.show()
    #plt.triplot(pos[:,0], pos[:,1], tet, 'go-', lw=1.0)
    bonds = md.system_get_bonds()
    refVol, currVol = md.system_get_elements_volumes()
    I1, I2 = md.system_get_elements_invariants()
    print("Volume check: ", refVol.shape[0], currVol.shape[0], np.sum(refVol),
          np.sum(currVol))

    sigmas = md.system_get_particle_sizes()
    sigmas *= sigma
    R = R - 0.5 + sigma / 2
    n1 = 0.79
    L1 = np.sqrt(ucells**2 * np.pi * R**2 / n1)
    nf = 0.95
    Lf = np.sqrt(ucells**2 * np.pi * R**2 / nf)
    print(L, Lf, R)

    pdb.set_trace()
    #xyzfile = fm.Saver(0,"test_test",pos)
    vtkfile = fm.Saver(1, "test_test", pos, vel=vel, cells=tet, I2=I2)
    #vtffile = fm.Saver(2,"test_test",upos,bonds=bonds,box=[Lx,Lx])
    vtkfile.append(0)
    walls = md.system_get_walls_pos()
    print("walls", walls)
    walls = [walls[0][0], walls[1][0], walls[0][1], walls[1][1]]
    print("walls", walls)
    plot = Plotter.Plotter(pos, vel, tet, box[0], box[1], 'tripts1', I1,
                           sigma)  # 2 is the radius
    plot.update(0, walls, "")
    #plt.show()
    r = Lf / 800
    r00 = np.array([1.01 * r, r])
    r10 = np.array([-1.01 * r, r])
    r0 = np.array([r, r])
    r1 = np.array([-r, -r])
    r02 = np.array([-r, r])
    r12 = np.array([r, -r])
    r_stop = np.array([0, 0])
    data = []
    totRefVol = np.sum(refVol)
    totCurrVol = np.sum(currVol)
    md.system_set_moving_walls(0)
    md.system_set_walls_moving_rate(r0, r1)
    pdb.set_trace()
    shear = 0

    Ws = 0
    for Ws in np.arange(0, 0, 100):
        md.system_run(100)
        avgs = md.system_get_avgs()
        print("\nstep %d" % avgs[0])
        print(avgs)
        title = "steps = %d" % (avgs[0])
        #if Ws % 10 == 0:
        #Ws += 10
        md.system_set_swelling_energy_constants(100, Ws, 0.0)
        plot.update(avgs[0], walls, title)
        #vtkfile.append(avgs[0])

    md.system_set_friction(1)
    md.system_set_moving_walls(1)
    md.system_set_walls_moving_rate(r0, r1)
    for i in range(10000):
        md.system_run(100)
        avgs = md.system_get_avgs()
        walls = md.system_get_walls_pos()
        walls = [walls[0][0], walls[1][0], walls[0][1], walls[1][1]]
        print("\nstep %d" % avgs[0])
        print("walls", walls)
        Lx = walls[1] - walls[0]
        Ly = walls[3] - walls[2]
        print("Box Vol = ", Lx * Ly)

        if i == 20:
            md.system_set_walls_moving_rate(r00, r10)
        if Ly / Lx > 1.1 and shear == 0:
            md.system_set_walls_moving_rate(r0, r1)
        if totCurrVol / totRefVol < 1.5 and i > 20:
            shear = 1
        if shear == 1:
            c = Lx / Ly
            r02 = np.array([-c * r, r])
            r12 = np.array([c * r, -r])
            md.system_set_walls_moving_rate(r02, r12)
            #md.system_set_walls_shear(1)
        if Lx > L:
            md.system_set_walls_moving_rate(r_stop, r_stop)

        md.system_unfold_positions()
        #xyzfile.append(avgs[0])
        vtkfile.append(avgs[0])
        #vtffile.append(avgs[0])
        #if i % 1 == 0:
        print(avgs)
        wf = md.system_get_walls_forces()
        #return Py_BuildValue("((dd)(dd))", f[0][0], f[0][1], f[1][0], f[1][1]);
        wf = [wf[0][0], wf[0][1], wf[1][0], wf[1][1]]
        print("force on walls :", wf)
        totCurrVol = np.sum(currVol)
        data.append([
            dt * avgs[0], wf[0], wf[1], wf[2], wf[3], totCurrVol / totRefVol,
            avgs[5]
        ])
        print("Volumes: ", totRefVol, totCurrVol, totCurrVol / totRefVol * 100)
        print("I1 max", I1.max())

        title = "steps = %d Lini = %f Lx = %f Ly = %f nu = %f V/Vref= %f" % (
            avgs[0], L, Lx, Ly, ucells**2 * np.pi * R**2 /
            (Lx * Ly), totCurrVol / totRefVol)
        if i % 10 == 0:
            plot.update(avgs[0], walls, title)
            wf = np.array(data)
            np.savetxt("data.dat", wf)
            vtkfile.append(avgs[0])

    wf = np.array(data)
    np.savetxt("data.dat", wf)
    print("finished")
    plt.show()
Example #40
def split_jaws(radiograph, interval=50, show=False):
    """Computes a path that indicates the split between the upper and lower jaw.

    Based on histograms of the intensities in the columns of the radiograph, it
    detects the darkest points. A path between these points in the center region
    of the image is considered as the jaw split.

    Args:
        radiograph: The dental radiograph for which the split is computed.
        interval (int): The width of the rows for which histograms are computed.
        show (bool): Whether to visualize the result.

    Returns:
        Path: The estimated jaw split.

    """
    # Transform the image to grayscale format
    img = cv2.cvtColor(radiograph, cv2.COLOR_BGR2GRAY)
    # Top-hat transform image to enhance brighter structures
    img = rg.top_hat_transform(img)

    # Apply a Gaussian filter in the horizontal direction over the inverse
    # of the preprocessed image.
    height, width = img.shape
    mask = 255-img
    filt = gaussian_filter(450, width)
    if width % 2 == 0:
        filt = filt[:-1]
    mask = np.multiply(mask, filt)

    # Create intensity histograms for columns of the image.
    minimal_points = []
    for x in range(interval, width, interval):
        ## generating histogram
        hist = []
        for y in range(int(height*0.4), int(height*0.7), 1):
            hist.append((np.sum(mask[y][x-interval:x+interval+1]), x, y))

        ## smooth the histogram using a Fourier transformation
        fft = scipy.fftpack.rfft([intensity for (intensity, _, _) in hist])
        fft[30:] = 0
        smoothed = scipy.fftpack.irfft(fft)

        ## find maxima in the histogram and sort them
        indices = scipy.signal.argrelmax(smoothed)[0]
        minimal_points_width = []
        for idx in indices:
            minimal_points_width.append(hist[idx])
        minimal_points_width.sort(reverse=True)

        ## keep up to four local maxima which lie at least 150 apart from the points already kept
        count = 0
        to_keep = []
        for min_point in minimal_points_width:
            _, _, d = min_point
            if all(abs(b-d) > 150 for _, _, b in to_keep) and count < 4:
                count += 1
                to_keep.append(min_point)
        minimal_points.extend(to_keep)

    # Find pairs of points such that the summed intensities of the pixels
    # along a straight line between both points is minimal
    edges = []
    for _, x, y in minimal_points:
        min_intensity = float('inf')
        min_coords = (-1, -1)
        for _, u, v in minimal_points:
            intensity = _edge_intensity(mask, (x, y), (u, v))
            if x < u and intensity < min_intensity and abs(v-y) < 0.1*height:
                min_intensity = intensity
                min_coords = (u, v)
        if min_coords != (-1, -1):
            edges.append([(x, y), min_coords])

    # Try to form paths from the found edges
    paths = []
    for edge in edges:
        new_path = True
        # Check if edge can be added to an existing path
        for path in paths:
            if path.edges[-1] == edge[0]:
                new_path = False
                path.extend(edge)
        if new_path:
            paths.append(Path([edge[0], edge[1]]))

    mask2 = mask * (255/mask.max())
    mask2 = mask2.astype('uint8')

    # Trim the outer edges of paths
    map(lambda p: p.trim(mask2), paths)
    # Remove too short paths
    paths = remove_short_paths(paths, width, 0.3)
    # Select the best path
    best_path = sorted([(p.intensity(img) / (p.length()), p) for p in paths])[0][1]

    # Show the result
    if show:
        Plotter.plot_jaw_split(mask2, minimal_points, paths, best_path)

    # Return the best candidate
    return best_path
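The Fourier smoothing used in split_jaws, in isolation: zeroing all but the lowest rfft coefficients low-pass filters a noisy 1-D histogram:

import numpy as np
import scipy.fftpack

rng = np.random.default_rng(1)
hist = np.sin(np.linspace(0, 4 * np.pi, 240)) + 0.3 * rng.standard_normal(240)
fft = scipy.fftpack.rfft(hist)
fft[30:] = 0                          # drop everything above the 30th coefficient
smoothed = scipy.fftpack.irfft(fft)
print(np.var(hist - smoothed) < np.var(hist))   # True: high-frequency noise removed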
Example #41
#inputMatrix
inputMatrix = pa.input_from_history(x, n2)
#inputMatrix=np.zeros((inputSize,n1+20))
#for i in range(n1):
#   inputMatrix[:,i]=x

# identification
f = pa.filters.FilterNLMS(mu=0.8, n=n2)
y, e, w = f.run(d, inputMatrix)

omega, mag2 = signal.freqz(w[-1], omega)

#Plot
fig, ax = plt.subplots(4)
mp.myPlotter(ax[0], freq, abs(mag))
mp.myPlotter(ax[0], freq, abs(mag2), param_dict={'color': 'red'})

mp.myPlotter(ax[1], t, x)
mp.myPlotter(ax[2], t[:], d)
mp.myPlotter(ax[2], t[:], y, param_dict={'color': 'red'})

mp.myPlotter(ax[3], np.arange(0, n1), hCoeff, stem=True)
mp.myPlotter(ax[3],
             np.arange(0, n1 + 20),
             w[-1],
             param_dict={'color': 'red'},
             stem=True)

mp.myPlotterShow(fig)
Example #43
#Sample the continuous system
fs = 8000
sys_d = ct.sample_system(sys_c, 1 / fs, method='tustin')

#Print the continuous and discrete systems
print(sys_d)
print(sys_c)

#Generate the continuous transfer function Bode plot
omega = np.linspace(100, 10000, 6000, endpoint=True)
mag, phase, omega = ct.bode(sys_c, omega, dB=True, Plot=False)

#plot
fig, axes = plt.subplots(2)
mp.myPlotterBodeLabels(axes[0], axes[1])
mp.myPlotterBode(axes[0],
                 axes[1],
                 omega,
                 mag,
                 phase,
                 param_dict={'color': 'red'})

#Generate the discrete transfer function Bode plot
mag, phase, omega = ct.bode(sys_d, omega, dB=True, Plot=False)

#plot
mp.myPlotterBodeLabels(axes[0], axes[1])
mp.myPlotterBode(axes[0],
                 axes[1],
                 omega,
Example #44
    def predict_load(self, graph=True, store=False, plot_groupby=None):
        """ Evaluate the trained models (one per cross validation set).
            Args:
                graph : (boolean) : Whether or not to plot graphs at the end (truth vs. prediction graph by default)
                store : (boolean) : Whether or not to store results in the database
                plot_groupby : (str) : Change the graph type (graph=True) to plot grouped error. Legal strings are those in method 'Utils.get_groupby_time()'
            Returns:
                None
            Todo:
                TODO
        """
        # Just in case, since data isn't pickled.
        self.__build_data(self.settings["Data"]["data_shape_mode"] == 'RNN')

        cv_train_results = []
        cv_test_results = []
        self.last_predict_info = np.array([]).reshape(0, 2)
        train_results, train_inference_time = -1, -1

        # Get back the original load from the differenced data. Could be improved, for example with np.r_[start, X[1:]].cumsum()
        if self.settings["ML_Preprocessor"]["load_difference"]:
            non_diff_data = self.preprocessors['Load'].get_data()
        else:
            load_difference_start = None

        for ((X_train, y_train),
             (X_test, y_test)), model, ((_, y_train_raw), (_, _)) in zip(
                 self.splits[self.settings["Data"]["data_shape_mode"]],
                 self.models, self.splits['raw']):

            # TODO : Assign predict function to var in if then do stuff without ifs
            if self.settings["Data"]["data_shape_mode"] == 'non-RNN':
                if self.settings["ML_Preprocessor"]["load_difference"]:
                    load_difference_start = non_diff_data.loc[
                        non_diff_data.index == min(y_test[0].index), "TS"][0]

                if self.evaluate_training_set:
                    X_train_tmp = deepcopy(X_train)
                    X_train_tmp = mlu.split_dataset_by_date(
                        X_train_tmp,
                        split_on=self.settings["Data"]["forecast_horizon"])
                    y_train_tmp = deepcopy(y_train)
                    y_train_tmp['TS'] = self.sc_y.inverse_transform(y_train)
                    y_train_tmp = mlu.split_dataset_by_date(
                        y_train_tmp,
                        split_on=self.settings["Data"]["forecast_horizon"])
                    train_results, train_inference_time = mlu.recurrent_predict_evaluate(
                        model,
                        self.sc_y,
                        X_train_tmp,
                        y_train_tmp,
                        load_difference_start=load_difference_start)
                else:
                    train_inference_time = 0

                test_results, test_inference_time = mlu.recurrent_predict_evaluate(
                    model,
                    self.sc_y,
                    X_test,
                    y_test,
                    load_difference_start=load_difference_start)

            else:
                if self.settings["ML_Preprocessor"]["load_difference"]:
                    # NOTE: the original referenced an undefined `index`; use
                    # the earliest test-set timestamp, mirroring the branch above.
                    load_difference_start = non_diff_data.loc[
                        non_diff_data.index == min(pd.concat(y_test).index),
                        "TS"][0]

                if self.evaluate_training_set:
                    y_train_tmp = deepcopy(y_train)
                    y_train_tmp = pd.concat(y_train)
                    y_train_tmp['TS'] = self.sc_y.inverse_transform(
                        y_train_tmp)
                    train_results, train_inference_time = mlu.predict_evaluate(
                        model,
                        self.sc_y,
                        X_train,
                        y_train_tmp,
                        load_difference_start=load_difference_start)

                test_results, test_inference_time = mlu.predict_evaluate(
                    model,
                    self.sc_y,
                    X_test,
                    pd.concat(y_test),
                    load_difference_start=load_difference_start)

            if self.verbose > 0:
                if self.evaluate_training_set:
                    train_metric_results = mlu.get_measures(
                        train_results, **self.settings["Evaluation"])
                    print("[TRAINING SET] - {period} error : {res}% {metric}".
                          format(res=train_metric_results,
                                 **self.settings["Evaluation"]))

                test_metric_results = mlu.get_measures(
                    test_results, **self.settings["Evaluation"])
                print(
                    "[TESTING  SET] - {period} error : {res}% {metric}".format(
                        res=test_metric_results,
                        **self.settings["Evaluation"]))

            cv_train_results.append(train_results)
            cv_test_results.append(test_results)

            self.last_predict_info = np.vstack([
                self.last_predict_info,
                np.hstack([train_inference_time, test_inference_time])
            ])

        if graph:
            import Plotter
            Plotter.plot_results(pd.concat(
                [pd.concat(cv_train_results),
                 pd.concat(cv_test_results)]),
                                 rolling=24,
                                 groupby=plot_groupby)

        self.last_training_results = cv_train_results
        self.last_testing_results = cv_test_results

        if store:
            self.__save_results_to_db()

        # Clear memory.
        # NOTE: this may not be necessary depending on the Keras / TF version,
        # and on some versions it may even segfault, but without it memory use
        # builds up and training slows down.
        #K.get_session().close()
        #cfg = K.tf.ConfigProto()
        #cfg.gpu_options.allow_growth = True
        #K.set_session(K.tf.Session(config=cfg))

        K.clear_session()
Example #45
if __name__ == "__main__":

    # get file paths
    # data obtained from http://www.physics.emory.edu/faculty/weeks/data/arches/
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    mypath = "../data"
    oldfiles = [join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f)) and re.search(r'.*yale\.txt', f)]
    newfiles = [join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f)) and re.search(r'^sav.*\.txt', f)]
    # processing data
    
    old_interface = OldInterface(oldfiles)
    old_interface.cal_angles()
    concave_list = old_interface.pick_concave()
    plotter = Plotter.PlotCircle(concave_list)
    #Plotter.AngleDistribution_shaded(old_interface.combine_angle_with_width())
    

    new_interface = NewInterface(newfiles)
    new_interface.cal_angles()
    angle_data = new_interface.combine_angle_with_width()
    Plotter.AngleDistribution(angle_data)
    Plotter.AngleDistribution_shaded(angle_data)
    Plotter.NDistribution(new_interface.combine_N_with_width())
    Plotter.AngleDistributionVSN(new_interface.combine_angle_with_N())
    Plotter.AngleDistributionVSN_shaded(new_interface.combine_angle_with_N())
    Plotter.AngleGravityN_shaded(new_interface.combine_angle_with_N_and_g())
    

Example #46
    def process(self, **kwargs):
        Plotter.graph(Graph(self.graphChoice.get()), self.data,
                      self.selectedAxes)
Example #47
t5 = runExperiment(Processing.removeTopPercentile5, "text/t5-remove_word.txt",
                   "text/t5-model-2018.txt", "text/t5-baseline-result.txt",
                   "text/t5-vocabulary.txt")
print(str(t5) + "\n")

t10 = runExperiment(Processing.removeTopPercentile10,
                    "text/t10-remove_word.txt", "text/t10-model-2018.txt",
                    "text/t10-baseline-result.txt", "text/t10-vocabulary.txt")
print(str(t10) + "\n")

t15 = runExperiment(Processing.removeTopPercentile15,
                    "text/t15-remove_word.txt", "text/t15-model-2018.txt",
                    "text/t15-baseline-result.txt", "text/t15-vocabulary.txt")
print(str(t15) + "\n")

t20 = runExperiment(Processing.removeTopPercentile20,
                    "text/t20-remove_word.txt", "text/t20-model-2018.txt",
                    "text/t20-baseline-result.txt", "text/t20-vocabulary.txt")
print(str(t20) + "\n")

t25 = runExperiment(Processing.removeTopPercentile25,
                    "text/t25-remove_word.txt", "text/t25-model-2018.txt",
                    "text/t25-baseline-result.txt", "text/t25-vocabulary.txt")
print(str(t25) + "\n")
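
# The five runs above differ only in the percentile; a loop would avoid the
# repetition. A minimal sketch using the helpers shown above:
#     for pct in (5, 10, 15, 20, 25):
#         remover = getattr(Processing, 'removeTopPercentile%d' % pct)
#         paths = ["text/t%d-%s" % (pct, s) for s in
#                  ("remove_word.txt", "model-2018.txt",
#                   "baseline-result.txt", "vocabulary.txt")]
#         print(str(runExperiment(remover, *paths)) + "\n")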

f = [f1, f5, f10, f15, f20]
t = [t5, t10, t15, t20, t25]

Plotter.displayStuff(f, t)
Example #48
MnEqns = beta * np.dot(xMat.T, tlist)

Mn = np.linalg.solve(Sninv, MnEqns)

xFineList = np.arange(xlist[0], xlist[-1], 0.001)
xFineMat = getXMatrix(xFineList, sudoM)

Sn = np.linalg.inv(Sninv)
sigmaxlist = []
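# Posterior predictive variance for Bayesian linear regression
# (cf. Bishop, PRML eq. 3.59): sigma_N^2(x) = 1/beta + phi(x)^T S_N phi(x)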
for xp in xFineList:
    phi = createSQM(xp, sudoM).reshape((-1, 1))
    sigma_2n = 1 / beta + np.dot(phi.T, np.dot(Sn, phi))
    sigmaxlist += [np.sqrt(sigma_2n[0, 0])]

axarr = plt.createaxis()
#plt.setupaxis(axarr, "X Values", "Y Values", "Plot of Gaussian curve fitting for M="+str(M)+" with half of the points")
plt.setupaxis(axarr, "X Values", "Y Values",
              "Plot of Gaussian curve fitting for M=" + str(M))
plt.plot(xlist, tlist, axarr, graphtype="scatter")
f_W = f_WandX(Mn, xFineMat).reshape(-1)
plt.plot(xFineList, f_W, axarr)
plt.plot(xFineList, f_W + sigmaxlist, axarr, ls='--')
plt.plot(xFineList, f_W - sigmaxlist, axarr, ls='--')
#plt.showOutput(FileName="Q3P3_minus_a_few_points")
#plt.showOutput(FileName="Q3P3")
plt.showOutput()

axarr = plt.createaxis()
#plt.setupaxis(axarr, "X Values", "Y Values", "Plot of 10 randomly drawn curves from the posterior distribution with half of the points")
plt.setupaxis(axarr, "X Values", "Y Values",
              "Plot of 10 randomly drawn curves from the posterior distribution")  # completed from the commented title above; the original is truncated here
Example #49
        tmp2.append(ll)
        count += 1

    xdata = np.array(tmp1)
    ydata = np.array(tmp2)
    print tmp1, xdata
    print tmp2, ydata
    popt, pcov = scipy.optimize.curve_fit(sigmoid, xdata, ydata)
    print popt
    x = np.linspace(0,85,85)
    y = sigmoid(x, *popt)
    pylab.plot(xdata, ydata, 'o', label='data')
    pylab.plot(x,y, label='fit')
#    pylab.ylim(0, 1.05)
    pylab.legend(loc='best')
    pylab.show()

exit()
#    print item.name, item.mean(),selection_gradient.conc_at(item.name),inducer_gradient.conc_at(item.name)
#    selection.append(selection_gradient.conc_at(item.name))
#    inducer.append(inducer_gradient.conc_at(item.name))
#    OD.append(item.mean())
    
    

exit()
heat_map = Plotter('junk',selection,inducer,OD)
heat_map.plot()


Example #50
import ROOT as r
import Plotter as p

writePath = '~/Documents/Presentations/2018/181026-3LayerEff/'
f = r.TFile('../data/SingleMuon/zskim2018D/CLCTLayerAnalysis-Aug-Sep.root')

#writePath ='~/Documents/Presentations/2018/181026-3LayerEff/secondFirmwareUpdate/'
#f = r.TFile('../data/SingleMuon/zskim2018D/CLCTLayerAnalysis-Sep+.root')

print f
if not hasattr(f, 'IsOpen'):
    print "can't open file"
    exit()

can = p.Canvas(True, lumi='')

mep11a = 'me_p11a_11'
mep11b = 'me_p11b_11'
mem11a = 'me_m11a_11'
mem11b = 'me_m11b_11'

chambers = [mem11b, mem11a, mep11b, mep11a]

for chamber in chambers:
    me11 = p.Plot('h_clctLayerCount_' + chamber,
                  f,
                  '',
                  legType='l',
                  option='hist')
    if chamber is mep11a:
        me11.legName = '#splitline{#bf{ME+1/1/11A}}{Entries:%i}' % me11.GetEntries()
Example #51
def main(args=None):

    config = configp.start()
    if args is None:
        args = parser.start()
    elif isinstance(args,basestring):
        args = parser.start(args)
    
    cedfile = args.ced
    stdfile = args.std
    plotFields = args.field 
    multi = args.multi
    print_shapes = args.print_shapes
    print_global_atts = args.print_global_atts
    print_axis = args.print_axis
    print_list_synth = args.print_list_synth
    terrain = args.terrain
    slope = args.slope
    meteo = args.meteo
    valid = args.valid
    prof = args.prof
    nearest = args.nearest
    noplot = args.no_plot

    """ print synhesis availables """
    if print_list_synth:
        synth_folder=fh.set_working_files(config=config)
        out=os.listdir(synth_folder)
        print "Synthesis directories available:"
        for f in out:
            print f
        usr_input = raw_input('\nIndicate directory: ')
        out=os.listdir(synth_folder+'/'+usr_input)
        print "\nSyntheses available:"
        out.sort()
        for f in out:
            if f[-3:]=='cdf': print f
        print '\n'
        sys.exit()


    """ retrieves synthesis and flight instances
        from AircraftAnalysis
    """
    SYNTH, FLIGHT, TERRAIN = fh.set_working_files(cedfile=cedfile,
                                                stdfile=stdfile,
                                                config=config)
    
    """ print shape of attribute arrays """
    if print_shapes:
        SYNTH.print_shapes()
        if not print_global_atts: 
            sys.exit()

    """ print global attirutes of cedric synthesis """
    if print_global_atts:
        SYNTH.print_global_atts()
        sys.exit()

    """ print axis values """
    if print_axis:
        for ax in print_axis:
            if ax.isupper():
                ax=ax.lower()
            SYNTH.print_axis(ax)
        sys.exit()

    """ print synthesis time """
#    print "Synthesis start time :%s" % SYNTH.start
#    print "Synthesis end time :%s\n" % SYNTH.end

    """ make synthesis plots """
    if plotFields:
        for f in plotFields:
            P=Plotter.plot_synth(SYNTH,FLIGHT,TERRAIN,
                                var=f,
                                wind=args.wind,
                                panel=args.panel,
                                slicem = args.slicem,
                                slicez = args.slicez,
                                slice = args.slice,
#                                azimuth = args.azimuth,
#                                distance = args.distance,
                                zoomIn=args.zoomin,
                                mask = args.mask,
                                config=config)

    """ make terrain plots """
    if terrain or slope:
        # P[0] might produce an error if P is not a list;
        # check in plot_synth.cross_section
        Plotter.plot_terrain(P[0],
                             terrain=terrain,
                             slope=slope,
                             terrain_file=config['filepath_dtm'])

    """ make flight level meteo plot """
    if meteo:
        Plotter.plot_flight_meteo(SYNTH,FLIGHT)

    """ compare synth and flight level """
    if valid:
        out = Plotter.compare_synth_flight(SYNTH,FLIGHT,
                                     level=valid,
                                     zoomin=args.zoomin,
                                     noplot=noplot)
        
#        if 'wind_profiler' in config:
#            case=int(cedfile[1:3])
#            Plotter.compare_with_windprof(SYNTH,
#                                          location=config['wind_profiler'],
#                                          case=case)
        return out
        
    """ make profile from synthesis """
    if prof:
        if nearest is None:
            markers=['o','s','D','*']
            out = Plotter.make_synth_profile(SYNTH,coords=prof,
                                             markers=markers,
                                             noplot=noplot)
        else:
            ' (4.5km,n=12) or (7.0km,n=30) seem good choices '
            out = Plotter.make_synth_profile_withnearest(SYNTH,
                                                         target_latlon=prof,
                                                         max_dist=nearest[0][0], # [km]
                                                         n_neigh =nearest[0][1])        
        
        try:        
            ' if P exists '
            for i,p in enumerate(prof):
                lat,lon = p
                P.haxis.scatter(lon,lat,
                                s=config['sounding_size'],
                                c=config['sounding_color'],
                                marker=config['sounding_marker'],
                                lw=3)
        except UnboundLocalError:
            ' if P does not exist just pass '
            pass
        return out
        
        
    # if turbulence:
    # Plotter.print_covariance(SYNTH,FLIGHT)
    # Plotter.print_correlation(SYNTH,FLIGHT)
    # Plotter.plot_wind_comp_var(SYNTH,FLIGHT)
    # Plotter.plot_tke(SYNTH,FLIGHT)
    # Plotter.plot_vertical_heat_flux(SYNTH,FLIGHT)
    # Plotter.plot_vertical_momentum_flux(SYNTH,FLIGHT,config['filepath_dtm'])
    # Plotter.plot_turbulence_spectra(SYNTH,FLIGHT)

    if multi:
        plt.close('all')
        return P
    else:
        ''' use this one with ipython '''
        plt.show(block=False)
        ''' use this one with the shell '''
        # plt.show()  # assumed completion; the original snippet is truncated here
Example #52
#den_h = f.Get('h_clctEff_den')
#hasClct = f.Get('')

#pt_h = f.Get('h_eventCuts')

mep11a = 'me_p11a_11'
mep11b = 'me_p11b_11'
mem11a = 'me_m11a_11'
mem11b = 'me_m11b_11'

chambers = [mep11a, mep11b, mem11a, mem11b]

for chamber in chambers:

    can = p.Canvas(logy=True, lumi='')

    den = p.Plot('h_clctEff_den_' + chamber,
                 f,
                 legName=chamber + ' Segments',
                 legType='',
                 option='hist')
    hasClct = p.Plot('h_clctEff_hasClct_' + chamber,
                     f,
                     legName='w/ CLCT ',
                     legType='l',
                     option='pe')
    has3LayClct = p.Plot('h_clctEff_has3layClct_' + chamber,
                         f,
                         legName='w/ 3Lay CLCT',
                         legType='l',
                         option='pe')  # assumed completion; the original snippet is truncated here
Example #53
def procrustes():
    ind = 1
    # lms = landmarks.load(ind)
    lms = landmarks.load_mirrored(ind)
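    # Generalized Procrustes Analysis: iteratively normalises the shapes for
    # translation, scale and rotation, returning the mean shape and the set
    # of aligned shapes.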
    mean_shape, aligned_shapes = procrustes_analysis.GPA(lms)
    Plotter.plot_procrustes(mean_shape, aligned_shapes, incisor_nr=ind, save=True)
Example #54
    ## Error estimates, using the formula for stepsize 2h, where y is the value with stepsize h and u is with stepsize 2h:
    ##   E_n = (y_n - u_n) / (2^m - 1), with m = 5 for the 4th-order R-K method, so E_n = (y_n - u_n) / 31
    ## The local error per step is O(h^5), so the global error is ~O(h^4)
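    ## A minimal sketch of this Richardson-style estimate (hypothetical
    ## solve(steps) returning the solution sampled on a common grid):
    ##     y = solve(2 * timeStepsPerHour)   # fine solution, step h
    ##     u = solve(timeStepsPerHour)       # coarse solution, step 2h
    ##     E = (y - u) / (2**5 - 1)          # per-point error estimate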

    ##plotting here
    #timeStepsPerHour = 40

    (S, I, R) = ODEsolver.rungeKuttaIterator(
        S, I, R, timeStepsPerHour,
        humanContactsPerHour[0][0] * infectionProbPerContact, recovRate,
        vaccinationChancePerHour, movementChances * 0, movementChancesLD * 0,
        places)

    print('Regular Plot, timeStepsPerHour = ' + str(timeStepsPerHour))

    Plotter.plotThis22(S[0], I[0], R[0], places)
    #Plotter.plotThis22Multi(S, I, R, places, True)

    #print('Using the theory to calculate time passed, it should be: ' + str( (-1 / humanContactsPerHour[0][0] / infectionProbPerContact) * numpy.log( (S[0][runLength - 1] * ( (S[0][0] + I[0][0]) - S[0][0])) / (S[0][0] * ( (S[0][0] + I[0][0]) - S[0][runLength - 1] )) )) + ' hours. In the simulation this change was over: ' +str(runLength - 1)+ ' hours.')

    plt.show()
    plt.clf()

    ##now with double the timeStepsPerHour (should be approx ~2^4 times the error)

    #===========================================================================
    # timeStepsPerHour = timeStepsPerHour * 2
    #
    # print('Error-Comparison Plot, timeStepsPerHour = '+str(timeStepsPerHour))
    #
    # (S2, I2, R2) = setInitialPops(runLength, numPlaces, placePop, infectedFrac)
Example #55
def vertexData(data, pbar=None, currentFile="", runNumber=None, dataLength=None,
                      errorPlot=False, plotTitle=None, quiet=False, returnDiffs=True, 
                      invMassVsError=False):
    '''Apply the tVertexing/pVertexing algorithm to a large number of events.'''
    if invMassVsError: returnDiffs = False
    i                 = 0
    skippedCount      = 0                                                                           #|Number of skipped events
    incorrectCount    = 0                                                                           #|Number of incorrect vertex predictions
    tVertexZList      = []                                                                          #|List of tVertexed vertices
    pVertexZList      = []                                                                          #|List of pVertexed vertices
    pVertex0List      = []
    pVertex1List      = []
    genVertexZList    = []                                                                          #|List of actual vertices
    correctVertexList = []
    invariantMassList = []
    ldata             = len(data)
    nAttempted        = len(data)
    for event in data:
        try: 
            # Get data, run tests
            ROIs         = np.sort(event[2], order='pt')[::-1]                                      #|Sort ROIs by decreasing pt
            cluster0     = np.sort(np.compress(event[1]['ROI'] == ROIs['roiID'][0], event[1]),
                                   order='en')[::-1]                                                #|Gets highest energy cluster of highest-pT ROI
            cluster1     = np.sort(np.compress(event[1]['ROI'] == ROIs['roiID'][1], event[1]),
                                   order='en')[::-1]
            # Parse data 
            c0ID         = cluster0['clusterID'][0] 
            c1ID         = cluster1['clusterID'][0] 
            c0t          = getClusterArrivalTimes(event[0], c0ID)                                   #|Get flat-averaged arrival times
            c1t          = getClusterArrivalTimes(event[0], c1ID)
            # c0x,c0y,c0z  = cluster0['centerX'][0], cluster0['centerY'][0], cluster0['centerZ'][0]
            # c1x,c1y,c1z  = cluster1['centerX'][0], cluster1['centerY'][0], cluster1['centerZ'][0]
            c0x,c0y,c0z  = getClusterXYZ(event[0], c0ID)                                            #|Get log-energy weighted arrival times
            c1x,c1y,c1z  = getClusterXYZ(event[0], c1ID)
            cluster0Hits = [c0x, c0y, c0z, c0t]                                                     #|Package together for twoVertex() function
            cluster1Hits = [c1x, c1y, c1z, c1t]
            genVertexZ   = event[4]['z'][0] #|Generator level "true" vertices

            # tVertexing
            vertices     = twoVertex(cluster0Hits, cluster1Hits)
            tVertexZ     = vertices[0]                                                              #|Optimal solution 
            soln1, soln2 = vertices[2:4]                                                            #|Pass both solutions to compare
            # error        = vertices[4] 
            if np.absolute(tVertexZ) > 15.0:                                                        #|Automatically discard any fishy looking solutions
                nAttempted   -= 1
                i            += 1
                skippedCount += 1
                if not quiet: print ">> Skipped!"
                continue                                                                            #|Get rid of the ~3sigma solutions that are probably errors.

            # pVertexing
            c0hits       = np.compress(np.logical_and(
                                event[0]['clusterID']==c0ID, event[0]['t']>0), event[0], axis=0)
            c1hits       = np.compress(np.logical_and(
                                event[0]['clusterID']==c1ID, event[0]['t']>0), event[0], axis=0)
            x0,y0,z0,t0  = c0hits['x'], c0hits['y'], c0hits['z'], c0hits['tofT']
            x1,y1,z1,t1  = c1hits['x'], c1hits['y'], c1hits['z'], c1hits['tofT']
            # pVertex0, pVertex0t = pVertex(x0,y0,z0,t0)['x'][2:]
            # pVertex1, pVertex1t = pVertex(x1,y1,z1,t1)['x'][2:]
            pVertex0 = pVertex(x0,y0,z0,t0)['x'][2]
            pVertex1 = pVertex(x1,y1,z1,t1)['x'][2]
            pVertexZ = np.mean((pVertex0, pVertex1))

            # Append to lists
            tVertexError = np.absolute(tVertexZ-genVertexZ)
            pVertexError = np.absolute(pVertexZ - genVertexZ)
            tVertexZList.append(tVertexZ)
            pVertex0List.append(pVertex0)
            pVertex1List.append(pVertex1)
            pVertexZList.append(pVertexZ)
            genVertexZList.append(genVertexZ)
            invariantMassList.append(invariantMass(event))

            # Build output string
            if not quiet:
                string = "Event: %3i  |  NumClusters: %2i  |  NumROIs: %2i  |  " \
                        % (i,len(event[1]), len(event[2])) 
                string += "tVertexZ: {0:9.5f}cm  |  genVertexZ:{1:9.5f}cm  |  Error:{2:>9.5f}cm"\
                        .format(tVertexZ, genVertexZ, tVertexError)
                string += "  |  pV0:{0:>9.5f}cm  |  pV1:{1:>9.5f}cm  |  pVertex Error:{2:>9.5f}cm"\
                        .format(pVertex0, pVertex1, pVertexError)
                string += "  |  tV-pV:{0:>9.5f}cm"\
                        .format(np.absolute(tVertexZ-pVertexZ))
                # string += "  |  pV0t:{0:>9.2f}ns  |  pV1t:{1:>9.2f}ns"\
                #         .format(pVertex0t, pVertex1t)        
                if np.absolute(tVertexZ-genVertexZ) < .001:  string += "<"+"-"*7                     #|Points out very good fits
                elif np.absolute(tVertexZ-genVertexZ) < .01: string += "<"+"-"*3

            # Test for incorrectly identified vertices
            if np.absolute(soln1-genVertexZ) < tVertexError:
                incorrectCount += 1
                if not quiet:
                    string += "<-- Incorrect choice of point. Correct: %.5fcm with error %.5fcm." \
                            % (soln1, np.absolute(soln1-genVertexZ))
            elif np.absolute(soln2-genVertexZ) < tVertexError:
                incorrectCount +=1
                if not quiet:
                    string += "<-- Incorrect choice of point. Correct: %.5fcm with error %.5fcm." \
                            % (soln2, np.absolute(soln2-genVertexZ))
            else:
                correctVertexList.append(tVertexZ-genVertexZ)                                       #|Pick out vertices the algorithm correctly identified

        except RuntimeWarning:            
            string        = ">> Runtime warning, skipping this event..."
            nAttempted   -= 1
            skippedCount += 1
        # except IndexError:
        #     string     = "Event: %3i   "%i + "-"*15 + "Two clusters/ROIs not found" + "-"*15
        #     nAttempted -= 1
        #     skippedCount += 1

        if not quiet: print string
        i += 1

        # Update progressbar
        if runNumber is not None and pbar is not None:
            pbar.update(ldata*runNumber + i)
        elif pbar is not None:
            pbar.update(i)

    # Do some manipulation to the lists and possibly clean it up to get a fit
    diffs    = np.subtract(tVertexZList, genVertexZList)
    pDiffs   = np.subtract(pVertexZList, genVertexZList)
    pDiffs0  = np.subtract(pVertex0List, genVertexZList)
    pDiffs1  = np.subtract(pVertex1List, genVertexZList)
    oldlen   = len(diffs)
    if oldlen == 0: return np.array([0])                                                            #|Sentinel array returned when no events survive
    # Trim the worst 1% of events to improve fitting
    diffs    = np.extract(np.absolute(diffs) < np.percentile(np.absolute(diffs), 99.0), diffs)  #|np.percentile() doesn't exist in lxplus numpy. Remove this if using there.
    pDiffs   = np.extract(np.absolute(pDiffs) < np.percentile(np.absolute(pDiffs), 99.0), pDiffs)
    pDiffs0  = np.extract(np.absolute(pDiffs0) < np.percentile(np.absolute(pDiffs0), 99.0), pDiffs0)
    pDiffs1  = np.extract(np.absolute(pDiffs1) < np.percentile(np.absolute(pDiffs1), 99.0), pDiffs1)
    #invariantMassList = np.extract(np.absolute(diffs) < 1.5, invariantMassList)

    if not quiet: 
        print "Correctly identified %i/%i events in %s." % \
                (nAttempted-incorrectCount, nAttempted, currentFile)
        print "%s events trimmed" % str(oldlen-len(diffs))
        print "%i events skipped." % skippedCount
    if dataLength:
        print "Processed %i/%i events (%.2f%%)." % \
                (nAttempted, dataLength, 100.0*float(nAttempted)/dataLength)

    if errorPlot:
        Plotter.tVertexErrorHist(10*diffs, len(diffs), title=plotTitle, 
                                 ranges=[-7,7], quiet=False)
        Plotter.tVertexErrorHist(10*pDiffs, len(pDiffs), 
                                 title="pVertexed $z$ - genVertex $z$ for 500GeV $\gamma$-gun", 
                                 ranges=[-300,300], quiet=False)
        Plotter.tVertexErrorHist(10*pDiffs0, len(pDiffs0), 
                                 title="pVertexed $z$ - genVertex $z$ for 500GeV $\gamma$-gun (C0)", 
                                 ranges=[-300,300], quiet=False)
        Plotter.tVertexErrorHist(10*pDiffs1, len(pDiffs1), 
                                 title="pVertexed $z$ - genVertex $z$ for 500GeV $\gamma$-gun (C1)", 
                                 ranges=[-300,300], quiet=False)

    if invMassVsError:
        Plotter.invariantMassErrorPlot(np.absolute(diffs), invariantMassList)

    if returnDiffs:
        return diffs 
Example #56
def find_bbox(mean, evecs, image, width, height, is_upper, jaw_split, show=False):
    """Finds a bounding box around the four upper or lower incisors.
    A sliding window is moved over the given image. The window which matches best
    with the given appearance model is returned.

    Args:
        mean: PCA mean.
        evecs: PCA eigenvectors.
        image: The dental radiograph on which the incisors should be located.
        width (int): The default width of the search window.
        height (int): The default height of the search window.
        is_upper (bool): Whether to look for the upper (True) or lower (False) incisors.
        jaw_split (Path): The jaw split.

    Returns:
        A bounding box around what looks like four incisors.
        The region of the image selected by the bounding box.

    """
    h, w = image.shape

    # [b1, a1]---------------
    # -----------------------
    # -----------------------
    # -----------------------
    # ---------------[b2, a2]

    if is_upper:
        b1 = int(w/2 - w/10)
        b2 = int(w/2 + w/10)
        a1 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1]) - 350
        a2 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1])
    else:
        b1 = int(w/2 - w/12)
        b2 = int(w/2 + w/12)
        a1 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1])
        a2 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1]) + 350

    search_region = [(b1, a1), (b2, a2)]

    best_score = float("inf")
    best_score_bbox = [(-1, -1), (-1, -1)]
    best_score_img = np.zeros((500, 400))
    for wscale in np.arange(0.8, 1.3, 0.1):
        for hscale in np.arange(0.7, 1.3, 0.1):
            winW = int(width * wscale)
            winH = int(height * hscale)
            for (x, y, window) in sliding_window(image, search_region, step_size=36, window_size=(winW, winH)):
                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != winH or window.shape[1] != winW:
                    continue

                reCut = cv2.resize(window, (width, height))

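                # Project the window into PCA space and reconstruct it; the
                # reconstruction error scores how well the window matches the
                # appearance model (lower is better).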
                X = reCut.flatten()
                Y = project(evecs, X, mean)
                Xacc = reconstruct(evecs, Y, mean)

                score = np.linalg.norm(Xacc - X)
                if score < best_score:
                    best_score = score
                    best_score_bbox = [(x, y), (x + winW, y + winH)]
                    best_score_img = reCut

                if show:
                    window = [(x, y), (x + winW, y + winH)]
                    Plotter.plot_autoinit(image, window, score, jaw_split, search_region, best_score_bbox,
                                          title="wscale="+str(wscale)+" hscale="+str(hscale))

    return (best_score_bbox, best_score_img)
Example #57
import ROOT as r
import sys
sys.path.append("../")
import Plotter as p
 
f = r.TFile('../../dat/SingleMuon/zskim2018D/emulation.root')
can = p.Canvas(True,lumi='')
 
emu_h = f.Get('emulatedMultiplicity')
real_h = f.Get('realMultiplicity')
 
 
emu = p.Plot(emu_h,legName='#bf{Emulation}', legType='l',option='hist')
real = p.Plot(real_h,legName='#bf{Real}', legType='l',option='hist')
can.addMainPlot(emu, color=r.kBlue,addS=True)
can.addMainPlot(real, color=r.kBlack,addS=True)
#pt.GetXaxis().SetRangeUser(60.,120.)
can.makeLegend(pos='tl')

# r.gStyle.SetStatX(0.35)
# r.gStyle.SetStatY(0.85)
# r.gStyle.SetOptStat(111110)
# can.makeStatsBox(match)
   
can.cleanup('multiplicity.pdf', mode='BOB')
Example #58
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import Plotter
import time

accuracyPlotter = Plotter.Plotter(['Train_Accuracy'], 'Accuracy')
lossPlotter = Plotter.Plotter(['Training_Loss', 'Test_Loss'], 'Loss')
testAccPlotter = Plotter.Plotter(['Accuracy'], 'Test_Accuracy')
testLossPlotter = Plotter.Plotter(['Loss'], 'Test_Loss')

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size',
                    type=int,
                    default=64,
                    metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size',
                    type=int,
                    default=1000,
                    metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs',
                    type=int,
                    default=50,
                    metavar='N',
                    help='number of epochs to train (default: 50)')  # assumed completion; the original snippet is truncated here
Example #59
    def __init__(self):
        self.filer = FileHandler()
        self.validator = Validator()
        self.database = Database()
        self.editor = Editor()
        self.plotter = Plotter()
Example #60
import ROOT as r
import Plotter as p

writePath = '~/Documents/Presentations/2018/181026-3LayerEff/'

f = r.TFile('../data/SingleMuon/zskim2018D-full/CLCTLayerAnalysis-Full.root')
can = p.Canvas(lumi='', logy=True)

mep11a = 'me_p11a_11'
mep11b = 'me_p11b_11'
mem11a = 'me_m11a_11'
mem11b = 'me_m11b_11'

chambers = [mep11a, mep11b, mem11a, mem11b]

for chamber in chambers:
    me11 = p.Plot('h_clctEff_cuts_' + chamber,
                  f,
                  '',
                  legType='l',
                  option='hist')
    me11.setTitles(X='')
    #     if chamber is mep11a: me11.legName = '#splitline{#bf{ME+11A}}{Entries:%i}'%me11.GetEntries()
    #     if chamber is mep11b: me11.legName = '#splitline{#bf{ME+11B}}{Entries:%i}'%me11.GetEntries()
    #     if chamber is mem11a: me11.legName = '#splitline{#bf{ME-11A}}{Entries:%i}'%me11.GetEntries()
    #     if chamber is mem11b: me11.legName = '#splitline{#bf{ME-11B}}{Entries:%i}'%me11.GetEntries()
    #

    if chamber is mep11a: me11.legName = '#bf{ME+11A}'
    if chamber is mep11b: me11.legName = '#bf{ME+11B}'
    if chamber is mem11a: me11.legName = '#bf{ME-11A}'