Example #1
    def testOverallMatrix(self):
        sample_list = ['A', 'B', 'C']
        clusters = [[['A', 'C'], ['B']], [['A', 'C'], ['B']]]
        res = at.clustersToMatrix(clusters, sample_list)
        overall = at.overallMatrix(res)
        expected = pd.DataFrame([[2, 0, 2], [0, 2, 0], [2, 0, 2]],
                                columns=['A', 'B', 'C'],
                                index=['A', 'B', 'C'])
        assert np.all(np.equal(overall.index, expected.index))
        assert np.all(np.equal(overall.values, expected.values))
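
Read together with Example #7 below, this test implies that at.overallMatrix
sums the per-clustering co-occurrence matrices elementwise: two identical 0/1
matrices yield entries of 2. A minimal sketch of that relationship in plain
pandas (the matrix format is assumed from these tests, not taken from the
AnalysisTools source):

import pandas as pd

samples = ['A', 'B', 'C']
# One 0/1 co-occurrence matrix per clustering, as clustersToMatrix returns
m = pd.DataFrame([[1, 0, 1], [0, 1, 0], [1, 0, 1]],
                 columns=samples, index=samples)
# Elementwise sum over both clusterings reproduces the expected matrix above
overall = sum([m, m])
print(overall)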
Example #2
    def test_validate(self):
        probes = 'caisson2D.csv'
        datalist = at.readProbeFile(probes)
        time = datalist[1]
        data = datalist[2]
        rz = data[:, 5]
        # Convert the rotation angle from radians to degrees
        alpha = rz * 180 / math.pi
        # Restrict to t <= 2.5 s and estimate the period from downward zero crossings
        it = np.where(time > 2.5)[0][0]
        period = at.zeroCrossing(time[:it], alpha[:it], up=False)[0]
        period_ref = 0.93
        err = 100 * abs(period_ref - period) / abs(period_ref)
        assert err < 7.0
Example #3
    def test_validate(self):
        probes = 'caisson2D.csv'
        datalist = at.readProbeFile(probes)
        time = datalist[1]
        data = datalist[2]
        ux_plastic = data[:, 25]
        t = []
        j = 4  # time when the first wave reaches the structure
        wave_T = 1.3
        ux_plast_per_wave = []
        # Sample the plastic displacement once per wave period
        for n in range(13):
            t.append(np.where(time > j)[0][0])
            ux_plast_per_wave.append(ux_plastic[t[n]])
            j = j + wave_T

        disp_ref = 0.0015
        disp = []
        diff = []
        # 8 is the number of possible windows of 5 waves until the end of the simulation
        for m in range(8):
            disp.append(ux_plast_per_wave[m + 5] - ux_plast_per_wave[m])
            diff.append(abs(disp[m] - disp_ref))

        # Convert to an array before locating the minimum; comparing a plain
        # list against a scalar would hand np.where a single boolean
        diff = np.array(diff)
        pos_min = np.argmin(diff)

        err = 100 * (diff[pos_min] / disp_ref)
        assert err < 10.0
Example #4
    def test_validate(self):
        probes = 'record_rectangle1.csv'
        datalist = at.readProbeFile(probes)
        time = datalist[1]
        data = datalist[2]
        rotq_e3 = data[:, 7]
        # Recover the rotation angle in degrees from the e3 quaternion component
        alpha = np.array([math.degrees(2 * math.asin(q)) for q in rotq_e3])
        it = np.where(time > 2.5)[0][0]
        period = at.zeroCrossing(time[:it], alpha[:it], up=False)[0]
        period_ref = 0.93
        err = 100 * abs(period_ref - period) / abs(period_ref)
        assert err < 4.0
Example #5
    def testColorTable(self):
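        # Smoke test: build a color table for two groups of samples and print
        # it for manual inspection (no assertions).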
        group_names = ['A', 'B']
        overall_clusters = [['W', 'X'], ['Y', 'Z']]
        sample_list = ['W', 'X', 'Y', 'Z']
        color_table = at.createColorTable(group_names, overall_clusters,
                                          sample_list)

        print(color_table)
Example #6
    def __init__(self,
                 ParameterFile,
                 DataFile,
                 xRange=[-float('inf'), float('inf')]):
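        # NOTE: a mutable default argument is shared across calls; harmless
        # here as long as xRange is never modified in place.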

        with open(ParameterFile[0] + '/' + ParameterFile[1] + '.yaml',
                  'r') as stream:
            par = yaml.safe_load(stream)

        dt = AnalysisTools.DataTools()
        Data, ErrorBars = dt.LoadData(DataFile, par)

        ##### Scale Data #####

        if 'Scaling' in par:

            if par['Scaling']['Type'] == 'Data':
                Data = Data * par['Scaling']['Factor']
                ErrorBars = ErrorBars * par['Scaling']['Factor']
                print('Scaling data by ' + str(par['Scaling']['Factor']))

        ##### Background Subtraction #####

        if 'Background' in par:

            BackgroundFile = par['Background']

            # Reuse the '_E...' segment of the data file name (presumably an
            # energy label) to select the matching background file
            string = DataFile.replace('.h5', '')
            string = string.split('_')[1:]
            for segment in string:
                if segment[0] == 'E':
                    BackgroundFile += '_' + segment + '.h5'

            if os.path.isfile(par['FolderPath'] + '/' + BackgroundFile):
                Background, Background_ErrorBars = dt.LoadData(
                    BackgroundFile, par)
                Background = dt.TrimData(Background, xRange)
                Background_ErrorBars = dt.TrimData(Background_ErrorBars,
                                                   xRange)
                if 'Scaling' in par and par['Scaling']['Type'] == 'Background':
                    Background = Background * par['Scaling']['Factor']
                    Background_ErrorBars = Background_ErrorBars * par[
                        'Scaling']['Factor']
                    print('Scaling background by ' +
                          str(par['Scaling']['Factor']))
                Data, ErrorBars = dt.SubtractBackground(
                    Data, ErrorBars, Background, Background_ErrorBars, par)
            else:
                print(
                    'Background file not found. Background subtraction canceled.'
                )

        self.ParameterFile = ParameterFile
        self.DataFile = DataFile
        self.par = par
        self.Data = Data
        self.ErrorBars = ErrorBars
Example #7
    def testClusterToMatrix(self):
        sample_list = ['A', 'B', 'C']
        clusters = [[['A', 'C'], ['B']], [['A', 'C'], ['B']]]
        res = at.clustersToMatrix(clusters, sample_list)
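        # clustersToMatrix should return one 0/1 co-occurrence matrix per
        # clustering; the two identical clusterings give identical matrices.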
        expected = [
            pd.DataFrame([[1, 0, 1], [0, 1, 0], [1, 0, 1]],
                         columns=['A', 'B', 'C'],
                         index=['A', 'B', 'C'])
        ] * 2

        for x, y in zip(res, expected):
            assert np.all(np.equal(x.index, y.index))
            assert np.all(np.equal(x.values, y.values))
Example #8
    def testExport(self):
        overall_matrix = pd.DataFrame(
            [[15, 15, 2, 2], [15, 15, 2, 2], [2, 2, 15, 15], [2, 2, 15, 15]],
            columns=['W', 'X', 'Y', 'Z'],
            index=['W', 'X', 'Y', 'Z'])
        overall_clusters = [['W', 'X'], ['Y', 'Z']]
        group_names = ['A', 'B']
        sample_list = ['W', 'X', 'Y', 'Z']
        color_table = at.createColorTable(group_names, overall_clusters,
                                          sample_list)

        composition = [[(10, -1), (2, 0), (1, -1)], [(10, -1), (2, 0),
                                                     (1, -1)],
                       [(10, -1), (2, 1), (1, -1)], [(10, -1), (2, 1),
                                                     (1, -1)]]
        prefix = 'genome'
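        # exporter.parse is only checked for running to completion here; its
        # return value is not asserted against.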
        output = exporter.parse(overall_matrix, color_table, composition,
                                group_names, overall_clusters, sample_list,
                                prefix)
Example #9
# --> Dictionary of TEfficiency objects, keyed by identification tool
Efficiencies_s = {}
for tool in ID_Tools_s:
    Efficiencies_s[tool] = {}
    Efficiencies_s[tool][tool+"_all"] = ROOT.TEfficiency( "Efficiency_"+tool+"_all_bdt_cuts_signal","",
                                                          len(aux.bins[tool][0])-1 ,aux.bins[tool][0] )
    for cat in aux.prong_cat+aux.prongpi0_cat:
        Efficiencies_s[tool][tool+"_"+cat] = ROOT.TEfficiency( "Efficiency_"+tool+"_"+cat+"_bdt_cuts_signal","",
                                                               len(aux.bins[tool][0])-1 ,aux.bins[tool][0] )

#--------------------------------------------------------------
#-------------> LOOP OVER THE EVENTS OF THE INPUT TREE --------
#--------------------------------------------------------------
for entry in range(entries_s):
    AnalysisTools.Processing(entry, entries_s, float(entries_s) / 100.)
    tauCell_s.GetEntry(entry)

    # Keep only events matched at every trigger level and to truth
    if tauCell_s.EF_ismatched != 1: continue
    if tauCell_s.L2_ismatched != 1: continue
    if tauCell_s.L1_ismatched != 1: continue
    if tauCell_s.truth_ismatched != 1: continue

    # --> tauNoCut is not implemented in 14 TeV MC
    # --> L2_tau20_medium is the 'backup' solution for now
    isTrigger = False
    if '14TeV' in args.sample_type:
        isTrigger = tauCell_s.L2_L2_tau20_medium
    elif '8TeV' in args.sample_type:
        isTrigger = tauCell_s.EF_EF_tauNoCut
Example #10
import Simulator as Sim
import AnalysisTools as Analysis

# Instantiate the simulator and generate the example data set
sim = Sim.Simulator()
sim.exampleData()

# Read the example data into the analysis tool
anaTool = Analysis.AnalysisTools()
anaTool.loadData('exampleData.txt')

# Display a plot and summary statistics
anaTool.displayPlot()
anaTool.displayStatistics()
Example #11
def main(config_file_path):
    # Config loading
    var_list = ['base_directory', 'organism', 'input_type', 'file_name',
                'section_length', 'S1_iVal', 'S1_piVal', 'S2_iVal',
                'S2_piVal', 'reference', 'optimize']  # note: unused below

    config = configparser.ConfigParser()
    config.read(config_file_path)

    #setup
    start_time = time.time()

    #Settings
    try:
        output_directory = Path(config.get('Settings', 'output_directory'))
        input_path = Path(config.get('Settings', 'input_path'))
        prefix = input_path.parts[-1].split('.')[0]

        #set these according to config info
        s1_params = pw.ParamWrapper()
        s2_params = pw.ParamWrapper()

        s1_params.setSectionLength(config.getint('Settings', 'section_length'))
        s1_params.setIVal(config.getfloat('Settings', 'S1_iVal'))
        s1_params.setPiVal(config.getfloat('Settings', 'S1_piVal'))

        s2_params.setIVal(config.getfloat('Settings', 'S2_iVal'))
        s2_params.setPiVal(config.getfloat('Settings', 'S2_piVal'))

        # Parameter ranges for autogroup (presumably swept during optimization)
        s2_params.setIMax(10)
        s2_params.setIMin(2)
        s2_params.setIStep(0.5)
        s2_params.setPiMax(10)
        s2_params.setPiMin(1)
        s2_params.setPiStep(0.5)

        reference = config.get('Settings', 'reference')
        autogroup = config.getboolean('Settings', 'autogroup')

    except Exception as e:
        raise RuntimeError('Error reading configuration file') from e

    #output paths
    if not output_directory.is_dir():
        output_directory.mkdir()
    os.chdir(output_directory)

    cytoscape_path = Path("{0}.xgmml".format(prefix))
    json_path = Path("{0}.json".format(prefix))

    tab_network_path = Path("chromosome_paintings.tsv")
    matrixout_path = Path("overall_similarity.tsv")
    heatmaps_path = Path("heatmaps.pdf")

    density_path = Path("density.txt")
    group_path = Path("groups.txt")
    tab_path = Path("tab.txt")

    colorout_path = Path("colors.txt")
    log_path = Path("log.txt")
    nn_out_path = Path("{0}_nn.tsv".format(prefix))

    hdf_path = input_path.parent / '{0}.h5'.format(prefix)
    matrices_hdf_path = input_path.parent / '{0}_matrices.h5'.format(prefix)
    save_state_path = input_path.parent / '{0}_savestate.json'.format(prefix)

    #other variables
    logger = configLogger(log_path)

    # Sanitize input
    try:
        assert input_path.is_file()
        assert 0 <= s1_params.getPiVal() <= 20
        assert 0 <= s1_params.getIVal() <= 20
        assert 0 <= s2_params.getPiVal() <= 20
        assert 0 <= s2_params.getIVal() <= 20
    except AssertionError as e:
        raise ValueError('Configuration file contains bad values') from e

    logger.info('config loaded')

    #Input Processing

    # checkPrimaryClustering returns True when primary clustering still needs to run
    if not io.checkPrimaryClustering(s1_params, save_state_path):
        logger.info('Primary Clustering exists, loading existing matrices')
        save_state, matrices = io.loadSaveState(save_state_path,
                                                matrices_hdf_path)
        sample_list = save_state['sample_list']
        chr_names = save_state['chr_names']
        chr_breaks = save_state['chr_breaks']
        io.writeTab(sample_list, tab_path)
    else:
        # Tabular data from GATK loaded into pandas
        df, sample_list = loadToPandas(hdf_path, input_path, reference,
                                       s1_params, True)
        os.chdir(output_directory)

        logger.info('Start Primary Clustering')
        io.writeTab(sample_list, tab_path)
        #TODO: check for whether we're skipping primary clustering
        clusters, chr_names, chr_breaks = primaryCluster(
            df, sample_list, s1_params, logger)
        genNNData(clusters, chr_names, chr_breaks, s1_params, sample_list,
                  nn_out_path)
        io.writePrimaryClusters(chr_names, chr_breaks, clusters,
                                Path('pclusters.txt'))
        matrices = at.clustersToMatrix(clusters, sample_list)
        logger.info('Writing Save State')
        io.writeSaveState(s1_params, sample_list, chr_names, chr_breaks,
                          matrices, save_state_path, matrices_hdf_path)

    overall_matrix = at.overallMatrix(matrices)

    logger.info('Start Secondary Clustering')
    group_names, overall_clusters = sc.group(overall_matrix, tab_path,
                                             group_path, s2_params, logger,
                                             autogroup)

    color_table = at.createColorTable(group_names, overall_clusters,
                                      sample_list)
    color_table.to_csv(colorout_path)

    logger.info('calculating composition')
    condensed_matrices = gc.condenseToGroupMatrix(matrices, group_names,
                                                  overall_clusters,
                                                  sample_list)
    composition = gc.getChromosomePaintings(condensed_matrices, chr_breaks,
                                            overall_clusters, group_names,
                                            sample_list)

    # Write all outputs for the run
    logger.info('writing output')
    io.writeTabularPainting(composition, chr_names,
                            s1_params.getSectionLength(), sample_list,
                            tab_network_path)
    io.writeOverallMatrix(overall_matrix, matrixout_path)
    exporter.parse(overall_matrix, color_table, composition, group_names,
                   overall_clusters, sample_list, prefix)
    print("PopNet Completed")
    print('Run time was {0} seconds'.format(time.time() - start_time))
Example #12
    def __init__(self):
        self.dt = AnalysisTools.DataTools()
        self.Folders = Folders  # Folders is expected to be defined at module scope
Example #13
    def FitData(self):

        dt = self.dt

        Data = self.Data
        Info = self.Info
        DataName = str(self.DataName)

        print('Data: ' + DataName)
        print('Description: ' + Info['Description'])
        ##### Prepare Data #####
        
        if 'Files' in Info['Background']:
            BackgroundFromFile = self.BackgroundFromFile
            for column in Data:
                Data[column] = Data[column] - BackgroundFromFile[1]
        if 'Level' in Info['Background']:
            # Subtract the mean signal level over the given x range
            xRange = Info['Background']['Level']['xRange']
            Mean = Data
            Mean = Mean[Mean.index > min(xRange)]
            Mean = Mean[Mean.index < max(xRange)]
            Mean = Mean.mean().mean()
            Data -= Mean

        # Average the columns inside zRange to build the background spectrum
        TBackground = Info['Background']['zRange']
        DataNames = list()
        for i in Data.columns:
            if i >= min(TBackground) and i <= max(TBackground):
                DataNames.append(i)
        Background = df(Data[DataNames].mean(axis=1), columns=['Data'])

        Resolution = Info['Resolution']
        Data = dt.ReduceResolution(Data, Resolution)
        
        ##### Fit Data #####
        
        if 'Models' in Info['Background']:
            print('Fitting Background')
            fit = AnalysisTools.FitTools(Background, Info['Background'], 'Background')
            fit.Fit()
            Background['Fit'] = fit.Fits['Data']
            Data_BC = Data.divide(Background['Fit'], axis=0)
        else:
            Data_BC = Data.divide(Background['Data'], axis=0)
        
        if 'xRange' in Info['Fit']:
            Data_BC = dt.TrimData(Data_BC, Info['Fit']['xRange'][0], Info['Fit']['xRange'][1])

        if 'zRange' in Info['Fit']:
            T_mask = []
            T_mask.append(Data.columns <= max(Info['Fit']['zRange']))
            T_mask.append(Data.columns >= min(Info['Fit']['zRange']))
            T_mask = np.all(T_mask, axis=0)
            Data_BC = Data_BC.T[T_mask].T
        
        fit = AnalysisTools.FitTools(Data_BC, Info['Fit'])
        fit.Fit(fit_x=Data.index.values)

        Fits_BC = fit.Fits
        FitsParameters = fit.FitsParameters

        # Undo the background correction to put the fits on the original scale
        if 'Fit' in Background:
            Fits = Fits_BC.multiply(Background['Fit'], axis=0)
        else:
            Fits = Fits_BC.multiply(Background['Data'], axis=0)
        
        print('\n'+100*'_')
        
        ##### Peak Assignments #####
        
        PeakList = list()
        AssignmentList = list()
        for Peak in Info['Fit']['Models']:
            PeakList.append(Peak)
            if 'assignment' in Info['Fit']['Models'][Peak]:
                AssignmentList.append(Info['Fit']['Models'][Peak]['assignment'])
            else:
                AssignmentList.append(Peak)
        FitsAssignments = df(AssignmentList, index=PeakList, columns=['Assignment'])
        
        ##### Show Fits & Data #####
        
        ShowFits = Info['Fit'].get('ShowFits', True)

        if ShowFits:
            
            plt.figure(figsize = [6,4])
            plt.plot(Background.index, Background['Data'],'k.', label='Data')
            if 'Fit' in Background :
                plt.plot(Background.index, Background['Fit'], 'r-', label='Fit')
            plt.xlabel('WaveNumber (cm$^{-1}$)'), plt.ylabel('Intensity (au)')
            plt.title('Background')
            plt.show()

            print(100*'_')
        
            for Column in Data_BC:

                plt.figure(figsize = [12,4])

                plt.subplot(1, 2, 1)
                plt.plot(Data.index, Data[Column],'k.', label='Data')
                plt.plot(Fits.index, Fits[Column], 'r-', label='Fit')
                plt.xlabel('WaveNumber (cm$^{-1}$)'), plt.ylabel('Intensity (au)')
                plt.title('Temperature: '+str(Column)+' K')

                plt.subplot(1, 2, 2)
                plt.plot(Data_BC.index, Data_BC[Column],'k.', label='Data')
                plt.plot(Fits_BC.index, Fits_BC[Column], 'r-', label='Fit')
                plt.xlabel('WaveNumber (cm$^{-1}$)'), plt.ylabel('Intensity (au)')
                if 'xRange' in Info['Fit'] :
                    plt.xlim(Info['Fit']['xRange'][0],Info['Fit']['xRange'][1])

                plt.legend(frameon=False, loc='upper center', bbox_to_anchor=(1.2, 1), ncol=1)
                plt.show()

                Peaks = list()
                for Parameter in FitsParameters.index:
                    Name = Parameter.split('_')[0]
                    if Name not in Peaks:
                        Peaks.append(Name)

                string = ''
                for Peak in Peaks:
                    if 'assignment' in Info['Fit']['Models'][Peak]:
                        string += Info['Fit']['Models'][Peak]['assignment'] + ' | '
                    else:
                        string += Peak + ' | '
                    for Parameter in FitsParameters.index:
                        if Peak == Parameter.split('_')[0]:
                            string += Parameter.split('_')[1] + ': ' + str(round(FitsParameters[Column][Parameter], 2))
                            string += ', '
                    string = string[:-2] + '\n'
                print(string)
                print(100*'_')
        # Transpose, then move the first parameter column to the end
        FitsParameters = FitsParameters.T
        FitsParameters = FitsParameters[np.concatenate((FitsParameters.columns.values[1:],
                                                        FitsParameters.columns.values[0:1]))]
        
        # Plot 2D Data & Fits
        
        plt.figure(figsize=[8, 12])

        plt.subplot(2, 1, 1)
        x = Data.index.values
        y = Data.columns.values
        z = np.transpose(Data.values)
        plt.ylabel('Temperature (K)', fontsize=16)
        plt.tick_params(axis='both', which='major', labelsize=16)
        plt.title('Data: ' + DataName, fontsize=16)
        pcm = plt.pcolor(x, y, z, cmap='jet', shading='auto')

        plt.subplot(2, 1, 2)
        x = Fits.index.values
        y = Fits.columns.values
        z = np.transpose(Fits.values)
        plt.xlabel('Wavenumber (cm$^-$$^1$)', fontsize=16)
        plt.ylabel('Temperature (K)', fontsize=16)
        plt.tick_params(axis='both', which='major', labelsize=16)
        plt.title('Fits: ' + DataName, fontsize=16)
        pcm = plt.pcolor(x, y, z, cmap='jet', shading='auto')

        plt.show()
        
        # Plot Trends
        
        # Collect the unique parameter names (e.g. 'amplitude', 'center')
        UniqueParameters = []
        for x in FitsParameters.columns:
            name = x.split('_')[1]
            if name not in UniqueParameters:
                UniqueParameters.append(name)
        for uniqueParameter in UniqueParameters:
            fig = go.Figure()
            for parameter in FitsParameters:
                if uniqueParameter in parameter:
                    Name = parameter.split('_')[0]
                    if 'assignment' in Info['Fit']['Models'][Name]:
                        Name = Info['Fit']['Models'][Name]['assignment']
                    fig.add_trace(go.Scatter(x=FitsParameters.index, y=FitsParameters[parameter],
                                             name=Name, mode='lines+markers'))
            fig.update_layout(xaxis_title='Temperature (K)', yaxis_title=uniqueParameter,
                              title=DataName, legend_title='', width=800, height=400)
            fig.show()
        
        ##### Store Fits ####
        
        self.Fits = Fits
        self.FitsData = Data
        self.FitsBackground = Background
        self.FitsParameters = FitsParameters
        self.FitsAssignments = FitsAssignments
        
        ##### Widgets #####

        def CopyData_Clicked(b):
            Data.to_clipboard()
        CopyData = widgets.Button(description="Copy Data")
        CopyData.on_click(CopyData_Clicked)

        def CopyFits_Clicked(b):
            Fits.to_clipboard()
        CopyFits = widgets.Button(description="Copy Fits")
        CopyFits.on_click(CopyFits_Clicked)

        def CopyParameters_Clicked(b):
            FitsParameters.to_clipboard()
        CopyParameters = widgets.Button(description="Copy Parameters")
        CopyParameters.on_click(CopyParameters_Clicked)

        def Save2File_Clicked(b):
            os.makedirs(Folders['Fits'], exist_ok=True)
            FitsFile = Folders['Fits'] + '/' + DataName + '.hdf'
            Data.to_hdf(FitsFile, 'Data')
            Fits.to_hdf(FitsFile, 'Fits', mode='a')
            FitsParameters.to_hdf(FitsFile, 'Fits_Parameters', mode='a')
            FitsAssignments.to_hdf(FitsFile, 'Fits_Assignments', mode='a')
        Save2File = widgets.Button(description="Save to File")
        Save2File.on_click(Save2File_Clicked)

        display(widgets.Box([CopyData, CopyFits, CopyParameters, Save2File]))
Example #14
    def FitData(self, Region):

        Data = self.Data
        ErrorBars = self.ErrorBars
        par = self.par

        dt = AnalysisTools.DataTools()

        print(par['Description'])

        Data = dt.TrimData(Data, par['Spectra'][Region]['xRange'])
        ErrorBars = dt.TrimData(ErrorBars, par['Spectra'][Region]['xRange'])

        ##### Peak Assignments #####

        PeakList = list()
        AssignmentList = list()
        for Peak in par['Spectra'][Region]['Models']:
            PeakList.append(Peak)
            if 'assignment' in par['Spectra'][Region]['Models'][Peak]:
                AssignmentList.append(
                    par['Spectra'][Region]['Models'][Peak]['assignment'])
            else:
                AssignmentList.append(Peak)
        FitsAssignments = df(AssignmentList,
                             index=PeakList,
                             columns=['Assignment'])

        ##### Fit Data #####

        print('\nFitting Data...')

        fit = AnalysisTools.FitTools(Data, ErrorBars, par['Spectra'][Region])
        fit.Fit(NumberPoints=501)
        fit.ShowFits()

        Fits = fit.Fits
        FitsParameters = fit.FitsParameters

        ##### Convert FitsComponents to DataFrame #####

        FitsComponents = pd.DataFrame(fit.FitsComponents)
        FitsComponents.index = Data.columns
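        # Strip underscores from the fit-component column names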
        for key in FitsComponents:
            FitsComponents = FitsComponents.rename(
                columns={key: str.replace(key, '_', '')})

        print('\nDone fitting data')

        ##### Plot 2D Data & Fits #####

        plt.figure(figsize=[16, 6])

        plt.subplot(1, 2, 1)
        x = Data.index.values
        y = Data.columns.values
        z = np.transpose(Data.values)
        plt.ylabel('Delay (fs)', fontsize=16)
        plt.tick_params(axis='both', which='major', labelsize=16)
        plt.title('Data', fontsize=16)
        pcm = plt.pcolor(x, y, z, cmap='jet', shading='auto')

        plt.subplot(1, 2, 2)
        x = Fits.index.values
        y = Fits.columns.values
        z = np.transpose(Fits.values)
        plt.xlabel('Wavenumber (cm$^-$$^1$)', fontsize=16)
        plt.ylabel('Delay (fs)', fontsize=16)
        plt.tick_params(axis='both', which='major', labelsize=16)
        plt.title('Fits', fontsize=16)
        pcm = plt.pcolor(x, y, z, cmap='jet', shading='auto')

        plt.show()

        ##### Plot Trends #####

        FitsParameters = FitsParameters.T
        UniqueParameters = ('amplitude', 'center', 'sigma')
        for uniqueParameter in UniqueParameters:
            fig = go.Figure()
            for parameter in FitsParameters:
                if uniqueParameter in parameter:
                    Name = parameter.split('_')[0]
                    if 'assignment' in par['Spectra'][Region]['Models'][Name]:
                        Name = par['Spectra'][Region]['Models'][Name][
                            'assignment']
                    fig.add_trace(
                        go.Scatter(x=FitsParameters.index,
                                   y=FitsParameters[parameter],
                                   name=Name,
                                   mode='lines+markers'))
            fig.update_layout(xaxis_title='Delay (fs)',
                              yaxis_title='Fit Value',
                              title=uniqueParameter,
                              legend_title='',
                              width=800,
                              height=400)
            fig.show()

        self.Data = Data
        self.ErrorBars = ErrorBars
        self.Fits = Fits
        self.FitsParameters = FitsParameters
        self.FitsComponents = FitsComponents
        self.FitsAssignments = FitsAssignments
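
Examples #6 and #14 appear to come from the same class: the constructor in
Example #6 loads and background-corrects the data, and FitData fits one
spectral region of it. A hypothetical end-to-end sketch; the class name
Spectra, the parameter folder, and the file names are placeholders, not taken
from the source:

# Hypothetical names throughout; only the call pattern is from the examples.
spectra = Spectra(('Parameters', 'sample1'), 'sample1_E400.h5')
spectra.FitData('Region1')
print(spectra.FitsParameters)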
Example #15
import numpy as np
import matplotlib.pyplot as plt
import math
import AnalysisTools as at

## Read probe data from the file
probes = 'caisson2D.csv'

datalist = at.readProbeFile(probes)
probeType = datalist[0]
time = datalist[1]
data = datalist[2]
rz = data[:, 5]

# Convert the rotation angle from radians to degrees
alpha = rz * 180 / math.pi

# Restrict to t <= 2.5 s and estimate the rotation period from downward
# zero crossings
it = np.where(time > 2.5)[0][0]
period = at.zeroCrossing(time[:it], alpha[:it], up=False)[0]

period_ref = 0.93
err = 100 * abs(period_ref - period) / abs(period_ref)
with open('validation_FCBD.txt', 'w') as val:
    val.write('Period for the rotation angle' + '\n')
    val.write('Theory' + '\t' + 'Simulation' + '\t' + '\t' + 'Error (%)' + '\n')
    val.write(str(period_ref) + '\t' + str(period) + '\t' + str(err))

plt.plot(time, alpha)
Example #16
import AnalysisTools as AT
import os
import numpy as np

print "Reading generation probes"

T = 1.94
H = 0.025
depth = 1.
L = 5.
folder = "../output"
os.chdir(folder)
dataW = AT.readProbeFile("pressure_gaugeArray.csv")

print(dataW[1])
Z = -depth + dataW[1][0][1]

Nwaves = 3

# Use only the last Nwaves wave periods at the end of the record
Tend = dataW[2][-1]
Tstart = Tend - Nwaves * T

print(Tstart, Tend, Z)

Ht = 0
Hi = 0
bf = 1.2

zc = []
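# Zero-crossing analysis on each pressure-probe column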
for dd in range(0, len(dataW[3][0, :])):
    dat = AT.zeroCrossing(dataW[2],