Example #1
    def view_waveform(self, start=0, end=None, precision=50):
        """
        cat: info
        desc: show the waveform of this audio
        args:
            [start: seconds/beats to begin view window. default beginning]
            [end: seconds/beats to end view window. -1 selects end. default end]
            [precision: percent of how detailed the plot should be. default 50]
        """
        start = inpt_validate(start, 'beatsec')
        if end is None or str(end) == "-1":
            end = self.size_samps()
        else:
            end = inpt_validate(end, 'beatsec')
            if end.samples_value >= self.size_samps():
                end = self.size_samps()
        if end <= start:
            err_mess("End cannot be before or equal to start")
            return
        precision = inpt_validate(precision, 'pcnt', allowed=[5, 10000])

        info_block("Generating waveform at {0}%...".format(precision))

        analysis = Analysis(self, start=start, end=end)
        # higher precision -> shorter analysis frames -> more points in the plot
        frame_len = (end - start) / (precision * 2)
        analysis.set_frame_lengths(frame_len)

        left = analysis.arr[:, 0]
        right = analysis.arr[:, 1]

        analysis.plot(left, right, fill=True)
Example #2
def __lint_file(file_path: str) -> None:
    '''
    Statically analyzes and formats a file.

    Args:
        file_path (str): The `.md` file to statically analyze and format.

    Raises:
        FileNotFoundError: It does not have the extension `.md`.
    '''
    if os.path.splitext(os.path.basename(file_path))[1] != '.md':
        raise FileNotFoundError('It does not have the extension `.md`.')

    directory = os.path.dirname(file_path)
    save_file_dir = click.prompt('save directory.', default=directory)
    old_file_name = os.path.splitext(os.path.basename(file_path))[0]
    new_file_name = click.prompt(
        f'save file name.(read file: {old_file_name}.md)',
        default='lint_' + old_file_name)
    analysis = Analysis(save_file_dir, file_path)
    analysis.check_blank_line()
    analysis.check_title()
    analysis.check_header()
    analysis.check_link(vaild_link=True)
    analysis.check_image()
    analysis.export_md(new_file_name)
Example #3
def __lint_dir(directory: str) -> None:
    '''
    Collects the `.md` files in a directory and statically analyzes and formats all of them.

    Args:
        directory (str): The directory containing the `.md` files.

    Raises:
        FileNotFoundError: `.md` file not found.
    '''
    save_file_dir = click.prompt('save directory.', default=directory)
    md_set_path = glob(os.path.join(directory, '*.md'))
    if not md_set_path:
        raise FileNotFoundError('`.md` file not found.')

    for md_file in md_set_path:
        old_file_name = os.path.splitext(os.path.basename(md_file))[0]
        new_file_name = click.prompt(
            f'save file name.(read file: {old_file_name}.md)',
            default='lint_' + old_file_name)
        analysis = Analysis(save_file_dir, md_file)
        analysis.check_blank_line()
        analysis.check_title()
        analysis.check_header()
        analysis.check_link(vaild_link=True)
        analysis.check_image()
        analysis.export_md(new_file_name)
Example #4
                         out=d.joinpath(step),
                         geo=self.toml_name(self.Steps[i - 1]) if i else None,
                         section=step,
                         cfg='align')
            else:
                warning('geo file already exists!')
        print_elapsed_time(t)

    def recon(self, raw=False):
        """ step 3: based on the alignment generate the tracks with proteus. """
        self.Out.parent.mkdir(exist_ok=True)
        self.run('pt-recon',
                 out=self.Out,
                 cfg=None if raw else self.toml_name())

    # endregion RUN
    # ----------------------------------------


if __name__ == '__main__':
    from src.analysis import Analysis, Dir

    a = Analysis()
    sdir = Path(a.Config.get('SOFTWARE', 'dir')).expanduser().joinpath(
        a.Config.get('SOFTWARE', 'proteus'))
    f_ = a.BeamTest.Path.joinpath('data', f'run{11:06d}.root')
    z = Proteus(sdir, a.BeamTest.Path.joinpath('proteus'),
                Dir.joinpath('proteus'), f_,
                a.Config.getint('align', 'max events'),
                a.Config.getint('align', 'skip events'))
Example #5
    # Open --------------------------------------------------------------------------------------------------------
    if args.open:
        pkg = open_pkl(args.open)
        # set vd
        vd = {'pkg': 'Content in .pkl'}

        args.shell = True

    # Analyze ------------------------------------------------------------------------------------------------------
    if args.analyze:
        from src.analysis import Analysis

        if args.analyze == 'model':
            # Analyze the training results stored with each model in the .pkl
            if args.data:
                analysis = Analysis(p_data=args.data)
            else:
                analysis = Analysis(p_data=_d_model_)
            analysis.use_f1 = args.use_f1
            data = analysis.compile_batch_train_results()

            vd = {'data': 'Compiled training data (Pandas dataframe)'}

        if args.analyze == 'precision' and args.pred_data and args.pred_data != 'param':
            # Generate a series of predictions with incremental rounding cutoff (greater precision)
            Analysis().step_precisions(d_out=args.out,
                                       model=m,
                                       data=m.datas[-1],
                                       predictions=res['pred'],
                                       evaluations=res['eval'])
Example #6
import src.persistence.article_service as articles_service
from kafka import KafkaConsumer
import src.config as config
import src.constants as constants
from json import loads
from src.producer import Producer
import src.api.service as api
from threading import Thread
from src.analysis import Analysis

TFIDF_TOPIC = 'tfidf-input'
UNIQUE_TOPIC = 'unique-articles-input'
analysis = Analysis()

unique_consumer = KafkaConsumer(
    UNIQUE_TOPIC,
    bootstrap_servers=[
        config.CONNECTION['host'] + ':' + config.CONNECTION['port']
    ],
    auto_offset_reset='earliest',
    enable_auto_commit=True,
    group_id='articles_consumer',
    value_deserializer=lambda x: loads(x.decode(constants.UTF_ENCODING)))

tfidf_producer = Producer(TFIDF_TOPIC)


def format_message(message):
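    # join the title/text word lists back into single space-separated strings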
    message['title'] = ' '.join(message['title'])
    message['text'] = ' '.join(message['text'])
    return message
Example #7
    # Open --------------------------------------------------------------------------------------------------------
    if args.open:
        pkg = open_pkl(args.open)
        # set vd
        vd = {'pkg': 'Content in .pkl'}

        args.shell = True

    # Analyze ------------------------------------------------------------------------------------------------------
    if args.analyze:
        from src.analysis import Analysis

        if args.analyze == 'model':
            # Analyze the training results stored with each model in the .pkl
            if args.data:
                analysis = Analysis(p_data=args.data)
            else:
                analysis = Analysis(p_data=_d_model_)
            analysis.use_f1 = args.use_f1
            data = analysis.compile_batch_train_results()

            vd = {'data': 'Compiled training data (Pandas dataframe)'}

    # Enable interaction \______________________________________________________________________________________________
    if args.shell:
        interact(var_desc=vd, local=locals())

    # For testing new code \____________________________________________________________________________________________
    if args.test:
        d = Dummy().init_networks()
        print('  Generated pure network: %s' %
Example #8
            'lepton': 'muon',
            'label': r'$W^+\rightarrow\mu\nu$',
        },
        'wplustaunu': {
            'data_path':
            '/data/atlas/HighMassDrellYan/test_mc16a/wplustaunu/*.root',
            'cutfile_path': '../options/jesal_cutflow/DY_peak.txt',
            'lepton': 'tau',
            'label': r'$W^+\rightarrow\tau\nu\rightarrow\mu\nu$',
        }
    }

    analysis = Analysis(datasets,
                        'mutau_compare',
                        log_level=10,
                        log_out='both',
                        timedatelog=False,
                        year='2015+2016',
                        force_rebuild=False,
                        TTree_name='nominal_Loose')

    analysis.merge_datasets("wminmunu", "wminmunu_hm", verify=True)
    analysis.merge_datasets("wmintaunu", "wmintaunu_hm", verify=True)
    analysis.merge_datasets("wplusmunu", "wplusmunu_hm", verify=True)
    analysis.merge_datasets("wplustaunu", "wplustaunu_hm", verify=True)

    # normalised
    analysis.plot_hist(['wminmunu', 'wmintaunu'],
                       'met_met',
                       weight='reco_weight',
                       title='reco 139fb$^{-1}$',
                       bins=(30, 150, 5000),
Example #9
        #     'datapath': '/data/atlas/HighMassDrellYan/test_mc16a/zzllnunu/*.root',
        #     'cutfile': 'options/jesal_cutflow/cutfile_jesal.txt',
        #     'TTree_name': 'nominal_Loose',
        #     'is_slices': False,
        # },
        # 'zzqqll': {
        #     'datapath': '/data/atlas/HighMassDrellYan/test_mc16a/zzqqll/*.root',
        #     'cutfile': 'options/jesal_cutflow/cutfile_jesal.txt',
        #     'TTree_name': 'nominal_Loose',
        #     'is_slices': False,
        # },
    }

    my_analysis = Analysis(datasets,
                           analysis_label='jesal_cutflow',
                           force_rebuild=False,
                           log_level=10,
                           log_out='console')

    my_analysis.plot_1d(x='mu_mt',
                        bins=(50, 120, 4000),
                        title=r'$W\rightarrow\tau\nu$ (13 TeV)',
                        scaling='xs',
                        log_y=True)
    # pipeline
    # my_analysis.plot_mass_slices(ds_name='wmintaunu_slices', xvar='MC_WZ_m',
    #                              inclusive_dataset='wmintaunu_inclusive', logx=True, to_pkl=True)
    # my_analysis.plot_mass_slices(ds_name='wplustaunu_slices', xvar='MC_WZ_m',
    #                              inclusive_dataset='wplustaunu_inclusive', logx=True, to_pkl=True)
    # my_analysis.plot_mass_slices(ds_name='wminmunu_slices', xvar='MC_WZ_m',
    #                              inclusive_dataset='wminmunu_inclusive', logx=True, to_pkl=True)
Example #10
    def DoSignalHeightScan(self, heights=None, hits_per_height=300000):
        gc.disable()
        starttime = datetime.today()

        # ROOT Logfile:
        # path = "MC/Performance_Results/"+str(starttime)
        path = "MC/Performance_Results/_" + str(
            self.minimum_statistics) + "_" + str(
                self.binning) + "_" + str(hits_per_height) + "_"
        os.makedirs(path)
        rootfile = TFile(path + '/MCPerformanceLog.root', 'RECREATE')
        LogTree = TTree('LogTree', 'MC Log Tree')
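        # one-element arrays serve as writable buffers bound to the TTree branches below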
        RealSignalAmplitude = array('f', [0])
        Repetition = array('i', [0])
        TrueNPeaks = array('i', [0])
        Ninjas = array('i', [0])
        Ghosts = array('i', [0])
        Minimas = array('i', [0])
        RecSA_Quantiles = array('f', [0])
        RecSA_MinMax = array('f', [0])

        LogTree.Branch('RealSignalAmplitude', RealSignalAmplitude,
                       'RealSignalAmplitude/F')
        LogTree.Branch('Repetition', Repetition, 'Repetition/I')
        LogTree.Branch('TrueNPeaks', TrueNPeaks, 'TrueNPeaks/I')
        LogTree.Branch('Ninjas', Ninjas, 'Ninjas/I')
        LogTree.Branch('Ghosts', Ghosts, 'Ghosts/I')
        LogTree.Branch('Minimas', Minimas, 'Minimas/I')
        LogTree.Branch('RecSA_Quantiles', RecSA_Quantiles, 'RecSA_Quantiles/F')
        LogTree.Branch('RecSA_MinMax', RecSA_MinMax, 'RecSA_MinMax/F')

        # copy Config files:
        shutil.copy("Configuration/MonteCarloConfig.cfg",
                    path + "/MonteCarloConfig.cfg")
        shutil.copy("Configuration/AnalysisConfig.cfg",
                    path + "/AnalysisConfig.cfg")

        if heights is None:
            heights = [
                0.0, 0.05, 0.08, 0.1, 0.125, 0.15, 0.175, 0.2, 0.3, 0.5, 0.8,
                1.0
            ]

        # infofile:
        infofile = open(path + "/info.txt", "w")
        infofile.write("DoSignalHeightScan\n\n")
        infofile.write("Timestamp: " + str(starttime) + "\n\n")
        infofile.write("Number of Repetitions for each Amplitude: " +
                       str(self.tries) + "\n")
        infofile.write("Number of different Amplitudes:           " +
                       str(len(heights)) + "\n")
        infofile.write("Hits per Amplitude:                       " +
                       str(hits_per_height) + "\n")
        infofile.write("Quantiles:                                " +
                       str(self.min_percent) + "/" + str(self.max_percent) +
                       "\n")
        infofile.write("Binning:                                  " +
                       str(self.binning) + "\n")
        infofile.write("Minimum Statistics:                       " +
                       str(self.minimum_statistics) + "\n")
        infofile.write("Extrema Configuration:                    " +
                       self.extremaconfiguration)

        success_prob = []
        ghost_prob = []
        cycle_nr = 0
        cycles = self.tries * len(heights)
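        # loop over signal heights; the per-height counters below are summarized into success_prob / ghost_prob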
        for height in heights:  # add more statistics for each height, not just one try..
            fails = 0
            tot_ghosts = 0
            peaks_generated = 0
            for repetition in range(self.tries):
                cycle_nr += 1
                print("\n{0}th repetition with Signal height set to: {1}\n".
                      format(repetition, height))
                run_object = MCRun(validate=False,
                                   verbose=self.verbose,
                                   run_number=364)
                run_object.MCAttributes['PeakHeight'] = height
                run_object.SetNumberOfHits(hits_per_height)
                print("newAnalysis = Analysis(run_object)")
                newAnalysis = Analysis(run_object, verbose=self.verbose)
                print("newAnalysis.FindMaxima()")
                newAnalysis.FindMaxima(
                    binning=self.binning,
                    minimum_bincontent=self.minimum_statistics)
                print("newAnalysis.FindMinima()")
                newAnalysis.FindMinima(
                    binning=self.binning,
                    minimum_bincontent=self.minimum_statistics)
                npeaks = newAnalysis.ExtremeAnalysis.ExtremaResults[
                    'TrueNPeaks']
                ninjas = newAnalysis.ExtremeAnalysis.ExtremaResults['Ninjas']
                ghosts = newAnalysis.ExtremeAnalysis.ExtremaResults['Ghosts']
                maxima = newAnalysis.ExtremeAnalysis.ExtremaResults[
                    'FoundMaxima']
                minima = newAnalysis.ExtremeAnalysis.ExtremaResults[
                    'FoundMinima']
                # Reconstruct Signal Amplitude:
                if len(maxima) * len(minima) > 0:
                    maxbin = newAnalysis.ExtremeAnalysis.Pad.GetBinByCoordinates(
                        *(maxima[0]))
                    maxbin.FitLandau()
                    minbin = newAnalysis.ExtremeAnalysis.Pad.GetBinByCoordinates(
                        *(minima[0]))
                    minbin.FitLandau()
                    rec_sa_minmax = maxbin.Fit['MPV'] / minbin.Fit['MPV'] - 1.
                else:
                    rec_sa_minmax = -99
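                # quantile-based estimate of the signal amplitude from the mean-signal histogram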
                q = array('d', [
                    1. * self.min_percent / 100., 1. * self.max_percent / 100.
                ])
                y = array('d', [0, 0])
                newAnalysis.ExtremeAnalysis.CreateMeanSignalHistogram()
                newAnalysis.ExtremeAnalysis.MeanSignalHisto.GetQuantiles(
                    2, y, q)
                rec_sa_quantiles = y[1] / y[0] - 1.

                # Fill ROOT file:
                RealSignalAmplitude[0] = height
                Repetition[0] = repetition
                TrueNPeaks[0] = npeaks
                Ninjas[0] = ninjas
                Ghosts[0] = ghosts
                Minimas[0] = len(minima)
                RecSA_Quantiles[0] = rec_sa_quantiles
                RecSA_MinMax[0] = rec_sa_minmax
                LogTree.Fill()

                assert (npeaks > 0), 'no peak in MC created'
                peaks_generated += npeaks
                fails += ninjas
                tot_ghosts += ghosts
                # self.AddAnalysis(newAnalysis)
                del newAnalysis
                del run_object
                elapsed_time = datetime.today() - starttime
                estimated_time = elapsed_time / cycle_nr * cycles
                remaining_time = estimated_time - elapsed_time
                print("\n\nAPPROXIMATED TIME LEFT: " + str(remaining_time) +
                      "\n")
            success = 1. * (peaks_generated - fails) / peaks_generated
            # use the ghosts accumulated over all repetitions, not just the last one
            ghost = 4. * tot_ghosts / self.tries
            success_prob.append(success)
            ghost_prob.append(ghost)

        print("Write ROOT-file")
        rootfile.Write()
        rootfile.Close()
        print("ROOT File written. Write infofile")
        infofile.write("\nTotal Time elapsed: " +
                       str(datetime.today() - starttime))
        infofile.close()
        print("infofile written")

        # canvas = ROOT.TCanvas('canvas', 'canvas') # HERE IT CRASHES DUE TO MEMORY PROBLEMS
        # canvas.cd()
        # graph1 = ROOT.TGraph()
        # graph1.SetNameTitle('graph1', 'success')
        # graph1.SaveAs(path+"/SuccessGraph.root")
        # graph2 = ROOT.TGraph()
        # graph2.SetNameTitle('graph2', 'ghosts')
        # graph2.SaveAs(path+"/GhostsGraph.root")
        # for i in range(len(heights)):
        #     graph1.SetPoint(i, heights[i], success_prob[i])
        #     graph2.SetPoint(i, heights[i], ghost_prob[i])
        # graph1.Draw('ALP*')
        # graph2.Draw('SAME LP*')
        # self.SavePlots("PerformanceResult", "png", path+"/")
        answer = input('Wanna crash?')
        ROOT.gDirectory.GetList().ls()
        ROOT.gROOT.GetListOfFiles().ls()
        if answer == 'yes':
            gc.collect()
Example #11
        'wmintaunu': {
            'data_path': '../data/test_mc16a_wmintaunu/*/*.root',
            # 'data_path': '/data/atlas/HighMassDrellYan/test_mc16a/wmintaunu_*/*.root',
            'cutfile_path': '../options/cutfile_EXAMPLE.txt',
            'TTree_name': 'truth',
            'year': '2015+2016',
            'hard_cut': r'Muon $|#eta|$',
            'lepton': 'tau',
            'label': r'$W^-\rightarrow\tau\nu\rightarrow\mu\nu$',
        }
    }

    my_analysis = Analysis(datasets,
                           analysis_label='test_analysis',
                           force_rebuild=False,
                           log_level=10,
                           log_out='both',
                           timedatelog=False,
                           separate_loggers=False)
    my_analysis.print_latex_table(['wminmunu', 'wmintaunu'])

    # my_analysis.apply_cuts()
    # my_analysis.merge_datasets('wminmunu', 'wmintaunu', apply_cuts=r'Muon $|#eta|$')

    my_analysis.plot_hist(['wminmunu', 'wmintaunu'],
                          'MC_WZmu_el_eta_born',
                          bins=(30, -5, 5),
                          weight='truth_weight',
                          normalise='lumi',
                          lepton='muon',
                          yerr='rsumw2',
Example #12
        },
        'wplustaunu': {
            'data_path':
            '/data/atlas/HighMassDrellYan/mc16a/wplustaunu/*.root',
            'cutfile_path': '../options/joanna_cutflow/DY_peak.txt',
            'lepton': 'tau',
            'label': r'$W^+\rightarrow\tau\nu\rightarrow\mu\nu$',
        }
    }

    analysis = Analysis(
        datasets,
        'mutau_compare_full',
        data_dir='/data/keanu/framework_outputs/mutau_compare_full/',
        log_level=10,
        log_out='both',
        timedatelog=True,
        year='2015+2016',
        force_rebuild=False,
        TTree_name='truth',
        hard_cut='M_W')

    analysis.print_latex_table()

    analysis.merge_datasets("wminmunu", "wminmunu_hm")
    analysis.merge_datasets("wmintaunu", "wmintaunu_hm")
    analysis.merge_datasets("wplusmunu", "wplusmunu_hm")
    analysis.merge_datasets("wplustaunu", "wplustaunu_hm")

    # =========================
    # ===== TRUTH - UNCUT =====