Example #1
    def add_metric(self, labels, buckets, sum_value, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          buckets: A list of lists.
              Each inner list can be a pair of bucket name and value,
              or a triple of bucket name, value, and exemplar.
              The buckets must be sorted, and +Inf present.
          sum_value: The sum value of the metric.
        """
        for b in buckets:
            bucket, value = b[:2]
            exemplar = None
            if len(b) == 3:
                exemplar = b[2]
            self.samples.append(Sample(
                self.name + '_bucket',
                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
                value,
                timestamp,
                exemplar,
            ))
        # +Inf is last and provides the count value.
        self.samples.extend([
            Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
            Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp),
        ])
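If this add_metric belongs to prometheus_client's HistogramMetricFamily (the class name is not shown in the snippet), a minimal usage sketch would look like this; the metric and label names are made up, and the optional third bucket element carrying an exemplar is omitted:

    from prometheus_client.core import HistogramMetricFamily

    h = HistogramMetricFamily('request_latency_seconds', 'Request latency',
                              labels=['handler'])
    # Buckets are cumulative, sorted, and must include +Inf; the last pair
    # also provides the _count sample.
    h.add_metric(['/api'], buckets=[('0.1', 4.0), ('1.0', 9.0), ('+Inf', 10.0)],
                 sum_value=7.2)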
Example #2
    def getrate(self, usebkg=True):
        if self.productionmode != "ggH" and not usebkg: return 0.00001
        return self.yamlrate() * self.scalefactor()
        # NOTE: the unconditional return above makes everything below unreachable.
        if self.productionmode == "ZX" and self.production in ("160725", "160729"):
            if self.channel == "4e":    return 2.39 * float(self.luminosity)/12.9
            if self.channel == "4mu":   return 3.66 * float(self.luminosity)/12.9
            if self.channel == "2e2mu": return 6.29 * float(self.luminosity)/12.9
        if self.productionmode == "ZX" and self.production == "160720":
            if self.channel == "4e":    return 1.36 * float(self.luminosity)/7.65
            if self.channel == "4mu":   return 1.64 * float(self.luminosity)/7.65
            if self.channel == "2e2mu": return 2.81 * float(self.luminosity)/7.65
        if self.productionmode == "ZX" and self.production == "160225": #email from Simon, Feb 9 at 4:56 PM, "inputs for the cards"
            if self.channel == "4e":    return (0.311745 + 0.0106453) * float(self.luminosity)/2.8
            if self.channel == "4mu":   return 0.408547 * float(self.luminosity)/2.8
            if self.channel == "2e2mu": return (0.716686 + 0.0199815) * float(self.luminosity)/2.8
        if self.productionmode == "ZX":
            assert False

        if self.productionmode == "ggH":
            result = Template("fa3", self.productionmode, self.channel, "0+", self.production).gettemplate().Integral()*float(self.luminosity)
            for productionmode in "VBF", "WplusH", "WminusH", "ZH", "ttH":
                sample = Sample(productionmode, "0+", self.production)
                f = tfiles[sample.withdiscriminantsfile()]
                t = f.candTree
                ZZFlav = self.channel.ZZFlav
                additionalxsec = 0
                for event in t:
                    if config.m4lmin < t.ZZMass < config.m4lmax and t.Z1Flav*t.Z2Flav == ZZFlav:
                        additionalxsec += getattr(t, sample.weightname())
                result += additionalxsec * float(self.luminosity)
            return result

        result = Template("fa3", self.productionmode, self.channel, self.production).gettemplate().Integral()*float(self.luminosity)
        return result
Example #3
 def __init__(self, name='New Core', plans=[], properties={}):
     self.name = name
     self.runs = set(plans)
     self.runs.add('input')
     self.properties = Sample()
     self.properties.update(properties)
     self.loaded = False
     super(Core, self).__init__([])
Example #4
    def add_metric(self, labels, count_value, sum_value, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          count_value: The count value of the metric.
          sum_value: The sum value of the metric.
        """
        self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp))
        self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp))
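Assuming this is SummaryMetricFamily.add_metric from prometheus_client (the class is not shown), a usage sketch with invented metric names:

    from prometheus_client.core import SummaryMetricFamily

    s = SummaryMetricFamily('rpc_duration_seconds', 'RPC duration',
                            labels=['service'])
    # Emits rpc_duration_seconds_count and rpc_duration_seconds_sum samples.
    s.add_metric(['auth'], count_value=3, sum_value=1.25)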
Example #5
    def add_metric(self, labels, value, created=None, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: The value of the metric
          created: Optional unix timestamp the child was created at.
        """
        self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))
        if created is not None:
            self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp))
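Assuming CounterMetricFamily from prometheus_client, a sketch showing the optional created timestamp; the names are illustrative:

    import time
    from prometheus_client.core import CounterMetricFamily

    c = CounterMetricFamily('requests', 'Total requests', labels=['method'])
    # Adds a requests_total sample, plus requests_created because created= is given.
    c.add_metric(['get'], 42.0, created=time.time())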
Example #6
 def forcesample(self, depth):
     try:
         return self[depth]
     except KeyError:
         s = Sample(exp_data={'depth': depth})
         self.add(s)
         return s
Example #7
 def add_metric(self, labels, value, timestamp=None):
     """Add a metric to the metric family.
     Args:
     labels: A list of label values
     value: The value of the metric.
     """
     self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
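Assuming a family whose samples carry the bare metric name, e.g. prometheus_client's GaugeMetricFamily, a one-line usage sketch with made-up names:

    from prometheus_client.core import GaugeMetricFamily

    g = GaugeMetricFamily('queue_depth', 'Jobs waiting', labels=['queue'])
    g.add_metric(['default'], 17)   # one sample named queue_depth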
Example #8
    def add_metric(self, labels, buckets, gsum_value, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          buckets: A list of pairs of bucket names and values.
              The buckets must be sorted, and +Inf present.
          gsum_value: The sum value of the metric.
        """
        for bucket, value in buckets:
            self.samples.append(Sample(
                self.name + '_bucket',
                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
                value, timestamp))
        # +Inf is last and provides the count value.
        self.samples.extend([
            Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
            Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp),
        ])
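Assuming GaugeHistogramMetricFamily from prometheus_client, a usage sketch with invented names:

    from prometheus_client.core import GaugeHistogramMetricFamily

    gh = GaugeHistogramMetricFamily('backlog_age_seconds', 'Age of queued jobs',
                                    labels=['queue'])
    # Same bucket rules as a histogram, but the totals are _gcount/_gsum.
    gh.add_metric(['default'], buckets=[('60.0', 2.0), ('+Inf', 5.0)],
                  gsum_value=130.0)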
Example #9
 def passescut(self, t):
     if self.subtracttree is None:
         from samples import Sample
         self.subtracttree = tfiles[Sample(
             "data", "unblind",
             str(self).replace("subtract",
                               "")).withdiscriminantsfile()].candTree
     run, event, lumi = t.RunNumber, t.EventNumber, t.LumiNumber
     for t2 in self.subtracttree:
         if (run, event, lumi) == (t2.RunNumber, t2.EventNumber,
                                   t2.LumiNumber):
             return False
     return True
Example #10
    def add_metric(self, labels, value, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A dict of labels
        """
        self.samples.append(Sample(
            self.name + '_info',
            dict(dict(zip(self._labelnames, labels)), **value),
            1,
            timestamp,
        ))
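Assuming InfoMetricFamily from prometheus_client, a usage sketch with illustrative label values:

    from prometheus_client.core import InfoMetricFamily

    i = InfoMetricFamily('build', 'Build information', labels=['component'])
    # The dict values become extra labels on a single build_info sample of value 1.
    i.add_metric(['api'], {'version': '1.4.2', 'revision': 'abc123'})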
Example #11
    def handleForce(self, querySt):
        time = querySt[0]
        step = querySt[1]
        pf = (querySt[2],querySt[3],querySt[4])
        vf = (querySt[5],querySt[6],querySt[7])
        pm = (querySt[8],querySt[9],querySt[10])
        vm = (querySt[11],querySt[12],querySt[13])
        sample = Sample(time, step, ForceData(pf, vf, pm, vm))
        logging.debug("Collector- Handling force value. step=%d - time=%.3f - pf=%s - ..."
                      % (step, time, pf))

        with self.context.lock:
            self.samples.addForce(sample)
Example #12
def _parse_sample(text):
    # Detect the labels in the text
    try:
        label_start, label_end = text.index("{"), text.rindex("}")
        # The name is before the labels
        name = text[:label_start].strip()
        # We ignore the starting curly brace
        label = text[label_start + 1:label_end]
        # The value is after the label end (ignoring curly brace and space)
        value = float(_parse_value(text[label_end + 2:]))
        return Sample(name, _parse_labels(label), value)

    # We don't have labels
    except ValueError:
        # Detect what separator is used
        separator = " "
        if separator not in text:
            separator = "\t"
        name_end = text.index(separator)
        name = text[:name_end]
        # The value is after the name
        value = float(_parse_value(text[name_end:]))
        return Sample(name, {}, value)
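Assuming the _parse_value and _parse_labels helpers from the same text-format parser, the function maps one exposition line to a Sample roughly as follows:

    # With labels:
    #   _parse_sample('http_requests_total{method="get"} 3.0')
    #   -> Sample('http_requests_total', {'method': 'get'}, 3.0)
    # Without labels (space- or tab-separated):
    #   _parse_sample('process_open_fds 7')
    #   -> Sample('process_open_fds', {}, 7.0)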
Example #13
 def build_metric(name, documentation, typ, samples):
     # Munge counters into OpenMetrics representation
     # used internally.
     if typ == 'counter':
         if name.endswith('_total'):
             name = name[:-6]
         else:
             new_samples = []
             for s in samples:
                 new_samples.append(Sample(s[0] + '_total', *s[1:]))
             samples = new_samples
     metric = Metric(name, documentation, typ)
     metric.samples = samples
     return metric
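A rough sketch of the counter munging above, assuming prometheus_client's Sample and Metric types and treating build_metric as callable in isolation:

    from prometheus_client.core import Metric
    from prometheus_client.samples import Sample

    # Name already ends in _total: the suffix is stripped from the metric name,
    # and the samples keep their original names.
    m = build_metric('requests_total', 'Total requests', 'counter',
                     [Sample('requests_total', {}, 5.0)])

    # Otherwise every sample name gets a _total suffix appended instead.
    m2 = build_metric('requests', 'Total requests', 'counter',
                      [Sample('requests', {}, 5.0)])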
Example #14
    def add_metric(self, labels, value, timestamp=None):
        """Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A dict of string state names to booleans
        """
        labels = tuple(labels)
        for state, enabled in sorted(value.items()):
            v = (1 if enabled else 0)
            self.samples.append(Sample(
                self.name,
                dict(zip(self._labelnames + (self.name,), labels + (state,))),
                v,
                timestamp,
            ))
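Assuming StateSetMetricFamily from prometheus_client, a usage sketch with made-up state names:

    from prometheus_client.core import StateSetMetricFamily

    ss = StateSetMetricFamily('task_state', 'Current task state', labels=['task'])
    # One sample per state; the enabled state carries value 1, the others 0.
    ss.add_metric(['backup'], {'running': True, 'queued': False, 'failed': False})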
Example #15
def predict(queue, stack, model, vocab, tree, max_edus, top_ind_in_queue, actions=None):
    state = get_state(stack, [top_ind_in_queue] if queue else [])
    sample = Sample(state=state, tree=tree)
    _, x_vecs = add_features_per_sample(sample, vocab, max_edus)
    x = np.array(x_vecs).reshape(1, -1)
    action, alter_action = actions if actions else model.predict(x)

    # correct invalid action
    if len(stack) < 2 and action != 'SHIFT':
        action = 'SHIFT'
    elif not queue and action == 'SHIFT':
        action = alter_action

    if action == 'SHIFT':
        return Transition(action=action, nuclearity=None, relation=None)
    else:
        return Transition(*action.split('-', 2))
Example #16
    def handleLocation(self, querySt):
        time = querySt[0]
        step = querySt[1]
        value = querySt[2]

        with self.context.lock:
            (variable, position) = self.context.nextVariableAndPosition()

            sample = Sample(time, step, ProbeData(position, variable, value))
            logging.debug("Collector- Handling position value. step=%d - time=%.3f - position=%s - variable=%s - value=%f"
                          % (step, time, position, variable, value))
            self.samples.addProbe(sample)
            expectedSamples = self.context.totalVariablesQty * self.context.totalPositionsQty
            if len(self.samples.currentSamples) == expectedSamples:
                self.samples.currentTimeCompleted = True
                logging.info("Collector - Notifying all samples received for time: %.3f. Qty: %d" % (time, expectedSamples))
                self.context.samplesCond.notify()
Example #17
 def on_response(self, widget, response_id, sampleID, date, layers, materials,
                 magPower, growthTimes, gasses, backgroundPressure, period,
                 gamma, bias, comments, specpath, offspecpath, mappath):
     self.added_sample = Sample(
         sampleID.get_text(), date.get_text(), layers.get_text(),
         materials.get_text(), magPower.get_text(), growthTimes.get_text(),
         gasses.get_text(), backgroundPressure.get_text(), period.get_text(),
         gamma.get_text(), bias.get_text(), comments.get_text(),
         specpath.get_text(), offspecpath.get_text(), mappath.get_text())
Example #18
 def makesample(cls, data):
     instance = Sample()
     instance.update(cls._table.loaddictformat(data))
     return instance
Example #19
def main(args):

    # List all the json files in indir
    input_files = [
        filename for filename in sorted(os.listdir(args.indir))
        if '.json' in filename
    ]
    print("\nFound {} json files in {}: ".format(len(input_files), args.indir))
    for fn in input_files:
        print(fn)
    print()

    # Make outdir if it doesn't already exist
    if not os.path.isdir(args.outdir):
        os.mkdir(args.outdir)

    # Get the list of metrics we'll measure
    if args.metrics == 'all':
        metrics = get_all_metrics()  # list of fns
    else:
        metrics = args.metrics.strip().split(',')  # list of strings
        metrics = [
            get_metric_from_name(metric_name) for metric_name in metrics
        ]  # list of fns
    print("\nAnnotating for these metrics:")
    for metric in metrics:
        print('{0:<30}   uses_spacy={1}'.format(metric.__name__,
                                                uses_spacy(metric)))

    # Init some logging stuff
    time_per_metric = TimePerMetric()
    last_logged = None

    for infile_idx, infile in enumerate(input_files):
        print("\nProcessing file {} of {}: {}...".format(
            infile_idx, len(input_files), infile))

        # Check if output filepath already exists.
        # If so, load it. Otherwise, load original json file
        outfile = infile.replace('.json', '.metric_annotated.pkl')
        outpath = os.path.join(args.outdir, outfile)
        if os.path.isfile(outpath):
            print('\nOutput file {} already exists. Loading it...'.format(
                outpath))
            with open(outpath, 'rb') as f:
                sampleid2sample = pickle.load(f)  # int -> Sample
            print('Finished loading.')
        else:
            inpath = os.path.join(args.indir, infile)
            print('\nOutput file {} does not already exist.'.format(outpath))
            print('Loading unannotated stories from {}...'.format(inpath))
            with open(inpath, 'r') as f:
                sampleid2sample = json.load(f)  # str(int) -> dict
                print('Finished loading.')
                sampleid2sample = {
                    int(sample_id): Sample(sample)
                    for sample_id, sample in sampleid2sample.items()
                }  # int -> Sample

        # Load spacy annotations if necessary
        if any([uses_spacy(metric) for metric in metrics]):
            spacy_filepath = os.path.join(
                args.spacydir, infile.replace('.json', '.spacy_annotated.pkl'))
            print('\nLoading spacy annotations from {}...'.format(
                spacy_filepath))
            with open(spacy_filepath, 'rb') as f:
                sampleid2spacy = pickle.load(f)
            print('Finished loading.')

            # Put the spacy annotations in the Samples
            print('\nPutting spacy annotations in the Samples...')
            for sample_id, sample in sampleid2sample.items():
                if int(sample_id) not in sampleid2spacy:
                    raise Exception(
                        'sample_id {} does not have a spacy annotation in {}'.
                        format(sample_id, spacy_filepath))
                (spacy_annotated_prompt,
                 spacy_annotated_story) = sampleid2spacy[sample_id]
                sample.spacy_annotated_prompt = spacy_annotated_prompt
                sample.spacy_annotated_story = spacy_annotated_story
            print('Finished.')

        # Compute the metrics
        for sample_id, sample in sampleid2sample.items():

            # Annotate the sample with the desired metrics.
            # tpm_update is just some logging info about how much time each metric is taking to annotate
            tpm_update = sample.annotate(metrics, args.recompute_metric)
            time_per_metric.update(
                tpm_update)  # keep track of how long each metric is taking

            # Log
            if last_logged is None:  # if you haven't logged at all yet
                last_logged = time.time()  # start the timer now
            if time.time() - last_logged > args.log_every:
                print()
                print("LOGGING:")
                print("Processing file {} of {}".format(
                    infile_idx, len(input_files)))
                print("For this file, processing sample {} of {}".format(
                    sample_id, len(sampleid2sample)))
                time_per_metric.report(
                )  # report how long each metric is taking
                print()
                last_logged = time.time()

        # Write to output file, first removing the spacy annotations, which are too large to include
        for sample in sampleid2sample.values():
            delattr(sample, 'spacy_annotated_prompt')
            delattr(sample, 'spacy_annotated_story')
        print('Writing to {}...'.format(outpath))
        with open(outpath, 'wb') as f:
            pickle.dump(sampleid2sample, f)
        print('Finished writing.')
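main() reads all of its settings from an args namespace; a hypothetical argparse setup consistent with the attributes referenced above (flag names, types, and defaults are guesses):

    import argparse

    def parse_args():
        p = argparse.ArgumentParser()
        p.add_argument('--indir', required=True)       # directory of input .json files
        p.add_argument('--outdir', required=True)      # where .metric_annotated.pkl files go
        p.add_argument('--spacydir', default=None)     # directory of .spacy_annotated.pkl files
        p.add_argument('--metrics', default='all')     # 'all' or comma-separated metric names
        p.add_argument('--recompute_metric', default=None)
        p.add_argument('--log_every', type=float, default=60)  # seconds between progress logs
        return p.parse_args()

    # main(parse_args())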
Example #20
class Core(Collection):
    _tablename = 'cores'

    @classmethod
    def connect(cls, backend):
        cls._table = backend.ctable(cls.tablename())

    #useful notes -- all keys (depths) are converted to millimeter units before
    #being used to reference a Sample value. Keys are still displayed to the
    #user in their selected unit, as those are actually pulled from the sample

    def __init__(self, name='New Core', plans=[], properties={}):
        self.name = name
        self.runs = set(plans)
        self.runs.add('input')
        self.properties = Sample()
        self.properties.update(properties)
        self.loaded = False
        super(Core, self).__init__([])

    def _dbkey(self, key):
        try:
            key = key.rescale('mm')
        except AttributeError:
            key = key
        return float(key)

    def _unitkey(self, depth):
        try:
            return float(depth.rescale('mm').magnitude)
        except AttributeError:
            return float(depth)

    @classmethod
    def makesample(cls, data):
        instance = Sample()
        instance.update(cls._table.loaddictformat(data))
        return instance

    def saveitem(self, key, value):
        return (self._dbkey(key), self._table.formatsavedict(value))

    def new_computation(self, cplan):
        """
        Add a new computation plan to this core, and return a VirtualCore
        with the requested plan set.
        """
        run = Run(cplan)
        self.runs.add(run.name)
        vc = VirtualCore(self, run.name)
        #convenience for this specific case -- the run is still in-creation,
        #so we need to keep the object around until it's done.
        vc.partial_run = run
        return vc

    @property
    def vruns(self):
        return [run for run in self.runs if run != 'input']

    def virtualize(self):
        """
        Returns a full set of virtual cores applicable to this Core
        This is currently returned as a list, sorted by run name.
        """
        if len(self.runs) == 1:
            #return input as its own critter iff it's the only plan in this core
            return [VirtualCore(self, 'input')]
        else:
            cores = []
            for run in sorted(self.runs):
                if run == 'input':
                    continue
                cores.append(VirtualCore(self, run))
            return cores

    def __getitem__(self, key):
        if key == 'all':
            print "Warning: use of 'all' key is deprecated. Use core.properties instead"
            return self.properties
        return self._data[self._unitkey(key)]

    def __setitem__(self, depth, sample):
        if depth == 'all':
            print "Warning: use of 'all' key is deprecated. Use core.properties instead"
            self.properties = sample
            return
        super(Core, self).__setitem__(self._unitkey(depth), sample)
        try:
            self.runs.update(sample.keys())
        except AttributeError:
            #not actually a run, just some background
            pass

    def add(self, sample):
        sample['input']['core'] = self.name
        self[sample['input']['depth']] = sample

    def forcesample(self, depth):
        try:
            return self[depth]
        except KeyError:
            s = Sample(exp_data={'depth': depth})
            self.add(s)
            return s

    def force_load(self):
        #not my favorite hack, but wevs.
        if not self.loaded:
            for sample in self:
                pass

    def __iter__(self):
        #if I'm getting all the keys, I'm going to want the values too, so
        #I might as well pull everything. Whee!
        if self.loaded:
            for key in self._data:
                yield key
        else:
            for key, value in self._table.iter_core_samples(self):
                if key == 'all':
                    #if we've got a core that used to have data in 'all', we want
                    #to put that data nicely in properties for great justice on
                    #load (should only happen on first load...)
                    sam = self.makesample(value)
                    #since it's not a "normal" sample anymore, it doesn't need
                    #depth and core, and life will be easier without them...
                    try:
                        del sam['input']['depth']
                        del sam['input']['core']
                    except KeyError:
                        pass
                    self.properties = sam
                    continue  #not actually part of our iteration, lulz
                numeric = UncertainQuantity(key, 'mm')
                self._data[self._unitkey(numeric)] = self.makesample(value)
                yield numeric
            self.loaded = True
Example #21
    def __init__(self):
        self.samplelist = []
        with open('samplelist.csv', 'r') as file:
            reader = csv.reader(file)
            i = 0
            for row in reader:
                if i == 0:
                    print(f'Column names are {", ".join(row)}')
                    print("hi")
                    i = i + 1
                else:
                    print(row[0])
                    newSample = Sample(row[0], row[1], row[2], row[3], row[4],
                                       row[5], row[6], row[7], row[8], row[9],
                                       row[10], row[11], row[12], row[13],
                                       row[14])  #SampleID, Date, BG pressure
                    self.samplelist.append(newSample)

        Gtk.Window.__init__(self, title="Header Bar")
        self.set_border_width(10)

        self.sampleamount = 1

        header_bar = Gtk.HeaderBar()
        header_bar.set_show_close_button(True)
        header_bar.props.title = "KARIN 2.0 (early alpha 0.10))"
        self.set_titlebar(header_bar)

        #Audio button on the right
        menu_button = Gtk.MenuButton()
        cd_icon = Gio.ThemedIcon(
            name="open-menu-symbolic")  #document-open-symbolic
        image = Gtk.Image.new_from_gicon(cd_icon, Gtk.IconSize.BUTTON)
        menu_button.add(image)
        menumodel = Gio.Menu()
        menumodel.append("New Sample", "win.newsample")
        #menumodel.append("Sample Database", "win.sampledatabase")
        menumodel.append("About", "win.about")
        #    submenu = Gio.Menu()
        #    submenu.append("Quit", "app.quit")
        #    menumodel.append_submenu("Other", submenu)
        menu_button.set_menu_model(menumodel)

        newsample_action = Gio.SimpleAction.new("newsample", None)
        newsample_action.connect("activate", self.newsample)
        self.add_action(newsample_action)
        #sampledatabase_action = Gio.SimpleAction.new("sampledatabase", None)
        #sampledatabase_action.connect("activate", self.sampledatabase)
        #self.add_action(sampledatabase_action)
        about_action = Gio.SimpleAction.new("about", None)
        about_action.connect("activate", self.about_callback)
        self.add_action(about_action)

        header_bar.pack_end(menu_button)

        box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        Gtk.StyleContext.add_class(box.get_style_context(), "linked")

        openbutton = Gtk.Button()
        openbutton.add(Gtk.Label("Sample DB"))
        openbutton.connect("clicked", self.sampledatabase)
        box.add(openbutton)

        open_extend = Gtk.Button()
        open_extend.add(Gtk.Arrow(Gtk.ArrowType.DOWN, Gtk.ShadowType.NONE))
        open_extend.connect("clicked", self.button_clicked)
        box.add(open_extend)

        header_bar.pack_start(box)

        self.notebook = Gtk.Notebook()
        self.add(self.notebook)
        #first page
        self.page1 = Gtk.Box()
        self.page1.set_border_width(10)

        main_area = Gtk.Stack()
        main_area.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
        main_area.set_transition_duration(540)

        specularlabel = Gtk.Label(
            "Options for specular data will be shown here")

        main_area.add_titled(specularlabel, "specularlabel", "Specular")

        offspeclabel = Gtk.Label(
            "Options for off-specular data will be shown here")
        main_area.add_titled(offspeclabel, "offspecularlabel", "Off-specular")

        stack_switcher = Gtk.StackSwitcher()
        stack_switcher.set_stack(main_area)

        self.hbox = Gtk.Box(spacing=10)
        self.hbox.set_homogeneous(
            False)  #False -> all children do not get equal space
        self.vbox_left = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
                                 spacing=20)
        self.vbox_left.set_homogeneous(False)
        self.vbox_right = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
                                  spacing=20)
        self.vbox_right.set_homogeneous(False)

        self.hbox.pack_start(self.vbox_left, True, True, 0)
        self.hbox.pack_start(self.vbox_right, True, True, 0)

        label = Gtk.Label("This is a plain label")
        label.set_justify(Gtk.Justification.LEFT)
        self.vbox_left.pack_start(stack_switcher, False, True, 0)
        self.vbox_left.pack_start(main_area, True, True, 0)

        # Line wrap
        self.page1.add(self.hbox)
        self.f = plt.figure()
        self.canvas = FigureCanvas(self.f)  # a Gtk.DrawingArea
        self.vbox_right.pack_start(self.canvas, False, True, 0)
        self.ax = self.f.add_subplot(111)
        plt.xlabel('Grazing incidence angle θ (°)')
        plt.ylabel('Intensity (arb. u)')
        plt.yscale('log')
        #
        self.canvas.set_size_request(1200, 800)
        #self.draw_plot()
        #      self.page1.add(vbox_right)

        self.notebook.append_page(self.page1, Gtk.Label('X-Ray reflectivity'))

        self.page3 = Gtk.Box()
        self.page3.set_border_width(10)
        self.page3.add(Gtk.Label('Hey there durr durr I am page three'))
        icon = Gtk.Image.new_from_icon_name("open-menu-symbolic",
                                            Gtk.IconSize.MENU)
        self.notebook.append_page(self.page3, Gtk.Label('2D Maps'))

        #second page
        self.page4 = Gtk.Grid()
        #self.page3.set_row_homogeneous(True)
        self.page4.set_column_homogeneous(False)
        self.page4.set_border_width(10)
        self.button = Gtk.Button(label="Simulate")
        self.button.connect("clicked", self.button_clicked)
        self.bilayerfield = Gtk.Entry()
        self.bilayerfield.set_text("10")

        bilayerlabel = Gtk.Label("Bilayers N      ")
        bilayerlabel.set_alignment(1, 0.5)

        self.interfacewidthfield = Gtk.Entry()
        self.interfacewidthfield.set_text("4.5")
        interfacewidthlabel = Gtk.Label("Interface Width (Å)      ")
        interfacewidthlabel.set_alignment(1, 0.5)

        self.lattercorlengthfield = Gtk.Entry()
        self.lattercorlengthfield.set_text("200")
        lattercorrlengthlabel = Gtk.Label(
            "Latteral correlation length (Å)      ")
        lattercorrlengthlabel.set_alignment(1, 0.5)

        self.crosscorlengthfield = Gtk.Entry()
        self.crosscorlengthfield.set_text("10000")
        crosscorrlengthlabel = Gtk.Label("Cross correlation length (Å)      ")
        crosscorrlengthlabel.set_alignment(1, 0.5)

        self.page4.attach(bilayerlabel, 1, 1, 1, 1)
        self.page4.attach(self.bilayerfield, 2, 1, 1, 1)
        self.page4.attach(interfacewidthlabel, 1, 2, 1, 1)
        self.page4.attach(self.interfacewidthfield, 2, 2, 1, 1)

        self.page4.attach(lattercorrlengthlabel, 1, 3, 1, 1)
        self.page4.attach(self.lattercorlengthfield, 2, 3, 1, 1)
        self.page4.attach(crosscorrlengthlabel, 1, 4, 1, 1)
        self.page4.attach(self.crosscorlengthfield, 2, 4, 1, 1)

        self.simulatebutton = Gtk.Button(label="Simulate")
        self.simulatebutton.set_vexpand(False)
        self.simulatebutton.set_hexpand(False)
        self.simulatebutton.connect("clicked", self.button_clicked)
        self.page4.attach(self.simulatebutton, 2, 5, 1, 1)

        self.spinner = Gtk.Spinner()
        self.spinner.start()
        self.spinner.stop()
        self.page4.attach(self.spinner, 2, 6, 1, 1)

        fig = plt.figure()
        plt.yscale("log")
        plt.draw()
        ax = fig.add_subplot(111)

        baplotcanvas = FigureCanvas(fig)  # a Gtk.DrawingArea
        baplotcanvas.set_size_request(1100, 800)
        self.page4.attach(baplotcanvas, 3, 1, 5, 400)

        self.notebook.append_page(self.page4, Gtk.Label("BornAgain"))
Example #22
 def reweightfrom(self):
     from samples import Sample
     if self.productionmode == "ggH":
         if self.analysis in ("fa2", "fa3"):
             result = [
                 Sample(self.production, "ggH", "0+"),
                 Sample(self.production, "ggH", "a2"),
                 Sample(self.production, "ggH", "0-"),
                 Sample(self.production, "ggH", "L1"),
                 Sample(self.production, "ggH", "fa20.5"),
                 Sample(self.production, "ggH", "fa30.5"),
                 #Sample(self.production, "ggH", "fL10.5"),   #NOT fL1 for now
             ]
         if self.analysis == "fL1":
             if self.hypothesis in ("0+", "L1"):
                 result = [
                     Sample(self.production, "ggH", "0+"),
                     Sample(self.production, "ggH", "a2"),
                     Sample(self.production, "ggH", "0-"),
                     Sample(self.production, "ggH", "L1"),
                     Sample(self.production, "ggH", "fa20.5"),
                     Sample(self.production, "ggH", "fa30.5"),
                     #Sample(self.production, "ggH", "fL10.5"),   #NOT fL1 for now
                 ]
             elif self.hypothesis == "fL10.5":
                 result = [
                     #Sample(self.production, "ggH", "0+"),
                     Sample(self.production, "ggH", "a2"),
                     #Sample(self.production, "ggH", "0-"),
                     Sample(self.production, "ggH", "L1"),
                     Sample(self.production, "ggH", "fa20.5"),
                     Sample(self.production, "ggH", "fa30.5"),
                     Sample(self.production, "ggH", "fL10.5"),
                 ]
     if self.productionmode in ("qqZZ", "ZX"):
         result = [Sample(self.production, self.productionmode)]
     if self.productionmode == "ggZZ":
         result = [
             Sample(self.production, self.productionmode, flavor)
             for flavor in flavors
         ]
     if self.productionmode == "data":
         result = [
             Sample(self.production, self.productionmode, self.blindstatus)
         ]
     result = [
         sample for sample in result if
         tfiles[sample.withdiscriminantsfile()].candTree.GetEntries() != 0
     ]
     assert result
     return result
Example #23
 def signalsamples(self):
     from samples import Sample
     return [
         Sample(reweightingsample, self.production)
         for reweightingsample in self.analysis.signalsamples()
     ]
Example #24
    def add_sample(self, name, labels, value, timestamp=None, exemplar=None):
        """Add a sample to the metric.

        Internal-only, do not use."""
        self.samples.append(Sample(name, labels, value, timestamp, exemplar))
Example #25
 def originaltreefile(self):
     from samples import Sample
     return Sample("data", self.production,
                   "unblind").withdiscriminantsfile()